Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig                    2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h               185
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c          106
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_mdio.c            6
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h         133
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c   1024
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c      4
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c        104
8 files changed, 835 insertions(+), 729 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 911b5ef9e680..0014729b8865 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Xilink device configuration
+# Xilinx device configuration
#
config NET_VENDOR_XILINX
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 4a73127e10a6..6668d1b760d8 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -21,36 +21,45 @@
/* Configuration options */
/* Accept all incoming packets.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_PROMISC (1 << 0)
/* Jumbo frame support for Tx & Rx.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_JUMBO (1 << 1)
/* VLAN Rx & Tx frame support.
- * This option defaults to disabled (cleared) */
+ * This option defaults to disabled (cleared)
+ */
#define XTE_OPTION_VLAN (1 << 2)
/* Enable recognition of flow control frames on Rx
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FLOW_CONTROL (1 << 4)
/* Strip FCS and PAD from incoming frames.
* Note: PAD from VLAN frames is not stripped.
- * This option defaults to disabled (set) */
+ * This option defaults to disabled (set)
+ */
#define XTE_OPTION_FCS_STRIP (1 << 5)
/* Generate FCS field and add PAD automatically for outgoing frames.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_FCS_INSERT (1 << 6)
/* Enable Length/Type error checking for incoming frames. When this option is
-set, the MAC will filter frames that have a mismatched type/length field
-and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
-types of frames are encountered. When this option is cleared, the MAC will
-allow these types of frames to be received.
-This option defaults to enabled (set) */
+ * set, the MAC will filter frames that have a mismatched type/length field
+ * and if XTE_OPTION_REPORT_RXERR is set, the user is notified when these
+ * types of frames are encountered. When this option is cleared, the MAC will
+ * allow these types of frames to be received.
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_LENTYPE_ERR (1 << 7)
/* Enable the transmitter.
- * This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_TXEN (1 << 11)
/* Enable the receiver
-* This option defaults to enabled (set) */
+ * This option defaults to enabled (set)
+ */
#define XTE_OPTION_RXEN (1 << 12)
/* Default options set when device is initialized or reset */
@@ -68,18 +77,18 @@ This option defaults to enabled (set) */
#define TX_TAILDESC_PTR 0x04 /* rw */
#define TX_CHNL_CTRL 0x05 /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
-*/
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
+ */
#define CHNL_CTRL_IRQ_IOE (1 << 9)
#define CHNL_CTRL_IRQ_EN (1 << 7)
#define CHNL_CTRL_IRQ_ERR_EN (1 << 2)
@@ -87,35 +96,35 @@ This option defaults to enabled (set) */
#define CHNL_CTRL_IRQ_COAL_EN (1 << 0)
#define TX_IRQ_REG 0x06 /* rw */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
- 29 2 ErrIrq
- 30 1 DlyIrq
- 31 0 CoalIrq
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ * 29 2 ErrIrq
+ * 30 1 DlyIrq
+ * 31 0 CoalIrq
*/
#define TX_CHNL_STS 0x07 /* r */
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define RX_NXTDESC_PTR 0x08 /* r */
#define RX_CURBUF_ADDR 0x09 /* r */
@@ -124,17 +133,17 @@ This option defaults to enabled (set) */
#define RX_TAILDESC_PTR 0x0c /* rw */
#define RX_CHNL_CTRL 0x0d /* rw */
/*
- 0:7 24:31 IRQTimeout
- 8:15 16:23 IRQCount
- 16:20 11:15 Reserved
- 21 10 0
- 22 9 UseIntOnEnd
- 23 8 LdIRQCnt
- 24 7 IRQEn
- 25:28 3:6 Reserved
- 29 2 IrqErrEn
- 30 1 IrqDlyEn
- 31 0 IrqCoalEn
+ * 0:7 24:31 IRQTimeout
+ * 8:15 16:23 IRQCount
+ * 16:20 11:15 Reserved
+ * 21 10 0
+ * 22 9 UseIntOnEnd
+ * 23 8 LdIRQCnt
+ * 24 7 IRQEn
+ * 25:28 3:6 Reserved
+ * 29 2 IrqErrEn
+ * 30 1 IrqDlyEn
+ * 31 0 IrqCoalEn
*/
#define RX_IRQ_REG 0x0e /* rw */
#define IRQ_COAL (1 << 0)
@@ -142,13 +151,13 @@ This option defaults to enabled (set) */
#define IRQ_ERR (1 << 2)
#define IRQ_DMAERR (1 << 7) /* this is not documented ??? */
/*
- 0:7 24:31 DltTmrValue
- 8:15 16:23 ClscCntrValue
- 16:17 14:15 Reserved
- 18:21 10:13 ClscCnt
- 22:23 8:9 DlyCnt
- 24:28 3::7 Reserved
-*/
+ * 0:7 24:31 DltTmrValue
+ * 8:15 16:23 ClscCntrValue
+ * 16:17 14:15 Reserved
+ * 18:21 10:13 ClscCnt
+ * 22:23 8:9 DlyCnt
+ * 24:28 3::7 Reserved
+ */
#define RX_CHNL_STS 0x0f /* r */
#define CHNL_STS_ENGBUSY (1 << 1)
#define CHNL_STS_EOP (1 << 2)
@@ -165,23 +174,23 @@ This option defaults to enabled (set) */
#define CHNL_STS_CMPERR (1 << 20)
#define CHNL_STS_TAILERR (1 << 21)
/*
- 0:9 22:31 Reserved
- 10 21 TailPErr
- 11 20 CmpErr
- 12 19 AddrErr
- 13 18 NxtPErr
- 14 17 CurPErr
- 15 16 BsyWr
- 16:23 8:15 Reserved
- 24 7 Error
- 25 6 IOE
- 26 5 SOE
- 27 4 Cmplt
- 28 3 SOP
- 29 2 EOP
- 30 1 EngBusy
- 31 0 Reserved
-*/
+ * 0:9 22:31 Reserved
+ * 10 21 TailPErr
+ * 11 20 CmpErr
+ * 12 19 AddrErr
+ * 13 18 NxtPErr
+ * 14 17 CurPErr
+ * 15 16 BsyWr
+ * 16:23 8:15 Reserved
+ * 24 7 Error
+ * 25 6 IOE
+ * 26 5 SOE
+ * 27 4 Cmplt
+ * 28 3 SOP
+ * 29 2 EOP
+ * 30 1 EngBusy
+ * 31 0 Reserved
+ */
#define DMA_CONTROL_REG 0x10 /* rw */
#define DMA_CONTROL_RST (1 << 0)
@@ -271,7 +280,7 @@ This option defaults to enabled (set) */
#define XTE_TIE_OFFSET 0x000003A4 /* Interrupt enable */
-/** MII Mamagement Control register (MGTCR) */
+/* MII Management Control register (MGTCR) */
#define XTE_MGTDR_OFFSET 0x000003B0 /* MII data */
#define XTE_MIIMAI_OFFSET 0x000003B4 /* MII control */
@@ -283,7 +292,7 @@ This option defaults to enabled (set) */
#define STS_CTRL_APP0_ERR (1 << 31)
#define STS_CTRL_APP0_IRQONEND (1 << 30)
-/* undoccumented */
+/* undocumented */
#define STS_CTRL_APP0_STOPONEND (1 << 29)
#define STS_CTRL_APP0_CMPLT (1 << 28)
#define STS_CTRL_APP0_SOP (1 << 27)
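
The reflowed comment tables above give the per-bit layout of the DMA channel registers; the CHNL_CTRL_* masks defined alongside them encode the same fields. As a minimal illustrative sketch (not part of this patch), a TX interrupt enable using only masks from this header might read:

	/* Hypothetical helper: turn on IRQ generation plus coalesce and
	 * error interrupts for the TX channel. lp->dma_out follows the
	 * (lp, reg, value) signature used throughout ll_temac_main.c.
	 */
	static void temac_tx_irq_enable(struct temac_local *lp)
	{
		lp->dma_out(lp, TX_CHNL_CTRL,
			    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
			    CHNL_CTRL_IRQ_COAL_EN);
	}
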
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index e7065c9a8e38..1066420d6a83 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -117,8 +117,8 @@ int temac_indirect_busywait(struct temac_local *lp)
spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
if (WARN_ON(!hard_acs_rdy(lp)))
return -ETIMEDOUT;
- else
- return 0;
+
+ return 0;
}
/*
@@ -261,7 +261,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
* I/O functions
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
unsigned int dcrs;
@@ -286,7 +286,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
* such as with MicroBlaze and x86
*/
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
- struct device_node *np)
+ struct device_node *np)
{
return -1;
}
@@ -307,11 +307,9 @@ static void temac_dma_bd_release(struct net_device *ndev)
for (i = 0; i < lp->rx_bd_num; i++) {
if (!lp->rx_skb[i])
break;
- else {
- dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
- XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
- dev_kfree_skb(lp->rx_skb[i]);
- }
+ dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+ XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb(lp->rx_skb[i]);
}
if (lp->rx_bd_v)
dma_free_coherent(ndev->dev.parent,
@@ -361,8 +359,9 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));
- skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
+ skb = __netdev_alloc_skb_ip_align(ndev,
+ XTE_MAX_JUMBO_FRAME_SIZE,
+ GFP_KERNEL);
if (!skb)
goto out;
@@ -429,7 +428,8 @@ static void temac_do_set_mac_address(struct net_device *ndev)
(ndev->dev_addr[2] << 16) |
(ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1
- * so don't affect them Set MAC bits [47:32] in EUAW1 */
+ * so don't affect them Set MAC bits [47:32] in EUAW1
+ */
temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
(ndev->dev_addr[4] & 0x000000ff) |
(ndev->dev_addr[5] << 8));
@@ -529,66 +529,66 @@ static struct temac_option {
{
.opt = XTE_OPTION_JUMBO,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXJMBO_MASK,
+ .m_or = XTE_RXC1_RXJMBO_MASK,
},
/* Turn on VLAN packet support for both Rx and Tx */
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXVLAN_MASK,
+ .m_or = XTE_TXC_TXVLAN_MASK,
},
{
.opt = XTE_OPTION_VLAN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXVLAN_MASK,
+ .m_or = XTE_RXC1_RXVLAN_MASK,
},
/* Turn on FCS stripping on receive packets */
{
.opt = XTE_OPTION_FCS_STRIP,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXFCS_MASK,
+ .m_or = XTE_RXC1_RXFCS_MASK,
},
/* Turn on FCS insertion on transmit packets */
{
.opt = XTE_OPTION_FCS_INSERT,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXFCS_MASK,
+ .m_or = XTE_TXC_TXFCS_MASK,
},
/* Turn on length/type field checking on receive packets */
{
.opt = XTE_OPTION_LENTYPE_ERR,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXLT_MASK,
+ .m_or = XTE_RXC1_RXLT_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_RXFLO_MASK,
+ .m_or = XTE_FCC_RXFLO_MASK,
},
/* Turn on flow control */
{
.opt = XTE_OPTION_FLOW_CONTROL,
.reg = XTE_FCC_OFFSET,
- .m_or =XTE_FCC_TXFLO_MASK,
+ .m_or = XTE_FCC_TXFLO_MASK,
},
/* Turn on promiscuous frame filtering (all frames are received ) */
{
.opt = XTE_OPTION_PROMISC,
.reg = XTE_AFM_OFFSET,
- .m_or =XTE_AFM_EPPRM_MASK,
+ .m_or = XTE_AFM_EPPRM_MASK,
},
/* Enable transmitter if not already enabled */
{
.opt = XTE_OPTION_TXEN,
.reg = XTE_TXC_OFFSET,
- .m_or =XTE_TXC_TXEN_MASK,
+ .m_or = XTE_TXC_TXEN_MASK,
},
/* Enable receiver? */
{
.opt = XTE_OPTION_RXEN,
.reg = XTE_RXC1_OFFSET,
- .m_or =XTE_RXC1_RXEN_MASK,
+ .m_or = XTE_RXC1_RXEN_MASK,
},
{}
};
@@ -640,7 +640,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset RX reset timeout!!\n");
+ "%s RX reset timeout!!\n", __func__);
break;
}
}
@@ -652,7 +652,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset TX reset timeout!!\n");
+ "%s TX reset timeout!!\n", __func__);
break;
}
}
@@ -671,7 +671,7 @@ static void temac_device_reset(struct net_device *ndev)
udelay(1);
if (--timeout == 0) {
dev_err(&ndev->dev,
- "temac_device_reset DMA reset timeout!!\n");
+ "%s DMA reset timeout!!\n", __func__);
break;
}
}
@@ -679,7 +679,7 @@ static void temac_device_reset(struct net_device *ndev)
if (temac_dma_bd_init(ndev)) {
dev_err(&ndev->dev,
- "temac_device_reset descriptor allocation failed\n");
+ "%s descriptor allocation failed\n", __func__);
}
spin_lock_irqsave(lp->indirect_lock, flags);
@@ -690,7 +690,8 @@ static void temac_device_reset(struct net_device *ndev)
spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
- * but leave receiver and transmitter disabled. */
+ * but leave receiver and transmitter disabled.
+ */
temac_setoptions(ndev,
lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));
@@ -722,9 +723,15 @@ static void temac_adjust_link(struct net_device *ndev)
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
- case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
- case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
- case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
+ case SPEED_1000:
+ mii_speed |= XTE_EMCFG_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ mii_speed |= XTE_EMCFG_LINKSPD_100;
+ break;
+ case SPEED_10:
+ mii_speed |= XTE_EMCFG_LINKSPD_10;
+ break;
}
/* Write new speed setting out to TEMAC */
@@ -1006,9 +1013,8 @@ static void ll_temac_recv(struct net_device *ndev)
if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
(skb->protocol == htons(ETH_P_IP)) &&
(skb->len > 64)) {
-
/* Convert from device endianness (be32) to cpu
- * endiannes, and if necessary swap the bytes
+ * endianness, and if necessary swap the bytes
* (back) for proper IP checksum byte order
* (be16).
*/
@@ -1276,8 +1282,11 @@ static const struct attribute_group temac_attr_group = {
* ethtool support
*/
-static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -1291,8 +1300,11 @@ static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -1427,6 +1439,8 @@ static int temac_probe(struct platform_device *pdev)
lp->indirect_lock = devm_kmalloc(&pdev->dev,
sizeof(*lp->indirect_lock),
GFP_KERNEL);
+ if (!lp->indirect_lock)
+ return -ENOMEM;
spin_lock_init(lp->indirect_lock);
}
@@ -1506,7 +1520,7 @@ static int temac_probe(struct platform_device *pdev)
of_node_put(dma_np);
return PTR_ERR(lp->sdma_regs);
}
- if (of_get_property(dma_np, "little-endian", NULL)) {
+ if (of_property_read_bool(dma_np, "little-endian")) {
lp->dma_in = temac_dma_in32_le;
lp->dma_out = temac_dma_out32_le;
} else {
@@ -1554,16 +1568,12 @@ static int temac_probe(struct platform_device *pdev)
}
/* Error handle returned DMA RX and TX interrupts */
- if (lp->rx_irq < 0) {
- if (lp->rx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA RX irq\n");
- return lp->rx_irq;
- }
- if (lp->tx_irq < 0) {
- if (lp->tx_irq != -EPROBE_DEFER)
- dev_err(&pdev->dev, "could not get DMA TX irq\n");
- return lp->tx_irq;
- }
+ if (lp->rx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->rx_irq,
+ "could not get DMA RX irq\n");
+ if (lp->tx_irq < 0)
+ return dev_err_probe(&pdev->dev, lp->tx_irq,
+ "could not get DMA TX irq\n");
if (temac_np) {
/* Retrieve the MAC address */
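
The dev_err_probe() conversion above collapses the open-coded -EPROBE_DEFER handling: the helper returns its error argument, logs with dev_err() for real failures, and quietly records the deferral reason for probe deferrals. A generic sketch of the pattern (the IRQ lookup here is illustrative, not taken from this hunk):

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "could not get IRQ\n");
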
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index 6fd2dea4e60f..2371c072b53f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -29,7 +29,8 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
- * in the LSW0 register */
+ * in the LSW0 register
+ */
spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
@@ -88,7 +89,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
}
/* Enable the MDIO bus by asserting the enable bit and writing
- * in the clock config */
+ * in the clock config
+ */
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
bus = devm_mdiobus_alloc(&pdev->dev);
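
The MDIO enable write above ORs bit 6 (bus enable) into the clock control word together with the clock divisor. The divisor itself is computed earlier in temac_mdio_setup(), outside this hunk; a plausible sketch, assuming MDC = bus_hz / (2 * (clk_div + 1)) and the 2.5 MHz ceiling that IEEE 802.3 places on MDC:

	/* Illustrative only; the driver's exact formula is not shown here.
	 * Pick the smallest divisor that keeps MDC at or below 2.5 MHz.
	 */
	u32 clk_div = DIV_ROUND_UP(bus_hz, 2 * 2500000) - 1;
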
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 5b4d153b1492..6370c447ac5c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -119,11 +119,11 @@
#define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */
-/* Default TX/RX Threshold and waitbound values for SGDMA mode */
+/* Default TX/RX Threshold and delay timer values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD 24
-#define XAXIDMA_DFT_TX_WAITBOUND 254
-#define XAXIDMA_DFT_RX_THRESHOLD 24
-#define XAXIDMA_DFT_RX_WAITBOUND 254
+#define XAXIDMA_DFT_TX_USEC 50
+#define XAXIDMA_DFT_RX_THRESHOLD 1
+#define XAXIDMA_DFT_RX_USEC 50
#define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */
@@ -386,6 +386,7 @@ struct axidma_bd {
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
* @pcs_phy: Reference to PCS/PMA PHY if used
+ * @pcs: phylink pcs structure for PCS PHY
* @switch_x_sgmii: Whether switchable 1000BaseX/SGMII mode is enabled in the core
* @axi_clk: AXI4-Lite bus clock
* @misc_clks: Misc ethernet clocks (AXI4-Stream, Ref, MGT clocks)
@@ -394,6 +395,28 @@ struct axidma_bd {
* @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
+ * @napi_rx: NAPI RX control structure
+ * @rx_dma_cr: Nominal content of RX DMA control register
+ * @rx_bd_v: Virtual address of the RX buffer descriptor ring
+ * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
+ * @rx_bd_num: Size of RX buffer descriptor ring
+ * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
+ * accessed currently.
+ * @rx_packets: RX packet count for statistics
+ * @rx_bytes: RX byte count for statistics
+ * @rx_stat_sync: Synchronization object for RX stats
+ * @napi_tx: NAPI TX control structure
+ * @tx_dma_cr: Nominal content of TX DMA control register
+ * @tx_bd_v: Virtual address of the TX buffer descriptor ring
+ * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
+ * @tx_bd_num: Size of TX buffer descriptor ring
+ * @tx_bd_ci: Stores the next Tx buffer descriptor in the ring that may be
+ * complete. Only updated at runtime by TX NAPI poll.
+ * @tx_bd_tail: Stores the index of the next Tx buffer descriptor in the ring
+ * to be populated.
+ * @tx_packets: TX packet count for statistics
+ * @tx_bytes: TX byte count for statistics
+ * @tx_stat_sync: Synchronization object for TX stats
* @dma_err_task: Work structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
@@ -401,19 +424,6 @@ struct axidma_bd {
* @phy_mode: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @features: Stores the extended features supported by the axienet hw
- * @tx_bd_v: Virtual address of the TX buffer descriptor ring
- * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring
- * @tx_bd_num: Size of TX buffer descriptor ring
- * @rx_bd_v: Virtual address of the RX buffer descriptor ring
- * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring
- * @rx_bd_num: Size of RX buffer descriptor ring
- * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while alloc. BDs before a TX starts
- * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being
- * accessed currently. Used while processing BDs after the TX
- * completed.
- * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being
- * accessed currently.
* @max_frm_size: Stores the maximum size of the frame that can be that
* Txed/Rxed in the existing hardware. If jumbo option is
* supported, the maximum frame size would be 9k. Else it is
@@ -422,18 +432,19 @@ struct axidma_bd {
* @csum_offload_on_tx_path: Stores the checksum selection on TX side.
* @csum_offload_on_rx_path: Stores the checksum selection on RX side.
* @coalesce_count_rx: Store the irq coalesce on RX side.
+ * @coalesce_usec_rx: IRQ coalesce delay for RX
* @coalesce_count_tx: Store the irq coalesce on TX side.
+ * @coalesce_usec_tx: IRQ coalesce delay for TX
*/
struct axienet_local {
struct net_device *ndev;
struct device *dev;
- struct device_node *phy_node;
-
struct phylink *phylink;
struct phylink_config phylink_config;
struct mdio_device *pcs_phy;
+ struct phylink_pcs pcs;
bool switch_x_sgmii;
@@ -447,6 +458,27 @@ struct axienet_local {
void __iomem *regs;
void __iomem *dma_regs;
+ struct napi_struct napi_rx;
+ u32 rx_dma_cr;
+ struct axidma_bd *rx_bd_v;
+ dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
+ u32 rx_bd_ci;
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ struct u64_stats_sync rx_stat_sync;
+
+ struct napi_struct napi_tx;
+ u32 tx_dma_cr;
+ struct axidma_bd *tx_bd_v;
+ dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
+ u32 tx_bd_ci;
+ u32 tx_bd_tail;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync tx_stat_sync;
+
struct work_struct dma_err_task;
int tx_irq;
@@ -457,16 +489,6 @@ struct axienet_local {
u32 options;
u32 features;
- struct axidma_bd *tx_bd_v;
- dma_addr_t tx_bd_p;
- u32 tx_bd_num;
- struct axidma_bd *rx_bd_v;
- dma_addr_t rx_bd_p;
- u32 rx_bd_num;
- u32 tx_bd_ci;
- u32 tx_bd_tail;
- u32 rx_bd_ci;
-
u32 max_frm_size;
u32 rxmem;
@@ -474,7 +496,9 @@ struct axienet_local {
int csum_offload_on_rx_path;
u32 coalesce_count_rx;
+ u32 coalesce_usec_rx;
u32 coalesce_count_tx;
+ u32 coalesce_usec_tx;
};
/**
@@ -535,6 +559,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset,
iowrite32(value, lp->regs + offset);
}
+/**
+ * axienet_dma_out32 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+
+static inline void axienet_dma_out32(struct axienet_local *lp,
+ off_t reg, u32 value)
+{
+ iowrite32(value, lp->dma_regs + reg);
+}
+
+#if defined(CONFIG_64BIT) && defined(iowrite64)
+/**
+ * axienet_dma_out64 - Memory mapped Axi DMA register write.
+ * @lp: Pointer to axienet local structure
+ * @reg: Address offset from the base address of the Axi DMA core
+ * @value: Value to be written into the Axi DMA register
+ *
+ * This function writes the desired value into the corresponding Axi DMA
+ * register.
+ */
+static inline void axienet_dma_out64(struct axienet_local *lp,
+ off_t reg, u64 value)
+{
+ iowrite64(value, lp->dma_regs + reg);
+}
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ if (lp->features & XAE_FEATURE_DMA_64BIT)
+ axienet_dma_out64(lp, reg, addr);
+ else
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#else /* CONFIG_64BIT */
+
+static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
+ dma_addr_t addr)
+{
+ axienet_dma_out32(lp, reg, lower_32_bits(addr));
+}
+
+#endif /* CONFIG_64BIT */
+
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
int axienet_mdio_enable(struct axienet_local *lp);
void axienet_mdio_disable(struct axienet_local *lp);
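
Hoisting axienet_dma_out32() and the address-write helpers into this header makes them available to xilinx_axienet_mdio.c as well. With CONFIG_64BIT and a real iowrite64, the full descriptor pointer is written in a single access; otherwise only the low 32 bits are written (presumably the XAE_FEATURE_DMA_64BIT case is rejected elsewhere when iowrite64 is unavailable). Usage matches what axienet_dma_start() does in the main driver, for example:

	/* lp is assumed to be a fully initialized struct axienet_local */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
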
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9b068b81ae09..d1d772580da9 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,7 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
- * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
+ * Copyright (c) 2019 - 2022 Calian Advanced Technologies
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -33,7 +33,7 @@
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
-#include <linux/spinlock.h>
+#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -132,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
return ioread32(lp->dma_regs + reg);
}
-/**
- * axienet_dma_out32 - Memory mapped Axi DMA register write.
- * @lp: Pointer to axienet local structure
- * @reg: Address offset from the base address of the Axi DMA core
- * @value: Value to be written into the Axi DMA register
- *
- * This function writes the desired value into the corresponding Axi DMA
- * register.
- */
-static inline void axienet_dma_out32(struct axienet_local *lp,
- off_t reg, u32 value)
-{
- iowrite32(value, lp->dma_regs + reg);
-}
-
-static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
- dma_addr_t addr)
-{
- axienet_dma_out32(lp, reg, lower_32_bits(addr));
-
- if (lp->features & XAE_FEATURE_DMA_64BIT)
- axienet_dma_out32(lp, reg + 4, upper_32_bits(addr));
-}
-
static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
struct axidma_bd *desc)
{
@@ -189,7 +166,7 @@ static void axienet_dma_bd_release(struct net_device *ndev)
struct axienet_local *lp = netdev_priv(ndev);
/* If we end up here, tx_bd_v must have been DMA allocated. */
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
@@ -214,18 +191,88 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
if (lp->rx_bd_v[i].cntrl) {
phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
lp->max_frm_size, DMA_FROM_DEVICE);
}
}
- dma_free_coherent(ndev->dev.parent,
+ dma_free_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
/**
+ * axienet_usec_to_timer - Calculate IRQ delay timer value
+ * @lp: Pointer to the axienet_local structure
+ * @coalesce_usec: Microseconds to convert into timer value
+ */
+static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
+{
+ u32 result;
+ u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
+
+ if (lp->axi_clk)
+ clk_rate = clk_get_rate(lp->axi_clk);
+
+ /* 1 Timeout Interval = 125 * (clock period of SG clock) */
+ result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
+ (u64)125000000);
+ if (result > 255)
+ result = 255;
+
+ return result;
+}
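/* Illustrative note, not part of the patch. Worked example of the
 * conversion above: with the common 125 MHz AXI clock, one timeout
 * interval is 125 * 8 ns = 1 us, so the mapping is 1:1 and
 * coalesce_usec = 50 yields a timer value of 50. A slower SG clock
 * scales the result down proportionally, saturating at the 8-bit
 * maximum of 255.
 */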
+
+/**
+ * axienet_dma_start - Set up DMA registers and start DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_start(struct axienet_local *lp)
+{
+ /* Start updating the Rx channel control register */
+ lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_rx > 1)
+ lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+
+ /* Start updating the Tx channel control register */
+ lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
+ XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
+ /* Only set interrupt delay timer if not generating an interrupt on
+ * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
+ */
+ if (lp->coalesce_count_tx > 1)
+ lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
+ << XAXIDMA_DELAY_SHIFT) |
+ XAXIDMA_IRQ_DELAY_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+
+ /* Populate the tail pointer and bring the Rx Axi DMA engine out of
+ * halted state. This will make the Rx side ready for reception.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
+ lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
+
+ /* Write to the RS (Run-stop) bit in the Tx channel control register.
+ * Tx channel is now ready to run. But only after we write to the
+ * tail pointer register that the Tx channel will start transmitting.
+ */
+ axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
+ lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+}
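/* Illustrative note, not part of the patch. Example CR composition with
 * the XAXIDMA_DFT_* defaults from xilinx_axienet.h (RX threshold 1,
 * 50 us delay): coalesce_count_rx == 1 skips the delay branch, leaving
 *   rx_dma_cr = (1 << XAXIDMA_COALESCE_SHIFT) |
 *               XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK
 * i.e. an interrupt on every completed packet and no delay timer armed.
 */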
+
+/**
* axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
* @ndev: Pointer to the net_device structure
*
@@ -237,7 +284,6 @@ static void axienet_dma_bd_release(struct net_device *ndev)
*/
static int axienet_dma_bd_init(struct net_device *ndev)
{
- u32 cr;
int i;
struct sk_buff *skb;
struct axienet_local *lp = netdev_priv(ndev);
@@ -248,13 +294,13 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_ci = 0;
/* Allocate the Tx and Rx buffer descriptors. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->tx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
return -ENOMEM;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+ lp->rx_bd_v = dma_alloc_coherent(lp->dev,
sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
@@ -284,9 +330,9 @@ static int axienet_dma_bd_init(struct net_device *ndev)
goto out;
lp->rx_bd_v[i].skb = skb;
- addr = dma_map_single(ndev->dev.parent, skb->data,
+ addr = dma_map_single(lp->dev, skb->data,
lp->max_frm_size, DMA_FROM_DEVICE);
- if (dma_mapping_error(ndev->dev.parent, addr)) {
+ if (dma_mapping_error(lp->dev, addr)) {
netdev_err(ndev, "DMA mapping error\n");
goto out;
}
@@ -295,50 +341,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
lp->rx_bd_v[i].cntrl = lp->max_frm_size;
}
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting.
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
return 0;
out:
@@ -496,7 +499,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
@@ -506,28 +510,74 @@ static int __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- return -ETIMEDOUT;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
+ }
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
}
return 0;
}
/**
+ * axienet_dma_stop - Stop DMA operation
+ * @lp: Pointer to the axienet_local structure
+ */
+static void axienet_dma_stop(struct axienet_local *lp)
+{
+ int count;
+ u32 cr, sr;
+
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ synchronize_irq(lp->rx_irq);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ synchronize_irq(lp->tx_irq);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ axienet_lock_mii(lp);
+ __axienet_device_reset(lp);
+ axienet_unlock_mii(lp);
+}
+
+/**
* axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
* @ndev: Pointer to the net_device structure
*
* This function is called to reset and initialize the Axi Ethernet core. This
* is typically called during initialization. It does a reset of the Axi DMA
* Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
- * areconnected to Axi Ethernet reset lines, this in turn resets the Axi
+ * are connected to Axi Ethernet reset lines, this in turn resets the Axi
* Ethernet core. No separate hardware reset is done for the Axi Ethernet
* core.
* Returns 0 on success or a negative error number otherwise.
@@ -547,7 +597,7 @@ static int axienet_device_reset(struct net_device *ndev)
lp->options &= (~XAE_OPTION_JUMBO);
if ((ndev->mtu > XAE_MTU) &&
- (ndev->mtu <= XAE_JUMBO_MTU)) {
+ (ndev->mtu <= XAE_JUMBO_MTU)) {
lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
XAE_TRL_SIZE;
@@ -590,54 +640,55 @@ static int axienet_device_reset(struct net_device *ndev)
/**
* axienet_free_tx_chain - Clean up a series of linked TX descriptors.
- * @ndev: Pointer to the net_device structure
+ * @lp: Pointer to the axienet_local structure
* @first_bd: Index of first descriptor to clean up
- * @nr_bds: Number of descriptors to clean up, can be -1 if unknown.
+ * @nr_bds: Max number of descriptors to clean up
+ * @force: Whether to clean descriptors even if not complete
* @sizep: Pointer to a u32 filled with the total sum of all bytes
- * in all cleaned-up descriptors. Ignored if NULL.
+ * in all cleaned-up descriptors. Ignored if NULL.
+ * @budget: NAPI budget (use 0 when not called from NAPI poll)
*
* Would either be called after a successful transmit operation, or after
* there was an error when setting up the chain.
* Returns the number of descriptors handled.
*/
-static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
- int nr_bds, u32 *sizep)
+static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
+ int nr_bds, bool force, u32 *sizep, int budget)
{
- struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- int max_bds = nr_bds;
unsigned int status;
dma_addr_t phys;
int i;
- if (max_bds == -1)
- max_bds = lp->tx_bd_num;
-
- for (i = 0; i < max_bds; i++) {
+ for (i = 0; i < nr_bds; i++) {
cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
status = cur_p->status;
- /* If no number is given, clean up *all* descriptors that have
- * been completed by the MAC.
+ /* If force is not specified, clean up only descriptors
+ * that have been completed by the MAC.
*/
- if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
+ if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys,
+ dma_unmap_single(lp->dev, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
- dev_consume_skb_irq(cur_p->skb);
+ napi_consume_skb(cur_p->skb, budget);
- cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -647,38 +698,6 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
- * axienet_start_xmit_done - Invoked once a transmit is completed by the
- * Axi DMA Tx channel.
- * @ndev: Pointer to the net_device structure
- *
- * This function is invoked from the Axi DMA Tx isr to notify the completion
- * of transmit operation. It clears fields in the corresponding Tx BDs and
- * unmaps the corresponding buffer so that CPU can regain ownership of the
- * buffer. It finally invokes "netif_wake_queue" to restart transmission if
- * required.
- */
-static void axienet_start_xmit_done(struct net_device *ndev)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- u32 packets = 0;
- u32 size = 0;
-
- packets = axienet_free_tx_chain(ndev, lp->tx_bd_ci, -1, &size);
-
- lp->tx_bd_ci += packets;
- if (lp->tx_bd_ci >= lp->tx_bd_num)
- lp->tx_bd_ci -= lp->tx_bd_num;
-
- ndev->stats.tx_packets += packets;
- ndev->stats.tx_bytes += size;
-
- /* Matches barrier in axienet_start_xmit */
- smp_mb();
-
- netif_wake_queue(ndev);
-}
-
-/**
* axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
* @lp: Pointer to the axienet_local structure
* @num_frag: The number of BDs to check for
@@ -689,19 +708,73 @@ static void axienet_start_xmit_done(struct net_device *ndev)
* This function is invoked before BDs are allocated and transmission starts.
* This function returns 0 if a BD or group of BDs can be allocated for
* transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
+ * returns a busy status.
*/
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
+
+ /* Ensure we see all descriptor updates from device or TX polling */
+ rmb();
+ cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
+ lp->tx_bd_num];
+ if (cur_p->cntrl)
return NETDEV_TX_BUSY;
return 0;
}
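/* Illustrative note, not part of the patch. The cntrl test above is one
 * half of a stop/wake protocol: axienet_start_xmit() stops the queue when
 * fewer than MAX_SKB_FRAGS + 1 free descriptors remain, and
 * axienet_tx_poll() re-evaluates the same predicate, after an smp_mb()
 * matching the one in the transmit path, before waking the queue, so a
 * wakeup cannot be lost between the two checks.
 */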
/**
+ * axienet_tx_poll - Invoked once a transmit is completed by the
+ * Axi DMA Tx channel.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of TX packets to process.
+ *
+ * Return: Number of TX packets processed.
+ *
+ * This function is invoked from the NAPI processing to notify the completion
+ * of transmit operation. It clears fields in the corresponding Tx BDs and
+ * unmaps the corresponding buffer so that CPU can regain ownership of the
+ * buffer. It finally invokes "netif_wake_queue" to restart transmission if
+ * required.
+ */
+static int axienet_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
+ struct net_device *ndev = lp->ndev;
+ u32 size = 0;
+ int packets;
+
+ packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);
+
+ if (packets) {
+ lp->tx_bd_ci += packets;
+ if (lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci %= lp->tx_bd_num;
+
+ u64_stats_update_begin(&lp->tx_stat_sync);
+ u64_stats_add(&lp->tx_packets, packets);
+ u64_stats_add(&lp->tx_bytes, size);
+ u64_stats_update_end(&lp->tx_stat_sync);
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable TX completion interrupts. This should
+ * cause an immediate interrupt if any TX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
+ }
+ return packets;
+}
+
+/**
* axienet_start_xmit - Starts the transmission.
* @skb: sk_buff pointer that contains data to be Txed.
* @ndev: Pointer to net_device structure.
@@ -723,27 +796,25 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 csum_index_off;
skb_frag_t *frag;
dma_addr_t tail_p, phys;
+ u32 orig_tail_ptr, new_tail_ptr;
struct axienet_local *lp = netdev_priv(ndev);
struct axidma_bd *cur_p;
- u32 orig_tail_ptr = lp->tx_bd_tail;
- num_frag = skb_shinfo(skb)->nr_frags;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ orig_tail_ptr = lp->tx_bd_tail;
+ new_tail_ptr = orig_tail_ptr;
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
+ num_frag = skb_shinfo(skb)->nr_frags;
+ cur_p = &lp->tx_bd_v[orig_tail_ptr];
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -761,9 +832,9 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
}
- phys = dma_map_single(ndev->dev.parent, skb->data,
+ phys = dma_map_single(lp->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
@@ -773,22 +844,20 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
for (ii = 0; ii < num_frag; ii++) {
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
- cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ cur_p = &lp->tx_bd_v[new_tail_ptr];
frag = &skb_shinfo(skb)->frags[ii];
- phys = dma_map_single(ndev->dev.parent,
+ phys = dma_map_single(lp->dev,
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
netdev_err(ndev, "TX DMA mapping error\n");
ndev->stats.tx_dropped++;
- axienet_free_tx_chain(ndev, orig_tail_ptr, ii + 1,
- NULL);
- lp->tx_bd_tail = orig_tail_ptr;
-
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
return NETDEV_TX_OK;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -798,87 +867,108 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
cur_p->skb = skb;
- tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
+ tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
+ if (++new_tail_ptr >= lp->tx_bd_num)
+ new_tail_ptr = 0;
+ WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
+
/* Start the transfer */
axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- if (++lp->tx_bd_tail >= lp->tx_bd_num)
- lp->tx_bd_tail = 0;
+
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_tx_poll */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
return NETDEV_TX_OK;
}
/**
- * axienet_recv - Is called from Axi DMA Rx Isr to complete the received
- * BD processing.
- * @ndev: Pointer to net_device structure.
+ * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
+ * @napi: Pointer to NAPI structure.
+ * @budget: Max number of RX packets to process.
*
- * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It
- * does minimal processing and invokes "netif_rx" to complete further
- * processing.
+ * Return: Number of RX packets processed.
*/
-static void axienet_recv(struct net_device *ndev)
+static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
u32 length;
u32 csumstatus;
u32 size = 0;
- u32 packets = 0;
+ int packets = 0;
dma_addr_t tail_p = 0;
- struct axienet_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
struct axidma_bd *cur_p;
+ struct sk_buff *skb, *new_skb;
+ struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+ while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
dma_addr_t phys;
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
-
- phys = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
- DMA_FROM_DEVICE);
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
skb = cur_p->skb;
cur_p->skb = NULL;
- length = cur_p->app4 & 0x0000FFFF;
-
- skb_put(skb, length);
- skb->protocol = eth_type_trans(skb, ndev);
- /*skb_checksum_none_assert(skb);*/
- skb->ip_summed = CHECKSUM_NONE;
-
- /* if we're doing Rx csum offload, set it up */
- if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
- csumstatus = (cur_p->app2 &
- XAE_FULL_CSUM_STATUS_MASK) >> 3;
- if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) ||
- (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* skb could be NULL if a previous pass already received the
+ * packet for this slot in the ring, but failed to refill it
+ * with a newly allocated buffer. In this case, don't try to
+ * receive it again.
+ */
+ if (likely(skb)) {
+ length = cur_p->app4 & 0x0000FFFF;
+
+ phys = desc_get_phys_addr(lp, cur_p);
+ dma_unmap_single(lp->dev, phys, lp->max_frm_size,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, length);
+ skb->protocol = eth_type_trans(skb, lp->ndev);
+ /*skb_checksum_none_assert(skb);*/
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* if we're doing Rx csum offload, set it up */
+ if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
+ csumstatus = (cur_p->app2 &
+ XAE_FULL_CSUM_STATUS_MASK) >> 3;
+ if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
+ csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
+ skb->protocol == htons(ETH_P_IP) &&
+ skb->len > 64) {
+ skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
+ skb->ip_summed = CHECKSUM_COMPLETE;
}
- } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
- skb->protocol == htons(ETH_P_IP) &&
- skb->len > 64) {
- skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
- netif_rx(skb);
+ napi_gro_receive(napi, skb);
- size += length;
- packets++;
+ size += length;
+ packets++;
+ }
- new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
+ new_skb = napi_alloc_skb(napi, lp->max_frm_size);
if (!new_skb)
- return;
+ break;
- phys = dma_map_single(ndev->dev.parent, new_skb->data,
+ phys = dma_map_single(lp->dev, new_skb->data,
lp->max_frm_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
+ if (unlikely(dma_mapping_error(lp->dev, phys))) {
if (net_ratelimit())
- netdev_err(ndev, "RX DMA mapping error\n");
+ netdev_err(lp->ndev, "RX DMA mapping error\n");
dev_kfree_skb(new_skb);
- return;
+ break;
}
desc_set_phys_addr(lp, phys, cur_p);
@@ -886,16 +976,32 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->skb = new_skb;
+ /* Only update tail_p to mark this slot as usable after it has
+ * been successfully refilled.
+ */
+ tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+
if (++lp->rx_bd_ci >= lp->rx_bd_num)
lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
- ndev->stats.rx_packets += packets;
- ndev->stats.rx_bytes += size;
+ u64_stats_update_begin(&lp->rx_stat_sync);
+ u64_stats_add(&lp->rx_packets, packets);
+ u64_stats_add(&lp->rx_bytes, size);
+ u64_stats_update_end(&lp->rx_stat_sync);
if (tail_p)
axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
+
+ if (packets < budget && napi_complete_done(napi, packets)) {
+ /* Re-enable RX completion interrupts. This should
+ * cause an immediate interrupt if any RX packets are
+ * already pending.
+ */
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
+ }
+ return packets;
}
/**
@@ -905,46 +1011,40 @@ static void axienet_recv(struct net_device *ndev)
*
* Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
- * to complete the BD processing.
+ * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
+ * TX BD processing.
*/
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
- axienet_start_xmit_done(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
- (lp->tx_bd_v[lp->tx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Tx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
+ (lp->tx_bd_v[lp->tx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
+ } else {
+ /* Disable further TX completion interrupts and schedule
+ * NAPI to handle the completions.
+ */
+ u32 cr = lp->tx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_tx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -955,46 +1055,40 @@ out:
*
* Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
*
- * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
+ * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
* processing.
*/
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
- u32 cr;
unsigned int status;
struct net_device *ndev = _ndev;
struct axienet_local *lp = netdev_priv(ndev);
status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
- axienet_recv(lp->ndev);
- goto out;
- }
+
if (!(status & XAXIDMA_IRQ_ALL_MASK))
return IRQ_NONE;
- if (status & XAXIDMA_IRQ_ERROR_MASK) {
- dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
- dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
- (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
- (lp->rx_bd_v[lp->rx_bd_ci]).phys);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Disable coalesce, delay timer and error interrupts */
- cr &= (~XAXIDMA_IRQ_ALL_MASK);
- /* write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+ axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
+ netdev_err(ndev, "DMA Rx error 0x%x\n", status);
+ netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
+ (lp->rx_bd_v[lp->rx_bd_ci]).phys);
schedule_work(&lp->dma_err_task);
- axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
+ } else {
+ /* Disable further RX completion interrupts and schedule
+ * NAPI receive.
+ */
+ u32 cr = lp->rx_dma_cr;
+
+ cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ napi_schedule(&lp->napi_rx);
}
-out:
+
return IRQ_HANDLED;
}
@@ -1068,6 +1162,9 @@ static int axienet_open(struct net_device *ndev)
/* Enable worker thread for Axi DMA error handling */
INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
+
/* Enable interrupts for Axi DMA Tx */
ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
ndev->name, ndev);
@@ -1093,6 +1190,8 @@ err_eth_irq:
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
cancel_work_sync(&lp->dma_err_task);
@@ -1112,46 +1211,23 @@ err_tx_irq:
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr, sr;
- int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
+
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+ axienet_dma_stop(lp);
axienet_iow(lp, XAE_IE_OFFSET, 0);
- /* Give DMAs a chance to halt gracefully */
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
- }
-
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
- msleep(20);
- sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
- }
-
- /* Do a reset to ensure DMA is really stopped */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
-
cancel_work_sync(&lp->dma_err_task);
if (lp->eth_irq > 0)
@@ -1220,10 +1296,32 @@ static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phylink_mii_ioctl(lp->phylink, rq, cmd);
}
+static void
+axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct axienet_local *lp = netdev_priv(dev);
+ unsigned int start;
+
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
+}
+
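The reader loops above retry until the writer-side sequence count is stable. On the writer side, a sketch of the matching update (assuming, as the design implies, that only the NAPI poll routine for each direction touches its counters):

	/* in the RX poll loop, once packets/bytes for this pass are tallied */
	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, bytes);
	u64_stats_update_end(&lp->rx_stat_sync);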
static const struct net_device_ops axienet_netdev_ops = {
.ndo_open = axienet_open,
.ndo_stop = axienet_stop,
.ndo_start_xmit = axienet_start_xmit,
+ .ndo_get_stats64 = axienet_get_stats64,
.ndo_change_mtu = axienet_change_mtu,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -1245,8 +1343,8 @@ static const struct net_device_ops axienet_netdev_ops = {
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}
/**
@@ -1277,7 +1375,7 @@ static int axienet_ethtools_get_regs_len(struct net_device *ndev)
static void axienet_ethtools_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *ret)
{
- u32 *data = (u32 *) ret;
+ u32 *data = (u32 *)ret;
size_t len = sizeof(u32) * AXIENET_REGS_N;
struct axienet_local *lp = netdev_priv(ndev);
@@ -1323,8 +1421,11 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}
-static void axienet_ethtools_get_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static void
+axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
@@ -1338,15 +1439,19 @@ static void axienet_ethtools_get_ringparam(struct net_device *ndev,
ering->tx_pending = lp->tx_bd_num;
}
-static int axienet_ethtools_set_ringparam(struct net_device *ndev,
- struct ethtool_ringparam *ering)
+static int
+axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
{
struct axienet_local *lp = netdev_priv(ndev);
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -1415,14 +1520,12 @@ axienet_ethtools_get_coalesce(struct net_device *ndev,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
- regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
- >> XAXIDMA_COALESCE_SHIFT;
+
+ ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
+ ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
+ ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
+ ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
return 0;
}
@@ -1455,8 +1558,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
if (ecoalesce->rx_max_coalesced_frames)
lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+ lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
if (ecoalesce->tx_max_coalesced_frames)
lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
+ if (ecoalesce->tx_coalesce_usecs)
+ lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
return 0;
}
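Note that each field is copied only when nonzero, so a zero from userspace means "leave this setting unchanged". With both frame-count and time-based moderation cached in the driver, either knob can be tuned at runtime, e.g. ethtool -C eth0 rx-frames 16 rx-usecs 50 (interface name illustrative).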
@@ -1487,7 +1594,8 @@ static int axienet_ethtools_nway_reset(struct net_device *dev)
}
static const struct ethtool_ops axienet_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
+ .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USECS,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1503,137 +1611,78 @@ static const struct ethtool_ops axienet_ethtool_ops = {
.nway_reset = axienet_ethtools_nway_reset,
};
-static void axienet_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- /* Only support the mode we are configured for */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- if (lp->switch_x_sgmii)
- break;
- fallthrough;
- default:
- if (state->interface != lp->phy_mode) {
- netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
- phy_modes(state->interface),
- phy_modes(lp->phy_mode));
- linkmode_zero(supported);
- return;
- }
- }
-
- phylink_set(mask, Autoneg);
- phylink_set_port_modes(mask);
-
- phylink_set(mask, Asym_Pause);
- phylink_set(mask, Pause);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_NA:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 1000baseT_Full);
- if (state->interface == PHY_INTERFACE_MODE_1000BASEX)
- break;
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- phylink_set(mask, 100baseT_Full);
- phylink_set(mask, 10baseT_Full);
- fallthrough;
- default:
- break;
- }
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
+ return container_of(pcs, struct axienet_local, pcs);
}
-static void axienet_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void axienet_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- phylink_mii_c22_pcs_get_state(lp->pcs_phy, state);
- break;
- default:
- break;
- }
+ phylink_mii_c22_pcs_get_state(pcs_phy, state);
}
-static void axienet_mac_an_restart(struct phylink_config *config)
+static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct axienet_local *lp = netdev_priv(ndev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
- phylink_mii_c22_pcs_an_restart(lp->pcs_phy);
+ phylink_mii_c22_pcs_an_restart(pcs_phy);
}
-static int axienet_mac_prepare(struct phylink_config *config, unsigned int mode,
- phy_interface_t iface)
+static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
{
- struct net_device *ndev = to_net_dev(config->dev);
+ struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
+ struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
struct axienet_local *lp = netdev_priv(ndev);
int ret;
- switch (iface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- if (!lp->switch_x_sgmii)
- return 0;
-
- ret = mdiobus_write(lp->pcs_phy->bus,
- lp->pcs_phy->addr,
- XLNX_MII_STD_SELECT_REG,
- iface == PHY_INTERFACE_MODE_SGMII ?
+ if (lp->switch_x_sgmii) {
+ ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
+ interface == PHY_INTERFACE_MODE_SGMII ?
XLNX_MII_STD_SELECT_SGMII : 0);
- if (ret < 0)
- netdev_warn(ndev, "Failed to switch PHY interface: %d\n",
+ if (ret < 0) {
+ netdev_warn(ndev,
+ "Failed to switch PHY interface: %d\n",
ret);
- return ret;
- default:
- return 0;
+ return ret;
+ }
}
+
+ ret = phylink_mii_c22_pcs_config(pcs_phy, mode, interface, advertising);
+ if (ret < 0)
+ netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
+
+ return ret;
}
-static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static const struct phylink_pcs_ops axienet_pcs_ops = {
+ .pcs_get_state = axienet_pcs_get_state,
+ .pcs_config = axienet_pcs_config,
+ .pcs_an_restart = axienet_pcs_an_restart,
+};
+
+static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct net_device *ndev = to_net_dev(config->dev);
struct axienet_local *lp = netdev_priv(ndev);
- int ret;
- switch (state->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- ret = phylink_mii_c22_pcs_config(lp->pcs_phy, mode,
- state->interface,
- state->advertising);
- if (ret < 0)
- netdev_warn(ndev, "Failed to configure PCS: %d\n",
- ret);
- break;
+ if (interface == PHY_INTERFACE_MODE_1000BASEX ||
+ interface == PHY_INTERFACE_MODE_SGMII)
+ return &lp->pcs;
- default:
- break;
- }
+ return NULL;
+}
+
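phylink calls mac_select_pcs() when it (re)configures the link, to ask the MAC driver which PCS instance, if any, handles the requested interface mode. Returning NULL means no separate PCS is needed, so the C22 PCS registered above is attached only for SGMII and 1000BASE-X.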
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* nothing meaningful to do */
}
static void axienet_mac_link_down(struct phylink_config *config,
@@ -1687,10 +1736,8 @@ static void axienet_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops axienet_phylink_ops = {
- .validate = axienet_validate,
- .mac_pcs_get_state = axienet_mac_pcs_get_state,
- .mac_an_restart = axienet_mac_an_restart,
- .mac_prepare = axienet_mac_prepare,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = axienet_mac_select_pcs,
.mac_config = axienet_mac_config,
.mac_link_down = axienet_mac_link_down,
.mac_link_up = axienet_mac_link_up,
@@ -1705,29 +1752,27 @@ static const struct phylink_mac_ops axienet_phylink_ops = {
*/
static void axienet_dma_err_handler(struct work_struct *work)
{
+ u32 i;
u32 axienet_status;
- u32 cr, i;
+ struct axidma_bd *cur_p;
struct axienet_local *lp = container_of(work, struct axienet_local,
dma_err_task);
struct net_device *ndev = lp->ndev;
- struct axidma_bd *cur_p;
+
+ napi_disable(&lp->napi_tx);
+ napi_disable(&lp->napi_rx);
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- /* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. MDIO must be disabled before resetting.
- * Hold MDIO bus lock to avoid MDIO accesses during the reset.
- */
- axienet_lock_mii(lp);
- __axienet_device_reset(lp);
- axienet_unlock_mii(lp);
+
+ axienet_dma_stop(lp);
for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->cntrl) {
dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
- dma_unmap_single(ndev->dev.parent, addr,
+ dma_unmap_single(lp->dev, addr,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
@@ -1760,50 +1805,7 @@ static void axienet_dma_err_handler(struct work_struct *work)
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
- /* Start updating the Rx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
- (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = ((cr & ~XAXIDMA_DELAY_MASK) |
- (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Rx channel control register */
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
-
- /* Start updating the Tx channel control register */
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- /* Update the interrupt coalesce count */
- cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
- (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
- /* Update the delay timer count */
- cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
- (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
- /* Enable coalesce, delay timer and error interrupts */
- cr |= XAXIDMA_IRQ_ALL_MASK;
- /* Finally write to the Tx channel control register */
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
-
- /* Populate the tail pointer and bring the Rx Axi DMA engine out of
- * halted state. This will make the Rx side ready for reception.
- */
- axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
- axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
-
- /* Write to the RS (Run-stop) bit in the Tx channel control register.
- * Tx channel is now ready to run. But only after we write to the
- * tail pointer register that the Tx channel will start transmitting
- */
- axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr | XAXIDMA_CR_RUNSTOP_MASK);
+ axienet_dma_start(lp);
axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
axienet_status &= ~XAE_RCW1_RX_MASK;
@@ -1824,6 +1826,8 @@ static void axienet_dma_err_handler(struct work_struct *work)
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
+ napi_enable(&lp->napi_rx);
+ napi_enable(&lp->napi_tx);
}
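axienet_dma_stop() and axienet_dma_start() are helpers added earlier in this patch (outside this excerpt) that factor out the register sequences deleted above. Reconstructed from the removed code, the start half looks roughly like:

	/* sketch only: the coalesce count/delay settings are assumed to be
	 * pre-folded into the cached lp->rx_dma_cr/lp->tx_dma_cr values
	 */
	static void axienet_dma_start_sketch(struct axienet_local *lp)
	{
		/* RX: current descriptor, run-stop, then the tail-pointer
		 * write that actually starts reception
		 */
		axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
				  lp->rx_dma_cr | XAXIDMA_CR_RUNSTOP_MASK);
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
				     sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1));

		/* TX: made ready here, but frames only move after the xmit
		 * path writes a tail pointer
		 */
		axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
				  lp->tx_dma_cr | XAXIDMA_CR_RUNSTOP_MASK);
	}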
/**
@@ -1872,6 +1876,12 @@ static int axienet_probe(struct platform_device *pdev)
lp->rx_bd_num = RX_BD_NUM_DEFAULT;
lp->tx_bd_num = TX_BD_NUM_DEFAULT;
+ u64_stats_init(&lp->rx_stat_sync);
+ u64_stats_init(&lp->tx_stat_sync);
+
+ netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
+ netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
+
lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
if (!lp->axi_clk) {
/* For backward compatibility, if named AXI clock is not present,
@@ -2056,6 +2066,11 @@ static int axienet_probe(struct platform_device *pdev)
iowrite32(0x0, desc);
}
}
+ if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
+ dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
+ ret = -EINVAL;
+ goto cleanup_clk;
+ }
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
if (ret) {
@@ -2078,32 +2093,58 @@ static int axienet_probe(struct platform_device *pdev)
}
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
+ lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
+
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
- lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
- if (lp->phy_node) {
- ret = axienet_mdio_setup(lp);
- if (ret)
- dev_warn(&pdev->dev,
- "error registering MDIO bus: %d\n", ret);
- }
if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
- if (!lp->phy_node) {
- dev_err(&pdev->dev, "phy-handle required for 1000BaseX/SGMII\n");
+ np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
+ if (!np) {
+ /* Deprecated: Always use "pcs-handle" for pcs_phy.
+ * Falling back to "phy-handle" here is only for
+ * backward compatibility with old device trees.
+ */
+ np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ }
+ if (!np) {
+ dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
ret = -EINVAL;
goto cleanup_mdio;
}
- lp->pcs_phy = of_mdio_find_device(lp->phy_node);
+ lp->pcs_phy = of_mdio_find_device(np);
if (!lp->pcs_phy) {
ret = -EPROBE_DEFER;
+ of_node_put(np);
goto cleanup_mdio;
}
- lp->phylink_config.pcs_poll = true;
+ of_node_put(np);
+ lp->pcs.ops = &axienet_pcs_ops;
+ lp->pcs.poll = true;
}
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10FD | MAC_100FD | MAC_1000FD;
+
+ __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
+ if (lp->switch_x_sgmii) {
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ lp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ lp->phylink_config.supported_interfaces);
+ }
lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
lp->phy_mode,
@@ -2130,8 +2171,6 @@ cleanup_mdio:
put_device(&lp->pcs_phy->dev);
if (lp->mii_bus)
axienet_mdio_teardown(lp);
- of_node_put(lp->phy_node);
-
cleanup_clk:
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
@@ -2160,9 +2199,6 @@ static int axienet_remove(struct platform_device *pdev)
clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
clk_disable_unprepare(lp->axi_clk);
- of_node_put(lp->phy_node);
- lp->phy_node = NULL;
-
free_netdev(ndev);
return 0;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 48f544f6c999..0b3b6935c558 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -106,7 +106,7 @@ static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* Return: 0 on success, -ETIMEDOUT on a timeout
*
* Writes the value to the requested register by first writing the value
- * into MWD register. The the MCR register is then appropriately setup
+ * into the MWD register. The MCR register is then appropriately set up
* to finish the write operation.
*/
static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
@@ -126,7 +126,7 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
return ret;
}
- axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val);
+ axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32)val);
axienet_iow(lp, XAE_MDIO_MCR_OFFSET,
(((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) &
XAE_MDIO_MCR_PHYAD_MASK) |
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0815de581c7f..a3967f8de417 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
+/* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
* This is a new flat driver which is based on the original emac_lite
* driver from John Williams <john.williams@xilinx.com>.
*
- * 2007 - 2013 (c) Xilinx, Inc.
+ * Copyright (c) 2007 - 2013 Xilinx, Inc.
*/
#include <linux/module.h>
@@ -91,13 +90,7 @@
#define XEL_ARP_PACKET_SIZE 28 /* Max ARP packet size */
#define XEL_HEADER_IP_LENGTH_OFFSET 16 /* IP Length Offset */
-
-
#define TX_TIMEOUT (60 * HZ) /* Tx timeout is 60 seconds. */
-#define ALIGNMENT 4
-
-/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
-#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((uintptr_t)adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
@@ -115,7 +108,7 @@
* @next_tx_buf_to_use: next Tx buffer to write to
* @next_rx_buf_to_use: next Rx buffer to read from
* @base_addr: base address of the Emaclite device
- * @reset_lock: lock used for synchronization
+ * @reset_lock: lock to serialize xmit and tx_timeout execution
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
* @phy_dev: pointer to the PHY device
@@ -124,7 +117,6 @@
* @last_link: last link status
*/
struct net_local {
-
struct net_device *ndev;
bool tx_ping_pong;
@@ -133,7 +125,7 @@ struct net_local {
u32 next_rx_buf_to_use;
void __iomem *base_addr;
- spinlock_t reset_lock;
+ spinlock_t reset_lock; /* serialize xmit and tx_timeout execution */
struct sk_buff *deferred_skb;
struct phy_device *phy_dev;
@@ -144,7 +136,6 @@ struct net_local {
int last_link;
};
-
/*************************/
/* EmacLite driver calls */
/*************************/
@@ -207,7 +198,7 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* address in the EmacLite device.
*/
static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
const u16 *from_u16_ptr;
u32 align_buffer;
@@ -265,7 +256,7 @@ static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
* to a 16-bit aligned buffer.
*/
static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr,
- unsigned length)
+ unsigned int length)
{
u16 *to_u16_ptr, *from_u16_ptr;
u32 *from_u32_ptr;
@@ -330,7 +321,6 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
-
/* Switch to next buffer if configured */
if (drvdata->tx_ping_pong != 0)
drvdata->next_tx_buf_to_use ^= XEL_BUFFER_OFFSET;
@@ -346,8 +336,9 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
return -1; /* Buffers were full, return failure */
- } else
+ } else {
return -1; /* Buffer was full, return failure */
+ }
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *)addr, byte_count);
@@ -423,7 +414,6 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* or an IP packet or an ARP packet
*/
if (proto_type > ETH_DATA_LEN) {
-
if (proto_type == ETH_P_IP) {
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
@@ -433,23 +423,25 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
- } else if (proto_type == ETH_P_ARP)
+ } else if (proto_type == ETH_P_ARP) {
length = XEL_ARP_PACKET_SIZE + ETH_HLEN + ETH_FCS_LEN;
- else
+ } else {
/* Field contains type other than IP or ARP, use max
* frame size and let user parse it
*/
length = ETH_FRAME_LEN + ETH_FCS_LEN;
- } else
+ }
+ } else {
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ }
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *)(addr + XEL_RXBUFF_OFFSET),
- data, length);
+ data, length);
/* Acknowledge the frame */
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
@@ -498,7 +490,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
* @dev: Pointer to the network device instance
* @address: Void pointer to the sockaddr structure
*
- * This function copies the HW address from the sockaddr strucutre to the
+ * This function copies the HW address from the sockaddr structure to the
* net_device structure and updates the address in HW.
*
* Return: Error if the net device is busy or 0 if the addr is set
@@ -599,11 +591,10 @@ static void xemaclite_rx_handler(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
- unsigned int align;
u32 len;
len = ETH_FRAME_LEN + ETH_FCS_LEN;
- skb = netdev_alloc_skb(dev, len + ALIGNMENT);
+ skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
if (!skb) {
/* Couldn't get memory. */
dev->stats.rx_dropped++;
@@ -611,16 +602,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
return;
}
- /* A new skb should have the data halfword aligned, but this code is
- * here just in case that isn't true. Calculate how many
- * bytes we should reserve to get the data to start on a word
- * boundary
- */
- align = BUFFER_ALIGN(skb->data);
- if (align)
- skb_reserve(skb, align);
-
- skb_reserve(skb, 2);
+ skb_reserve(skb, NET_IP_ALIGN);
len = xemaclite_recv_data(lp, (u8 *)skb->data, len);
@@ -671,8 +653,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the first buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
@@ -682,8 +663,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
/* Check if the Transmission for the second buffer is completed */
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
- (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
-
+ (tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
@@ -823,10 +803,10 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
{
struct mii_bus *bus;
- int rc;
struct resource res;
struct device_node *np = of_get_parent(lp->phy_node);
struct device_node *npp;
+ int rc, ret;
/* Don't register the MDIO bus if the phy_node or its parent node
* can't be found.
@@ -836,16 +816,24 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
return -ENODEV;
}
npp = of_get_parent(np);
-
- of_address_to_resource(npp, 0, &res);
+ ret = of_address_to_resource(npp, 0, &res);
+ of_node_put(npp);
+ if (ret) {
+ dev_err(dev, "%s resource error!\n",
+ dev->of_node->full_name);
+ of_node_put(np);
+ return ret;
+ }
if (lp->ndev->mem_start != res.start) {
struct phy_device *phydev;
+
phydev = of_phy_find_device(lp->phy_node);
if (!phydev)
dev_info(dev,
"MDIO of the phy is not registered yet\n");
else
put_device(&phydev->mdio.dev);
+ of_node_put(np);
return 0;
}
@@ -858,6 +846,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus = mdiobus_alloc();
if (!bus) {
dev_err(dev, "Failed to allocate mdiobus\n");
+ of_node_put(np);
return -ENOMEM;
}
@@ -870,6 +859,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
bus->parent = dev;
rc = of_mdiobus_register(bus, np);
+ of_node_put(np);
if (rc) {
dev_err(dev, "Failed to register mdio bus.\n");
goto err_register;
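The added of_node_put() calls follow the usual OF refcounting rule: of_get_parent() and of_parse_phandle() hand back a node with its refcount raised, and the caller must drop that reference on every exit path. The pattern, sketched:

	struct device_node *np = of_parse_phandle(dev->of_node, "phy-handle", 0);

	if (np) {
		/* ... use np ... */
		of_node_put(np);	/* drop the reference we were handed */
	}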
@@ -926,8 +916,6 @@ static int xemaclite_open(struct net_device *dev)
xemaclite_disable_interrupts(lp);
if (lp->phy_node) {
- u32 bmcr;
-
lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
xemaclite_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
@@ -938,19 +926,6 @@ static int xemaclite_open(struct net_device *dev)
/* EmacLite doesn't support giga-bit speeds */
phy_set_max_speed(lp->phy_dev, SPEED_100);
-
- /* Don't advertise 1000BASE-T Full/Half duplex speeds */
- phy_write(lp->phy_dev, MII_CTRL1000, 0);
-
- /* Advertise only 10 and 100mbps full/half duplex speeds */
- phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL |
- ADVERTISE_CSMA);
-
- /* Restart auto negotiation */
- bmcr = phy_read(lp->phy_dev, MII_BMCR);
- bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
- phy_write(lp->phy_dev, MII_BMCR, bmcr);
-
phy_start(lp->phy_dev);
}
@@ -1085,7 +1060,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
static void xemaclite_ethtools_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *ed)
{
- strlcpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
+ strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
}
static const struct ethtool_ops xemaclite_ethtool_ops = {
@@ -1133,14 +1108,11 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->ndev = ndev;
/* Get IRQ for the device */
- res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "no IRQ found\n");
- rc = -ENXIO;
+ rc = platform_get_irq(ofdev, 0);
+ if (rc < 0)
goto error;
- }
- ndev->irq = res->start;
+ ndev->irq = rc;
res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
lp->base_addr = devm_ioremap_resource(&ofdev->dev, res);
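platform_get_irq() prints its own diagnostic on failure and returns a negative errno, so the result can be propagated as-is; unlike the removed IORESOURCE_IRQ lookup, it also performs the interrupt mapping, which matters on DT platforms where IRQs are mapped lazily. The resulting idiom:

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* failure already logged by the core */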
@@ -1186,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
if (rc) {
dev_err(dev,
"Cannot register network device, aborting\n");
- goto error;
+ goto put_node;
}
dev_info(dev,
@@ -1194,6 +1166,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
(unsigned long __force)ndev->mem_start, lp->base_addr, ndev->irq);
return 0;
+put_node:
+ of_node_put(lp->phy_node);
error:
free_netdev(ndev);
return rc;