Diffstat (limited to 'drivers/net/ethernet/xilinx')
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig                |   6
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h             |   5
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c        | 258
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_mdio.c        |  20
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h       |  35
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c  | 678
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c  | 111
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c      |   6
8 files changed, 752 insertions(+), 367 deletions(-)
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index af96e05c5bcd..8d994cebb6b0 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -6,7 +6,7 @@
config NET_VENDOR_XILINX
bool "Xilinx devices"
default y
- depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || COMPILE_TEST
+ depends on PPC || PPC32 || MICROBLAZE || ARCH_ZYNQ || MIPS || X86 || ARM || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -26,8 +26,8 @@ config XILINX_EMACLITE
config XILINX_AXI_EMAC
tristate "Xilinx 10/100/1000 AXI Ethernet support"
- depends on MICROBLAZE
- select PHYLIB
+ depends on MICROBLAZE || X86 || ARM || COMPILE_TEST
+ select PHYLINK
---help---
This driver supports the 10/100/1000 Ethernet from Xilinx for the
AXI bus interface used in Xilinx Virtex FPGAs.
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 1aeda084b8f1..276292bca334 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -361,7 +361,7 @@ struct temac_local {
/* For synchronization of indirect register access. Must be
* a spinlock shared between interfaces in the same TEMAC block.
*/
- struct mutex *indirect_mutex;
+ spinlock_t *indirect_lock;
u32 options; /* Current options word */
int last_link;
unsigned int temac_features;
@@ -388,8 +388,9 @@ struct temac_local {
/* xilinx_temac.c */
int temac_indirect_busywait(struct temac_local *lp);
u32 temac_indirect_in32(struct temac_local *lp, int reg);
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg);
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value);
-
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value);
/* xilinx_temac_mdio.c */
int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 14870d659f7d..21c1b4322ea7 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -22,7 +22,6 @@
*
* TODO:
* - Factor out locallink DMA code into separate driver
- * - Fix multicast assignment.
* - Fix support for hardware checksumming.
* - Testing. Lots and lots of testing.
*
@@ -53,6 +52,7 @@
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
#include "ll_temac.h"
@@ -84,51 +84,118 @@ static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
return iowrite32(value, lp->regs + offset);
}
+static bool hard_acs_rdy(struct temac_local *lp)
+{
+ return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
+}
+
+static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
+{
+ ktime_t cur = ktime_get();
+
+ return hard_acs_rdy(lp) || ktime_after(cur, timeout);
+}
+
+/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
+ * that was used before, and should cover MDIO bus speed down to 3200
+ * Hz.
+ */
+#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
+
+/**
+ * temac_indirect_busywait - Wait for current indirect register access
+ * to complete.
+ */
int temac_indirect_busywait(struct temac_local *lp)
{
- unsigned long end = jiffies + 2;
+ ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);
- while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (time_before_eq(end, jiffies)) {
- WARN_ON(1);
- return -ETIMEDOUT;
- }
- usleep_range(500, 1000);
- }
- return 0;
+ spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
+ if (WARN_ON(!hard_acs_rdy(lp)))
+ return -ETIMEDOUT;
+ else
+ return 0;
}
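
For reference, the bounded busy-wait introduced above reduces to the following self-contained sketch (illustration only; the example_* names and the BIT(0) ready flag are placeholders, not driver API):

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/processor.h>

static bool example_ready_or_timeout(void __iomem *status_reg, ktime_t timeout)
{
	/* Done when the ready bit is set or the deadline has passed */
	return (readl(status_reg) & BIT(0)) || ktime_after(ktime_get(), timeout);
}

static int example_busywait(void __iomem *status_reg)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), 20 * NSEC_PER_MSEC);

	/* spin_until_cond() polls with cpu_relax() between reads */
	spin_until_cond(example_ready_or_timeout(status_reg, timeout));
	return (readl(status_reg) & BIT(0)) ? 0 : -ETIMEDOUT;
}

Unlike the usleep_range() loop it replaces, this never sleeps, so it can run with interrupts disabled — which is what allows indirect_mutex to become indirect_lock below.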
/**
- * temac_indirect_in32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_in32 - Indirect register read access. This function
+ * must be called without lp->indirect_lock being held.
*/
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
- u32 val;
+ unsigned long flags;
+ int val;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+ return val;
+}
- if (temac_indirect_busywait(lp))
+/**
+ * temac_indirect_in32_locked - Indirect register read access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
+{
+ /* This initial wait should normally not spin, as we always
+ * try to wait for indirect access to complete before
+ * releasing the indirect_lock.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
+ /* Initiate read from indirect register */
temac_iow(lp, XTE_CTL0_OFFSET, reg);
- if (temac_indirect_busywait(lp))
+ /* Wait for indirect register access to complete. We really
+ * should not see timeouts here, and a timeout could even cause
+ * problems for the following indirect access, so let's make a
+ * bit of WARN noise.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return -ETIMEDOUT;
- val = temac_ior(lp, XTE_LSW0_OFFSET);
-
- return val;
+ /* Value is ready now */
+ return temac_ior(lp, XTE_LSW0_OFFSET);
}
/**
- * temac_indirect_out32
- *
- * lp->indirect_mutex must be held when calling this function
+ * temac_indirect_out32 - Indirect register write access. This function
+ * must be called without lp->indirect_lock being held.
*/
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
- if (temac_indirect_busywait(lp))
+ unsigned long flags;
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, reg, value);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+}
+
+/**
+ * temac_indirect_out32_locked - Indirect register write access. This
+ * function must be called with lp->indirect_lock being held. Use
+ * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
+ * repeated lock/unlock and to ensure uninterrupted access to indirect
+ * registers.
+ */
+void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
+{
+ /* As in temac_indirect_in32_locked(), we should normally not
+ * spin here. And if it happens, we actually end up silently
+ * ignoring the write request. Ouch.
+ */
+ if (WARN_ON(temac_indirect_busywait(lp)))
return;
+ /* Initiate write to indirect register */
temac_iow(lp, XTE_LSW0_OFFSET, value);
temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
- temac_indirect_busywait(lp);
+ /* As in temac_indirect_in32_locked(), we should not see timeouts
+ * here. And if it happens, we continue before the write has
+ * completed. Not good.
+ */
+ WARN_ON(temac_indirect_busywait(lp));
}
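
The intended caller pattern for the new _locked variants, used throughout the rest of this patch, is to batch several indirect accesses under one irqsave section (a sketch; assumes lp is an initialized struct temac_local *):

	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_AFM_OFFSET);
	temac_indirect_out32_locked(lp, XTE_AFM_OFFSET,
				    val & ~XTE_AFM_EPPRM_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

One-off accesses can keep using temac_indirect_in32()/temac_indirect_out32(), which take and drop the lock internally.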
/**
@@ -344,20 +411,21 @@ out:
static void temac_do_set_mac_address(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ unsigned long flags;
/* set up unicast MAC address filter set its mac address */
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_UAW0_OFFSET,
- (ndev->dev_addr[0]) |
- (ndev->dev_addr[1] << 8) |
- (ndev->dev_addr[2] << 16) |
- (ndev->dev_addr[3] << 24));
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
+ (ndev->dev_addr[0]) |
+ (ndev->dev_addr[1] << 8) |
+ (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[3] << 24));
/* There are reserved bits in EUAW1,
* so don't affect them. Set MAC bits [47:32] in EUAW1. */
- temac_indirect_out32(lp, XTE_UAW1_OFFSET,
- (ndev->dev_addr[4] & 0x000000ff) |
- (ndev->dev_addr[5] << 8));
- mutex_unlock(lp->indirect_mutex);
+ temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
+ (ndev->dev_addr[4] & 0x000000ff) |
+ (ndev->dev_addr[5] << 8));
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
@@ -383,49 +451,58 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- u32 multi_addr_msw, multi_addr_lsw, val;
- int i;
+ u32 multi_addr_msw, multi_addr_lsw;
+ int i = 0;
+ unsigned long flags;
+ bool promisc_mode_disabled = false;
- mutex_lock(lp->indirect_mutex);
- if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
- /*
- * We must make the kernel realise we had to move
- * into promisc mode or we start all out war on
- * the cable. If it was a promisc request the
- * flag is already set. If not we assert it.
- */
- ndev->flags |= IFF_PROMISC;
+ if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (!netdev_mc_empty(ndev)) {
+ return;
+ }
+
+ spin_lock_irqsave(lp->indirect_lock, flags);
+
+ if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- i = 0;
netdev_for_each_mc_addr(ha, ndev) {
- if (i >= MULTICAST_CAM_TABLE_NUM)
+ if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
break;
multi_addr_msw = ((ha->addr[3] << 24) |
(ha->addr[2] << 16) |
(ha->addr[1] << 8) |
(ha->addr[0]));
- temac_indirect_out32(lp, XTE_MAW0_OFFSET,
- multi_addr_msw);
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
+ multi_addr_msw);
multi_addr_lsw = ((ha->addr[5] << 8) |
(ha->addr[4]) | (i << 16));
- temac_indirect_out32(lp, XTE_MAW1_OFFSET,
- multi_addr_lsw);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
+ multi_addr_lsw);
i++;
}
- } else {
- val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
- temac_indirect_out32(lp, XTE_AFM_OFFSET,
- val & ~XTE_AFM_EPPRM_MASK);
- temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0);
- dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
- mutex_unlock(lp->indirect_mutex);
+
+ /* Clear all remaining/unused address table entries */
+ while (i < MULTICAST_CAM_TABLE_NUM) {
+ temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
+ i++;
+ }
+
+ /* Enable address filter block if currently disabled */
+ if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
+ & XTE_AFM_EPPRM_MASK) {
+ temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
+ promisc_mode_disabled = true;
+ }
+
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
+ if (promisc_mode_disabled)
+ dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
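
For clarity, the CAM entry layout implied by the shifts above is (bit positions taken directly from this function; i is the entry index, 0..MULTICAST_CAM_TABLE_NUM-1):

	MAW0 = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	MAW1 = (i << 16) | (addr[5] << 8) | addr[4];

so the clearing loop, which writes MAW1 with only i << 16 set, zeroes out entry i.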
static struct temac_option {
@@ -516,17 +593,19 @@ static u32 temac_setoptions(struct net_device *ndev, u32 options)
struct temac_local *lp = netdev_priv(ndev);
struct temac_option *tp = &temac_options[0];
int reg;
+ unsigned long flags;
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
while (tp->opt) {
- reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or;
- if (options & tp->opt)
+ reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
+ if (options & tp->opt) {
reg |= tp->m_or;
- temac_indirect_out32(lp, tp->reg, reg);
+ temac_indirect_out32_locked(lp, tp->reg, reg);
+ }
tp++;
}
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
lp->options |= options;
- mutex_unlock(lp->indirect_mutex);
return 0;
}
@@ -537,6 +616,7 @@ static void temac_device_reset(struct net_device *ndev)
struct temac_local *lp = netdev_priv(ndev);
u32 timeout;
u32 val;
+ unsigned long flags;
/* Perform a software reset */
@@ -545,7 +625,6 @@ static void temac_device_reset(struct net_device *ndev)
dev_dbg(&ndev->dev, "%s()\n", __func__);
- mutex_lock(lp->indirect_mutex);
/* Reset the receiver and wait for it to finish reset */
temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
timeout = 1000;
@@ -571,8 +650,11 @@ static void temac_device_reset(struct net_device *ndev)
}
/* Disable the receiver */
- val = temac_indirect_in32(lp, XTE_RXC1_OFFSET);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
+ val & ~XTE_RXC1_RXEN_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Reset Local Link (DMA) */
lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
@@ -592,12 +674,12 @@ static void temac_device_reset(struct net_device *ndev)
"temac_device_reset descriptor allocation failed\n");
}
- temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0);
- temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0);
- temac_indirect_out32(lp, XTE_TXC_OFFSET, 0);
- temac_indirect_out32(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
-
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
+ temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
/* Sync default options with HW
* but leave receiver and transmitter disabled. */
@@ -621,13 +703,14 @@ static void temac_adjust_link(struct net_device *ndev)
struct phy_device *phy = ndev->phydev;
u32 mii_speed;
int link_state;
+ unsigned long flags;
/* hash together the state values to decide if something has changed */
link_state = phy->speed | (phy->duplex << 1) | phy->link;
- mutex_lock(lp->indirect_mutex);
if (lp->last_link != link_state) {
- mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;
switch (phy->speed) {
@@ -637,11 +720,12 @@ static void temac_adjust_link(struct net_device *ndev)
}
/* Write new speed setting out to TEMAC */
- temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed);
+ temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
+
lp->last_link = link_state;
phy_print_status(phy);
}
- mutex_unlock(lp->indirect_mutex);
}
#ifdef CONFIG_64BIT
@@ -1011,6 +1095,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
+ .ndo_set_rx_mode = temac_set_multicast_list,
.ndo_set_mac_address = temac_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = temac_ioctl,
@@ -1076,7 +1161,6 @@ static int temac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
@@ -1103,17 +1187,17 @@ static int temac_probe(struct platform_device *pdev)
/* Setup lock for synchronization of indirect register access */
if (pdata) {
- if (!pdata->indirect_mutex) {
+ if (!pdata->indirect_lock) {
dev_err(&pdev->dev,
- "indirect_mutex missing in platform_data\n");
+ "indirect_lock missing in platform_data\n");
return -EINVAL;
}
- lp->indirect_mutex = pdata->indirect_mutex;
+ lp->indirect_lock = pdata->indirect_lock;
} else {
- lp->indirect_mutex = devm_kmalloc(&pdev->dev,
- sizeof(*lp->indirect_mutex),
- GFP_KERNEL);
- mutex_init(lp->indirect_mutex);
+ lp->indirect_lock = devm_kmalloc(&pdev->dev,
+ sizeof(*lp->indirect_lock),
+ GFP_KERNEL);
+ spin_lock_init(lp->indirect_lock);
}
/* map device registers */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_mdio.c b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
index a4667326f745..6fd2dea4e60f 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_mdio.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_mdio.c
@@ -25,14 +25,15 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
struct temac_local *lp = bus->priv;
u32 rc;
+ unsigned long flags;
/* Write the PHY address to the MIIM Access Initiator register.
* When the transfer completes, the PHY register value will appear
* in the LSW0 register */
- mutex_lock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg);
- rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET);
- mutex_unlock(lp->indirect_mutex);
+ rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n",
phy_id, reg, rc);
@@ -43,6 +44,7 @@ static int temac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
{
struct temac_local *lp = bus->priv;
+ unsigned long flags;
dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
phy_id, reg, val);
@@ -50,10 +52,10 @@ static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val)
/* First write the desired value into the write data register
* and then write the address into the access initiator register
*/
- mutex_lock(lp->indirect_mutex);
- temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val);
- temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
- mutex_unlock(lp->indirect_mutex);
+ spin_lock_irqsave(lp->indirect_lock, flags);
+ temac_indirect_out32_locked(lp, XTE_MGTDR_OFFSET, val);
+ temac_indirect_out32_locked(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg);
+ spin_unlock_irqrestore(lp->indirect_lock, flags);
return 0;
}
@@ -87,9 +89,7 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
/* Enable the MDIO bus by asserting the enable bit and writing
* in the clock config */
- mutex_lock(lp->indirect_mutex);
temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div);
- mutex_unlock(lp->indirect_mutex);
bus = devm_mdiobus_alloc(&pdev->dev);
if (!bus)
@@ -116,10 +116,8 @@ int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev)
if (rc)
return rc;
- mutex_lock(lp->indirect_mutex);
dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n",
temac_indirect_in32(lp, XTE_MC_OFFSET));
- mutex_unlock(lp->indirect_mutex);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 011adae32b89..2dacfc85b3ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -13,6 +13,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
+#include <linux/phylink.h>
/* Packet size info */
#define XAE_HDR_SIZE 14 /* Size of Ethernet header */
@@ -83,6 +84,8 @@
#define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */
+#define XAXIDMA_SR_HALT_MASK 0x00000001 /* Indicates DMA channel halted */
+
#define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */
#define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */
#define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */
@@ -356,9 +359,6 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
- * @sw_id_offset: MM2S/S2MM Sw ID
- * @reserved5: Reserved and not used
- * @reserved6: Reserved and not used
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
@@ -373,11 +373,9 @@ struct axidma_bd {
u32 app1; /* TX start << 16 | insert */
u32 app2; /* TX csum seed */
u32 app3;
- u32 app4;
- u32 sw_id_offset;
- u32 reserved5;
- u32 reserved6;
-};
+ u32 app4; /* Last field used by HW */
+ struct sk_buff *skb;
+} __aligned(XAXIDMA_BD_MINIMUM_ALIGNMENT);
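
The skb pointer replaces the old sw_id_offset field, which stored the pointer cast to u32 and therefore could not hold a 64-bit pointer. Since app4 is the last field the DMA engine reads, and the descriptor stays padded out to XAXIDMA_BD_MINIMUM_ALIGNMENT via __aligned(), the software-only skb field remains invisible to the hardware.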
/**
* struct axienet_local - axienet private per device data
@@ -385,6 +383,7 @@ struct axidma_bd {
* @dev: Pointer to device structure
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
+ * @regs_start: Resource start for axienet device addresses
* @regs: Base address for the axienet_local device address space
* @dma_regs: Base address for the axidma device address space
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
@@ -422,10 +421,17 @@ struct axienet_local {
/* Connection to PHY device */
struct device_node *phy_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+
+ /* Clock for AXI bus */
+ struct clk *clk;
+
/* MDIO bus data */
struct mii_bus *mii_bus; /* MII bus reference */
/* IO registers, dma functions and IRQs */
+ resource_size_t regs_start;
void __iomem *regs;
void __iomem *dma_regs;
@@ -433,17 +439,19 @@ struct axienet_local {
int tx_irq;
int rx_irq;
+ int eth_irq;
phy_interface_t phy_mode;
u32 options; /* Current options word */
- u32 last_link;
u32 features;
/* Buffer descriptors */
struct axidma_bd *tx_bd_v;
dma_addr_t tx_bd_p;
+ u32 tx_bd_num;
struct axidma_bd *rx_bd_v;
dma_addr_t rx_bd_p;
+ u32 rx_bd_num;
u32 tx_bd_ci;
u32 tx_bd_tail;
u32 rx_bd_ci;
@@ -481,7 +489,7 @@ struct axienet_option {
*/
static inline u32 axienet_ior(struct axienet_local *lp, off_t offset)
{
- return in_be32(lp->regs + offset);
+ return ioread32(lp->regs + offset);
}
static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
@@ -501,12 +509,13 @@ static inline u32 axinet_ior_read_mcr(struct axienet_local *lp)
static inline void axienet_iow(struct axienet_local *lp, off_t offset,
u32 value)
{
- out_be32((lp->regs + offset), value);
+ iowrite32(value, lp->regs + offset);
}
/* Function prototypes visible in xilinx_axienet_mdio.c for other files */
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np);
-int axienet_mdio_wait_until_ready(struct axienet_local *lp);
+int axienet_mdio_enable(struct axienet_local *lp);
+void axienet_mdio_disable(struct axienet_local *lp);
+int axienet_mdio_setup(struct axienet_local *lp);
void axienet_mdio_teardown(struct axienet_local *lp);
#endif /* XILINX_AXI_ENET_H */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 831967f6eff8..4fc627fb4d11 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -7,6 +7,7 @@
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
@@ -21,6 +22,7 @@
* - Add support for extended VLAN support.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
@@ -38,16 +40,18 @@
#include "xilinx_axienet.h"
-/* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */
-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096
/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME "xaxienet"
#define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
#define DRIVER_VERSION "1.00a"
-#define AXIENET_REGS_N 32
+#define AXIENET_REGS_N 40
/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
@@ -125,7 +129,7 @@ static struct axienet_option axienet_options[] = {
*/
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
- return in_be32(lp->dma_regs + reg);
+ return ioread32(lp->dma_regs + reg);
}
/**
@@ -140,7 +144,7 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
static inline void axienet_dma_out32(struct axienet_local *lp,
off_t reg, u32 value)
{
- out_be32((lp->dma_regs + reg), value);
+ iowrite32(value, lp->dma_regs + reg);
}
/**
@@ -156,22 +160,21 @@ static void axienet_dma_bd_release(struct net_device *ndev)
int i;
struct axienet_local *lp = netdev_priv(ndev);
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
lp->max_frm_size, DMA_FROM_DEVICE);
- dev_kfree_skb((struct sk_buff *)
- (lp->rx_bd_v[i].sw_id_offset));
+ dev_kfree_skb(lp->rx_bd_v[i].skb);
}
if (lp->rx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
lp->rx_bd_v,
lp->rx_bd_p);
}
if (lp->tx_bd_v) {
dma_free_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
lp->tx_bd_v,
lp->tx_bd_p);
}
@@ -201,33 +204,33 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/* Allocate the Tx and Rx buffer descriptors. */
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
&lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
&lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
lp->tx_bd_v[i].next = lp->tx_bd_p +
sizeof(*lp->tx_bd_v) *
- ((i + 1) % TX_BD_NUM);
+ ((i + 1) % lp->tx_bd_num);
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
lp->rx_bd_v[i].next = lp->rx_bd_p +
sizeof(*lp->rx_bd_v) *
- ((i + 1) % RX_BD_NUM);
+ ((i + 1) % lp->rx_bd_num);
skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
if (!skb)
goto out;
- lp->rx_bd_v[i].sw_id_offset = (u32) skb;
+ lp->rx_bd_v[i].skb = skb;
lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
skb->data,
lp->max_frm_size,
@@ -269,7 +272,7 @@ static int axienet_dma_bd_init(struct net_device *ndev)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -434,17 +437,20 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
lp->options |= options;
}
-static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp)
{
u32 timeout;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
* commands/transfers will be flushed or completed during this
* reset process.
+ * Note that even though both TX and RX have their own reset register,
+ * they both reset the entire DMA core, so only one needs to be used.
*/
- axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) {
+ while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
+ XAXIDMA_CR_RESET_MASK) {
udelay(1);
if (--timeout == 0) {
netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
@@ -470,8 +476,7 @@ static void axienet_device_reset(struct net_device *ndev)
u32 axienet_status;
struct axienet_local *lp = netdev_priv(ndev);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
+ __axienet_device_reset(lp);
lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
lp->options |= XAE_OPTION_VLAN;
@@ -498,6 +503,8 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
@@ -514,63 +521,6 @@ static void axienet_device_reset(struct net_device *ndev)
}
/**
- * axienet_adjust_link - Adjust the PHY link speed/duplex.
- * @ndev: Pointer to the net_device structure
- *
- * This function is called to change the speed and duplex setting after
- * auto negotiation is done by the PHY. This is the function that gets
- * registered with the PHY interface through the "of_phy_connect" call.
- */
-static void axienet_adjust_link(struct net_device *ndev)
-{
- u32 emmc_reg;
- u32 link_state;
- u32 setspeed = 1;
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = ndev->phydev;
-
- link_state = phy->speed | (phy->duplex << 1) | phy->link;
- if (lp->last_link != link_state) {
- if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) {
- if (lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX)
- setspeed = 0;
- } else {
- if ((phy->speed == SPEED_1000) &&
- (lp->phy_mode == PHY_INTERFACE_MODE_MII))
- setspeed = 0;
- }
-
- if (setspeed == 1) {
- emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
- emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
-
- switch (phy->speed) {
- case SPEED_1000:
- emmc_reg |= XAE_EMMC_LINKSPD_1000;
- break;
- case SPEED_100:
- emmc_reg |= XAE_EMMC_LINKSPD_100;
- break;
- case SPEED_10:
- emmc_reg |= XAE_EMMC_LINKSPD_10;
- break;
- default:
- dev_err(&ndev->dev, "Speed other than 10, 100 "
- "or 1Gbps is not supported\n");
- break;
- }
-
- axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
- lp->last_link = link_state;
- phy_print_status(phy);
- } else {
- netdev_err(ndev,
- "Error setting Axi Ethernet mac speed\n");
- }
- }
-}
-
-/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -595,26 +545,31 @@ static void axienet_start_xmit_done(struct net_device *ndev)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_consume_skb_irq((struct sk_buff *)cur_p->app4);
+ if (cur_p->skb)
+ dev_consume_skb_irq(cur_p->skb);
/*cur_p->phys = 0;*/
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
cur_p->status = 0;
+ cur_p->skb = NULL;
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- ++lp->tx_bd_ci;
- lp->tx_bd_ci %= TX_BD_NUM;
+ if (++lp->tx_bd_ci >= lp->tx_bd_num)
+ lp->tx_bd_ci = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
ndev->stats.tx_packets += packets;
ndev->stats.tx_bytes += size;
+
+ /* Matches barrier in axienet_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}
@@ -635,7 +590,7 @@ static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
int num_frag)
{
struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM];
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
return NETDEV_TX_BUSY;
return 0;
@@ -670,9 +625,19 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (axienet_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -695,8 +660,8 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -707,13 +672,13 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
- cur_p->app4 = (unsigned long)skb;
+ cur_p->skb = skb;
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- ++lp->tx_bd_tail;
- lp->tx_bd_tail %= TX_BD_NUM;
+ if (++lp->tx_bd_tail >= lp->tx_bd_num)
+ lp->tx_bd_tail = 0;
return NETDEV_TX_OK;
}
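
The smp_mb() calls above implement the standard lock-free stop/wake handshake between the transmit path and the completion path. A condensed sketch of the pairing:

	/* producer (axienet_start_xmit) */
	netif_stop_queue(ndev);
	smp_mb();	/* pairs with barrier in the consumer */
	if (!axienet_check_tx_bd_space(lp, num_frag))
		netif_wake_queue(ndev);	/* space was freed meanwhile */

	/* consumer (axienet_start_xmit_done) */
	/* ... release completed descriptors ... */
	smp_mb();	/* pairs with barrier in the producer */
	netif_wake_queue(ndev);

Without the barriers, the producer could stop the queue after missing the consumer's frees while the consumer missed the stop, leaving the queue stopped with no one left to wake it.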
@@ -742,13 +707,15 @@ static void axienet_recv(struct net_device *ndev)
while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- skb = (struct sk_buff *) (cur_p->sw_id_offset);
- length = cur_p->app4 & 0x0000FFFF;
dma_unmap_single(ndev->dev.parent, cur_p->phys,
lp->max_frm_size,
DMA_FROM_DEVICE);
+ skb = cur_p->skb;
+ cur_p->skb = NULL;
+ length = cur_p->app4 & 0x0000FFFF;
+
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
/*skb_checksum_none_assert(skb);*/
@@ -783,10 +750,10 @@ static void axienet_recv(struct net_device *ndev)
DMA_FROM_DEVICE);
cur_p->cntrl = lp->max_frm_size;
cur_p->status = 0;
- cur_p->sw_id_offset = (u32) new_skb;
+ cur_p->skb = new_skb;
- ++lp->rx_bd_ci;
- lp->rx_bd_ci %= RX_BD_NUM;
+ if (++lp->rx_bd_ci >= lp->rx_bd_num)
+ lp->rx_bd_ci = 0;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
@@ -802,7 +769,7 @@ static void axienet_recv(struct net_device *ndev)
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if the device generated a TX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done"
* to complete the BD processing.
@@ -821,7 +788,7 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Tx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -851,7 +818,7 @@ out:
* @irq: irq number
* @_ndev: net_device pointer
*
- * Return: IRQ_HANDLED for all cases.
+ * Return: IRQ_HANDLED if the device generated an RX interrupt, IRQ_NONE otherwise.
*
* This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
* processing.
@@ -870,7 +837,7 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
goto out;
}
if (!(status & XAXIDMA_IRQ_ALL_MASK))
- dev_err(&ndev->dev, "No interrupts asserted in Rx path\n");
+ return IRQ_NONE;
if (status & XAXIDMA_IRQ_ERROR_MASK) {
dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
@@ -895,6 +862,35 @@ out:
return IRQ_HANDLED;
}
+/**
+ * axienet_eth_irq - Ethernet core Isr.
+ * @irq: irq number
+ * @_ndev: net_device pointer
+ *
+ * Return: IRQ_HANDLED if the device generated a core interrupt, IRQ_NONE otherwise.
+ *
+ * Handle miscellaneous conditions indicated by Ethernet core IRQ.
+ */
+static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
+{
+ struct net_device *ndev = _ndev;
+ struct axienet_local *lp = netdev_priv(ndev);
+ unsigned int pending;
+
+ pending = axienet_ior(lp, XAE_IP_OFFSET);
+ if (!pending)
+ return IRQ_NONE;
+
+ if (pending & XAE_INT_RXFIFOOVR_MASK)
+ ndev->stats.rx_missed_errors++;
+
+ if (pending & XAE_INT_RXRJECT_MASK)
+ ndev->stats.rx_frame_errors++;
+
+ axienet_iow(lp, XAE_IS_OFFSET, pending);
+ return IRQ_HANDLED;
+}
+
static void axienet_dma_err_handler(unsigned long data);
/**
@@ -904,67 +900,72 @@ static void axienet_dma_err_handler(unsigned long data);
* Return: 0, on success.
* non-zero error value on failure
*
- * This is the driver open routine. It calls phy_start to start the PHY device.
+ * This is the driver open routine. It calls phylink_start to start the
+ * PHY device.
* It also allocates interrupt service routines, enables the interrupt lines
* and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
* descriptors are initialized.
*/
static int axienet_open(struct net_device *ndev)
{
- int ret, mdio_mcreg;
+ int ret;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- ret = axienet_mdio_wait_until_ready(lp);
- if (ret < 0)
- return ret;
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. If MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
axienet_device_reset(ndev);
- /* Enable the MDIO */
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- ret = axienet_mdio_wait_until_ready(lp);
+ ret = axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
if (ret < 0)
return ret;
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0, lp->phy_mode);
-
- if (!phydev)
- dev_err(lp->dev, "of_phy_connect() failed\n");
- else
- phy_start(phydev);
+ ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
+ if (ret) {
+ dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
+ return ret;
}
+ phylink_start(lp->phylink);
+
/* Enable tasklets for Axi DMA error handling */
tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler,
(unsigned long) lp);
/* Enable interrupts for Axi DMA Tx */
- ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_tx_irq;
/* Enable interrupts for Axi DMA Rx */
- ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
+ ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
+ ndev->name, ndev);
if (ret)
goto err_rx_irq;
+ /* Enable interrupts for Axi Ethernet core (if defined) */
+ if (lp->eth_irq > 0) {
+ ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
+ ndev->name, ndev);
+ if (ret)
+ goto err_eth_irq;
+ }
return 0;
+err_eth_irq:
+ free_irq(lp->rx_irq, ndev);
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (phydev)
- phy_disconnect(phydev);
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
@@ -976,34 +977,61 @@ err_tx_irq:
*
* Return: 0, on success.
*
- * This is the driver stop routine. It calls phy_disconnect to stop the PHY
+ * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
* device. It also removes the interrupt handlers and disables the interrupts.
* The Axi DMA Tx/Rx BDs are released.
*/
static int axienet_stop(struct net_device *ndev)
{
- u32 cr;
+ u32 cr, sr;
+ int count;
struct axienet_local *lp = netdev_priv(ndev);
dev_dbg(&ndev->dev, "axienet_close()\n");
- cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
- cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
- axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
- cr & (~XAXIDMA_CR_RUNSTOP_MASK));
+ phylink_stop(lp->phylink);
+ phylink_disconnect_phy(lp->phylink);
+
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
+ cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
+
+ cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
+ axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
+
+ axienet_iow(lp, XAE_IE_OFFSET, 0);
+
+ /* Give DMAs a chance to halt gracefully */
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ }
+
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
+ msleep(20);
+ sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ }
+
+ /* Do a reset to ensure DMA is really stopped */
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
+
tasklet_kill(&lp->dma_err_tasklet);
+ if (lp->eth_irq > 0)
+ free_irq(lp->eth_irq, ndev);
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
-
axienet_dma_bd_release(ndev);
return 0;
}
@@ -1151,6 +1179,48 @@ static void axienet_ethtools_get_regs(struct net_device *ndev,
data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
+ data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
+ data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
+ data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
+ data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
+ data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
+ data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
+ data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
+ data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
+}
+
+static void axienet_ethtools_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ ering->rx_max_pending = RX_BD_NUM_MAX;
+ ering->rx_mini_max_pending = 0;
+ ering->rx_jumbo_max_pending = 0;
+ ering->tx_max_pending = TX_BD_NUM_MAX;
+ ering->rx_pending = lp->rx_bd_num;
+ ering->rx_mini_pending = 0;
+ ering->rx_jumbo_pending = 0;
+ ering->tx_pending = lp->tx_bd_num;
+}
+
+static int axienet_ethtools_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ if (ering->rx_pending > RX_BD_NUM_MAX ||
+ ering->rx_mini_pending ||
+ ering->rx_jumbo_pending ||
+ ering->tx_pending > TX_BD_NUM_MAX)
+ return -EINVAL;
+
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ lp->rx_bd_num = ering->rx_pending;
+ lp->tx_bd_num = ering->tx_pending;
+ return 0;
}
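
With these two hooks the ring sizes become tunable through the standard ethtool interface, e.g. "ethtool -G <iface> rx 512 tx 128" (interface name illustrative). The -EBUSY check means the interface must be down when resizing; the new sizes take effect on the next open, when the descriptor rings are reallocated.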
/**
@@ -1166,12 +1236,9 @@ static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval;
struct axienet_local *lp = netdev_priv(ndev);
- epauseparm->autoneg = 0;
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK;
- epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK;
+
+ phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1190,27 +1257,9 @@ static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
struct ethtool_pauseparam *epauseparm)
{
- u32 regval = 0;
struct axienet_local *lp = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
- }
-
- regval = axienet_ior(lp, XAE_FCC_OFFSET);
- if (epauseparm->tx_pause)
- regval |= XAE_FCC_FCTX_MASK;
- else
- regval &= ~XAE_FCC_FCTX_MASK;
- if (epauseparm->rx_pause)
- regval |= XAE_FCC_FCRX_MASK;
- else
- regval &= ~XAE_FCC_FCRX_MASK;
- axienet_iow(lp, XAE_FCC_OFFSET, regval);
-
- return 0;
+ return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}
/**
@@ -1289,17 +1338,170 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
return 0;
}
+static int
+axienet_ethtools_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(lp->phylink, cmd);
+}
+
+static int
+axienet_ethtools_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct axienet_local *lp = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(lp->phylink, cmd);
+}
+
static const struct ethtool_ops axienet_ethtool_ops = {
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
.get_link = ethtool_op_get_link,
+ .get_ringparam = axienet_ethtools_get_ringparam,
+ .set_ringparam = axienet_ethtools_set_ringparam,
.get_pauseparam = axienet_ethtools_get_pauseparam,
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = axienet_ethtools_get_link_ksettings,
+ .set_link_ksettings = axienet_ethtools_set_link_ksettings,
+};
+
+static void axienet_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ /* Only support the mode we are configured for */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != lp->phy_mode) {
+ netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
+ phy_modes(state->interface),
+ phy_modes(lp->phy_mode));
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
+
+ phylink_set(mask, Autoneg);
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, Pause);
+ phylink_set(mask, 1000baseX_Full);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static int axienet_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ state->interface = lp->phy_mode;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ if (emmc_reg & XAE_EMMC_LINKSPD_1000)
+ state->speed = SPEED_1000;
+ else if (emmc_reg & XAE_EMMC_LINKSPD_100)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+
+ state->pause = 0;
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (fcc_reg & XAE_FCC_FCTX_MASK)
+ state->pause |= MLO_PAUSE_TX;
+ if (fcc_reg & XAE_FCC_FCRX_MASK)
+ state->pause |= MLO_PAUSE_RX;
+
+ state->an_complete = 0;
+ state->duplex = DUPLEX_FULL;
+
+ return 1;
+}
+
+static void axienet_mac_an_restart(struct phylink_config *config)
+{
+ /* Unsupported, do nothing */
+}
+
+static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct axienet_local *lp = netdev_priv(ndev);
+ u32 emmc_reg, fcc_reg;
+
+ emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
+ emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
+
+ switch (state->speed) {
+ case SPEED_1000:
+ emmc_reg |= XAE_EMMC_LINKSPD_1000;
+ break;
+ case SPEED_100:
+ emmc_reg |= XAE_EMMC_LINKSPD_100;
+ break;
+ case SPEED_10:
+ emmc_reg |= XAE_EMMC_LINKSPD_10;
+ break;
+ default:
+ dev_err(&ndev->dev,
+ "Speed other than 10, 100 or 1Gbps is not supported\n");
+ break;
+ }
+
+ axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
+
+ fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
+ if (state->pause & MLO_PAUSE_TX)
+ fcc_reg |= XAE_FCC_FCTX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCTX_MASK;
+ if (state->pause & MLO_PAUSE_RX)
+ fcc_reg |= XAE_FCC_FCRX_MASK;
+ else
+ fcc_reg &= ~XAE_FCC_FCRX_MASK;
+ axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
+}
+
+static void axienet_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ /* nothing meaningful to do */
+}
+
+static void axienet_mac_link_up(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phy)
+{
+ /* nothing meaningful to do */
+}
+
+static const struct phylink_mac_ops axienet_phylink_ops = {
+ .validate = axienet_validate,
+ .mac_link_state = axienet_mac_link_state,
+ .mac_an_restart = axienet_mac_an_restart,
+ .mac_config = axienet_mac_config,
+ .mac_link_down = axienet_mac_link_down,
+ .mac_link_up = axienet_mac_link_up,
};
/**
@@ -1313,38 +1515,33 @@ static void axienet_dma_err_handler(unsigned long data)
{
u32 axienet_status;
u32 cr, i;
- int mdio_mcreg;
struct axienet_local *lp = (struct axienet_local *) data;
struct net_device *ndev = lp->ndev;
struct axidma_bd *cur_p;
axienet_setoptions(ndev, lp->options &
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
- mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
- axienet_mdio_wait_until_ready(lp);
/* Disable the MDIO interface till Axi Ethernet Reset is completed.
* When we do an Axi Ethernet reset, it resets the complete core
- * including the MDIO. So if MDIO is not disabled when the reset
- * process is started, MDIO will be broken afterwards.
+ * including the MDIO. MDIO must be disabled before resetting
+ * and re-enabled afterwards.
+ * Hold MDIO bus lock to avoid MDIO accesses during the reset.
*/
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
- ~XAE_MDIO_MC_MDIOEN_MASK));
+ mutex_lock(&lp->mii_bus->mdio_lock);
+ axienet_mdio_disable(lp);
+ __axienet_device_reset(lp);
+ axienet_mdio_enable(lp);
+ mutex_unlock(&lp->mii_bus->mdio_lock);
- __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
- __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
-
- axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
- axienet_mdio_wait_until_ready(lp);
-
- for (i = 0; i < TX_BD_NUM; i++) {
+ for (i = 0; i < lp->tx_bd_num; i++) {
cur_p = &lp->tx_bd_v[i];
if (cur_p->phys)
dma_unmap_single(ndev->dev.parent, cur_p->phys,
(cur_p->cntrl &
XAXIDMA_BD_CTRL_LENGTH_MASK),
DMA_TO_DEVICE);
- if (cur_p->app4)
- dev_kfree_skb_irq((struct sk_buff *) cur_p->app4);
+ if (cur_p->skb)
+ dev_kfree_skb_irq(cur_p->skb);
cur_p->phys = 0;
cur_p->cntrl = 0;
cur_p->status = 0;
@@ -1353,10 +1550,10 @@ static void axienet_dma_err_handler(unsigned long data)
cur_p->app2 = 0;
cur_p->app3 = 0;
cur_p->app4 = 0;
- cur_p->sw_id_offset = 0;
+ cur_p->skb = NULL;
}
- for (i = 0; i < RX_BD_NUM; i++) {
+ for (i = 0; i < lp->rx_bd_num; i++) {
cur_p = &lp->rx_bd_v[i];
cur_p->status = 0;
cur_p->app0 = 0;
@@ -1404,7 +1601,7 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
cr | XAXIDMA_CR_RUNSTOP_MASK);
axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
- (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
/* Write to the RS (Run-stop) bit in the Tx channel control register.
* Tx channel is now ready to run. But only after we write to the
@@ -1422,6 +1619,8 @@ static void axienet_dma_err_handler(unsigned long data)
axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
if (axienet_status & XAE_INT_RXRJECT_MASK)
axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
+ axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
+ XAE_INT_RECV_ERROR_MASK : 0);
axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
/* Sync default options with HW but leave receiver and
@@ -1453,7 +1652,7 @@ static int axienet_probe(struct platform_device *pdev)
struct axienet_local *lp;
struct net_device *ndev;
const void *mac_addr;
- struct resource *ethres, dmares;
+ struct resource *ethres;
u32 value;
ndev = alloc_etherdev(sizeof(*lp));
@@ -1476,6 +1675,8 @@ static int axienet_probe(struct platform_device *pdev)
lp->ndev = ndev;
lp->dev = &pdev->dev;
lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
/* Map device registers */
ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
@@ -1484,6 +1685,7 @@ static int axienet_probe(struct platform_device *pdev)
ret = PTR_ERR(lp->regs);
goto free_netdev;
}
+ lp->regs_start = ethres->start;
/* Setup checksum offload, but default to off if not specified */
lp->features = 0;
@@ -1568,38 +1770,56 @@ static int axienet_probe(struct platform_device *pdev)
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not find DMA node\n");
- ret = -ENODEV;
- goto free_netdev;
- }
- ret = of_address_to_resource(np, 0, &dmares);
- if (ret) {
- dev_err(&pdev->dev, "unable to get DMA resource\n");
+ if (np) {
+ struct resource dmares;
+
+ ret = of_address_to_resource(np, 0, &dmares);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "unable to get DMA resource\n");
+ of_node_put(np);
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev,
+ &dmares);
+ lp->rx_irq = irq_of_parse_and_map(np, 1);
+ lp->tx_irq = irq_of_parse_and_map(np, 0);
of_node_put(np);
- goto free_netdev;
+ lp->eth_irq = platform_get_irq(pdev, 0);
+ } else {
+ /* Check for these resources directly on the Ethernet node. */
+ struct resource *res = platform_get_resource(pdev,
+ IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get DMA memory resource\n");
+ goto free_netdev;
+ }
+ lp->dma_regs = devm_ioremap_resource(&pdev->dev, res);
+ lp->rx_irq = platform_get_irq(pdev, 1);
+ lp->tx_irq = platform_get_irq(pdev, 0);
+ lp->eth_irq = platform_get_irq(pdev, 2);
}
- lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
if (IS_ERR(lp->dma_regs)) {
dev_err(&pdev->dev, "could not map DMA regs\n");
ret = PTR_ERR(lp->dma_regs);
- of_node_put(np);
goto free_netdev;
}
- lp->rx_irq = irq_of_parse_and_map(np, 1);
- lp->tx_irq = irq_of_parse_and_map(np, 0);
- of_node_put(np);
if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
dev_err(&pdev->dev, "could not determine irqs\n");
ret = -ENOMEM;
goto free_netdev;
}
+ /* Check for Ethernet core IRQ (optional) */
+ if (lp->eth_irq <= 0)
+ dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
+
/* Retrieve the MAC address */
mac_addr = of_get_mac_address(pdev->dev.of_node);
if (IS_ERR(mac_addr)) {
- dev_err(&pdev->dev, "could not find MAC address\n");
- goto free_netdev;
+ dev_warn(&pdev->dev, "could not find MAC address property: %ld\n",
+ PTR_ERR(mac_addr));
+ mac_addr = NULL;
}
axienet_set_mac_address(ndev, mac_addr);
@@ -1608,9 +1828,36 @@ static int axienet_probe(struct platform_device *pdev)
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
- ret = axienet_mdio_setup(lp, pdev->dev.of_node);
+ lp->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(lp->clk)) {
+ dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+ PTR_ERR(lp->clk));
+ lp->clk = NULL;
+ } else {
+ ret = clk_prepare_enable(lp->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+ ret);
+ goto free_netdev;
+ }
+ }
+
+ ret = axienet_mdio_setup(lp);
if (ret)
- dev_warn(&pdev->dev, "error registering MDIO bus\n");
+ dev_warn(&pdev->dev,
+ "error registering MDIO bus: %d\n", ret);
+ }
+
+ lp->phylink_config.dev = &ndev->dev;
+ lp->phylink_config.type = PHYLINK_NETDEV;
+
+ lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
+ lp->phy_mode,
+ &axienet_phylink_ops);
+ if (IS_ERR(lp->phylink)) {
+ ret = PTR_ERR(lp->phylink);
+ dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
+ goto free_netdev;
}
ret = register_netdev(lp->ndev);
@@ -1632,9 +1879,16 @@ static int axienet_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct axienet_local *lp = netdev_priv(ndev);
- axienet_mdio_teardown(lp);
unregister_netdev(ndev);
+ if (lp->phylink)
+ phylink_destroy(lp->phylink);
+
+ axienet_mdio_teardown(lp);
+
+ if (lp->clk)
+ clk_disable_unprepare(lp->clk);
+
of_node_put(lp->phy_node);
lp->phy_node = NULL;
@@ -1643,9 +1897,23 @@ static int axienet_remove(struct platform_device *pdev)
return 0;
}
+static void axienet_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ rtnl_lock();
+ netif_device_detach(ndev);
+
+ if (netif_running(ndev))
+ dev_close(ndev);
+
+ rtnl_unlock();
+}
+
static struct platform_driver axienet_driver = {
.probe = axienet_probe,
.remove = axienet_remove,
+ .shutdown = axienet_shutdown,
.driver = {
.name = "xilinx_axienet",
.of_match_table = axienet_of_match,
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 704babdbc8a2..435ed308d990 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -5,9 +5,11 @@
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
* Copyright (c) 2010 - 2011 PetaLogix
+ * Copyright (c) 2019 SED Systems, a division of Calian Ltd.
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/jiffies.h>
@@ -16,10 +18,10 @@
#include "xilinx_axienet.h"
#define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */
-#define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT
+#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
/* Wait till MDIO interface is ready to accept a new transaction.*/
-int axienet_mdio_wait_until_ready(struct axienet_local *lp)
+static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
@@ -112,23 +114,42 @@ static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg,
}
/**
- * axienet_mdio_setup - MDIO setup function
+ * axienet_mdio_enable - MDIO hardware setup function
* @lp: Pointer to axienet local data structure.
- * @np: Pointer to device node
*
- * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
- * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ * Return: 0 on success, -ETIMEDOUT on a timeout.
*
* Sets up the MDIO interface by initializing the MDIO clock and enabling the
- * MDIO interface in hardware. Register the MDIO interface.
+ * MDIO interface in hardware.
**/
-int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
+int axienet_mdio_enable(struct axienet_local *lp)
{
- int ret;
u32 clk_div, host_clock;
- struct mii_bus *bus;
- struct resource res;
- struct device_node *np1;
+
+ if (lp->clk) {
+ host_clock = clk_get_rate(lp->clk);
+ } else {
+ struct device_node *np1;
+
+ /* Legacy fallback: detect CPU clock frequency and use as AXI
+ * bus clock frequency. This only works on certain platforms.
+ */
+ np1 = of_find_node_by_name(NULL, "cpu");
+ if (!np1) {
+ netdev_warn(lp->ndev, "Could not find CPU device node.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ } else {
+ int ret = of_property_read_u32(np1, "clock-frequency",
+ &host_clock);
+ if (ret) {
+ netdev_warn(lp->ndev, "CPU clock-frequency property not found.\n");
+ host_clock = DEFAULT_HOST_CLOCK;
+ }
+ of_node_put(np1);
+ }
+ netdev_info(lp->ndev, "Setting assumed host clock to %u\n",
+ host_clock);
+ }
/* clk_div can be calculated by deriving it from the equation:
* fMDIO = fHOST / ((1 + clk_div) * 2)
@@ -155,25 +176,6 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
* "clock-frequency" from the CPU
*/
- np1 = of_find_node_by_name(NULL, "cpu");
- if (!np1) {
- netdev_warn(lp->ndev, "Could not find CPU device node.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- goto issue;
- }
- if (of_property_read_u32(np1, "clock-frequency", &host_clock)) {
- netdev_warn(lp->ndev, "clock-frequency property not found.\n");
- netdev_warn(lp->ndev,
- "Setting MDIO clock divisor to default %d\n",
- DEFAULT_CLOCK_DIVISOR);
- clk_div = DEFAULT_CLOCK_DIVISOR;
- of_node_put(np1);
- goto issue;
- }
-
clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1;
/* If there is any remainder from the division of
* fHOST / (MAX_MDIO_FREQ * 2), then we need to add
@@ -186,12 +188,39 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
"Setting MDIO clock divisor to %u/%u Hz host clock.\n",
clk_div, host_clock);
- of_node_put(np1);
-issue:
- axienet_iow(lp, XAE_MDIO_MC_OFFSET,
- (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK));
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, clk_div | XAE_MDIO_MC_MDIOEN_MASK);
- ret = axienet_mdio_wait_until_ready(lp);
+ return axienet_mdio_wait_until_ready(lp);
+}
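
A worked example of the divisor math with the constants from this file (MAX_MDIO_FREQ = 2.5 MHz, DEFAULT_HOST_CLOCK = 150 MHz):

	/* clk_div = (fHOST / (MAX_MDIO_FREQ * 2)) - 1
	 *         = (150000000 / (2500000 * 2)) - 1 = 29
	 * check:  fMDIO = fHOST / ((1 + clk_div) * 2)
	 *               = 150000000 / ((1 + 29) * 2) = 2500000 Hz
	 */

Here the division is exact; when it leaves a remainder, the adjustment above bumps clk_div by one so that fMDIO stays at or below the 2.5 MHz limit.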
+
+/**
+ * axienet_mdio_disable - MDIO hardware disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO interface in hardware.
+ **/
+void axienet_mdio_disable(struct axienet_local *lp)
+{
+ axienet_iow(lp, XAE_MDIO_MC_OFFSET, 0);
+}
+
+/**
+ * axienet_mdio_setup - MDIO setup function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when
+ * mdiobus_alloc (to allocate memory for mii bus structure) fails.
+ *
+ * Sets up the MDIO interface by initializing the MDIO clock and enabling the
+ * MDIO interface in hardware. Register the MDIO interface.
+ **/
+int axienet_mdio_setup(struct axienet_local *lp)
+{
+ struct device_node *mdio_node;
+ struct mii_bus *bus;
+ int ret;
+
+ ret = axienet_mdio_enable(lp);
if (ret < 0)
return ret;
@@ -199,10 +228,8 @@ issue:
if (!bus)
return -ENOMEM;
- np1 = of_get_parent(lp->phy_node);
- of_address_to_resource(np1, 0, &res);
- snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long) res.start);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "axienet-%.8llx",
+ (unsigned long long)lp->regs_start);
bus->priv = lp;
bus->name = "Xilinx Axi Ethernet MDIO";
@@ -211,7 +238,9 @@ issue:
bus->parent = lp->dev;
lp->mii_bus = bus;
- ret = of_mdiobus_register(bus, np1);
+ mdio_node = of_get_child_by_name(lp->dev->of_node, "mdio");
+ ret = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
if (ret) {
mdiobus_free(bus);
lp->mii_bus = NULL;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index c409bab63bd3..0de52e70abcc 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Xilinx EmacLite Linux driver for the Xilinx Ethernet MAC Lite device.
*
@@ -5,11 +6,6 @@
* driver from John Williams <john.williams@xilinx.com>.
*
* 2007 - 2013 (c) Xilinx, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/module.h>