Diffstat (limited to 'drivers/net/ethernet/cadence/macb_main.c')
 drivers/net/ethernet/cadence/macb_main.c | 1363 ++++++++++++++++++++---------
 1 file changed, 1000 insertions(+), 363 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 2c28da1737fe..4f63f1ba3161 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -23,7 +23,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
@@ -35,7 +34,11 @@
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/reset.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -85,6 +88,9 @@ struct sifive_fu540_macb_mgmt {
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
+#define HS_SPEED_10000M 4
+#define MACB_SERDES_RATE_10G 1
+
/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
@@ -311,7 +317,7 @@ static void macb_get_hwaddr(struct macb *bp)
addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
return;
}
}
@@ -333,7 +339,7 @@ static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
if (status < 0)
goto mdio_pm_exit;
@@ -385,7 +391,7 @@ static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
struct macb *bp = bus->priv;
int status;
- status = pm_runtime_get_sync(&bp->pdev->dev);
+ status = pm_runtime_resume_and_get(&bp->pdev->dev);
if (status < 0)
goto mdio_pm_exit;
@@ -454,15 +460,18 @@ static void macb_init_buffers(struct macb *bp)
/**
* macb_set_tx_clk() - Set a clock to a new frequency
- * @clk Pointer to the clock to change
- * @rate New frequency in Hz
- * @dev Pointer to the struct net_device
+ * @bp: pointer to struct macb
+ * @speed: New frequency in Hz
*/
-static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
+static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!clk)
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
+ return;
+
+ /* In case of MII the PHY is the clock master */
+ if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
return;
switch (speed) {
@@ -479,7 +488,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
return;
}
- rate_rounded = clk_round_rate(clk, rate);
+ rate_rounded = clk_round_rate(bp->tx_clk, rate);
if (rate_rounded < 0)
return;
@@ -489,76 +498,92 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = abs(rate_rounded - rate);
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
- netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+ netdev_warn(bp->dev,
+ "unable to generate target frequency: %ld Hz\n",
rate);
- if (clk_set_rate(clk, rate_rounded))
- netdev_err(dev, "adjusting tx_clk failed.\n");
+ if (clk_set_rate(bp->tx_clk, rate_rounded))
+ netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}
-static void macb_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex)
{
- struct net_device *ndev = to_net_dev(config->dev);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- struct macb *bp = netdev_priv(ndev);
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
+ u32 config;
- /* We only support MII, RMII, GMII, RGMII & SGMII. */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_RMII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
- }
+ config = gem_readl(bp, USX_CONTROL);
+ config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
+ config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
+ config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
+ config |= GEM_BIT(TX_EN);
+ gem_writel(bp, USX_CONTROL, config);
+}
- if (!macb_is_gem(bp) &&
- (state->interface == PHY_INTERFACE_MODE_GMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
- }
+static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
+ u32 val;
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- phylink_set(mask, Asym_Pause);
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
+ val = gem_readl(bp, USX_STATUS);
+ state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
+ val = gem_readl(bp, NCFGR);
+ if (val & GEM_BIT(PAE))
+ state->pause = MLO_PAUSE_RX;
+}
- if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
- (state->interface == PHY_INTERFACE_MODE_NA ||
- state->interface == PHY_INTERFACE_MODE_GMII ||
- state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_rgmii(state->interface))) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
+static int macb_usx_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
- if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
- phylink_set(mask, 1000baseT_Half);
- }
+ gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
+ GEM_BIT(SIGNAL_OK));
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return 0;
}
-static void macb_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
+static void macb_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
{
state->link = 0;
}
-static void macb_mac_an_restart(struct phylink_config *config)
+static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
/* Not supported */
}
+static int macb_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
+ .pcs_get_state = macb_usx_pcs_get_state,
+ .pcs_config = macb_usx_pcs_config,
+ .pcs_link_up = macb_usx_pcs_link_up,
+};
+
+static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
+ .pcs_get_state = macb_pcs_get_state,
+ .pcs_an_restart = macb_pcs_an_restart,
+ .pcs_config = macb_pcs_config,
+};
+
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -566,41 +591,53 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
struct macb *bp = netdev_priv(ndev);
unsigned long flags;
u32 old_ctrl, ctrl;
+ u32 old_ncr, ncr;
spin_lock_irqsave(&bp->lock, flags);
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
-
- /* Clear all the bits we might set later */
- ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE));
+ old_ncr = ncr = macb_or_gem_readl(bp, NCR);
if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
if (state->interface == PHY_INTERFACE_MODE_RMII)
ctrl |= MACB_BIT(RM9200_RMII);
- } else {
- ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+ } else if (macb_is_gem(bp)) {
+ ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+ ncr &= ~GEM_BIT(ENABLE_HS_MAC);
- /* We do not support MLO_PAUSE_RX yet */
- if (state->pause & MLO_PAUSE_TX)
- ctrl |= MACB_BIT(PAE);
-
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
+ } else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
+ ctrl |= GEM_BIT(PCSSEL);
+ ncr |= GEM_BIT(ENABLE_HS_MAC);
+ } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
+ bp->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ncr |= MACB_BIT(MIIONRGMII);
+ }
}
- if (state->speed == SPEED_1000)
- ctrl |= GEM_BIT(GBE);
- else if (state->speed == SPEED_100)
- ctrl |= MACB_BIT(SPD);
-
- if (state->duplex)
- ctrl |= MACB_BIT(FD);
-
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
- bp->speed = state->speed;
+ if (old_ncr ^ ncr)
+ macb_or_gem_writel(bp, NCR, ncr);
+
+ /* Disable AN for SGMII fixed link configuration, enable otherwise.
+ * Must be written after PCSSEL is set in NCFGR,
+ * otherwise writes will not take effect.
+ */
+ if (macb_is_gem(bp) && state->interface == PHY_INTERFACE_MODE_SGMII) {
+ u32 pcsctrl, old_pcsctrl;
+
+ old_pcsctrl = gem_readl(bp, PCSCNTRL);
+ if (mode == MLO_AN_FIXED)
+ pcsctrl = old_pcsctrl & ~GEM_BIT(PCSAUTONEG);
+ else
+ pcsctrl = old_pcsctrl | GEM_BIT(PCSAUTONEG);
+ if (old_pcsctrl != pcsctrl)
+ gem_writel(bp, PCSCNTRL, pcsctrl);
+ }
spin_unlock_irqrestore(&bp->lock, flags);
}
@@ -626,16 +663,44 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
netif_tx_stop_all_queues(ndev);
}
-static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
- phy_interface_t interface, struct phy_device *phy)
+static void macb_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
struct net_device *ndev = to_net_dev(config->dev);
struct macb *bp = netdev_priv(ndev);
struct macb_queue *queue;
+ unsigned long flags;
unsigned int q;
+ u32 ctrl;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ ctrl = macb_or_gem_readl(bp, NCFGR);
+
+ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+
+ if (speed == SPEED_100)
+ ctrl |= MACB_BIT(SPD);
+
+ if (duplex)
+ ctrl |= MACB_BIT(FD);
if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
- macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
+ ctrl &= ~MACB_BIT(PAE);
+ if (macb_is_gem(bp)) {
+ ctrl &= ~GEM_BIT(GBE);
+
+ if (speed == SPEED_1000)
+ ctrl |= GEM_BIT(GBE);
+ }
+
+ if (rx_pause)
+ ctrl |= MACB_BIT(PAE);
+
+ macb_set_tx_clk(bp, speed);
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
@@ -648,16 +713,37 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
}
+ macb_or_gem_writel(bp, NCFGR, ctrl);
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
+ gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
+ gem_readl(bp, HS_MAC_CONFIG)));
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* Enable Rx and Tx */
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
netif_tx_wake_all_queues(ndev);
}
+static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_10GBASER)
+ return &bp->phylink_usx_pcs;
+ else if (interface == PHY_INTERFACE_MODE_SGMII)
+ return &bp->phylink_sgmii_pcs;
+ else
+ return NULL;
+}
+
static const struct phylink_mac_ops macb_phylink_ops = {
- .validate = macb_validate,
- .mac_pcs_get_state = macb_mac_pcs_get_state,
- .mac_an_restart = macb_mac_an_restart,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = macb_mac_select_pcs,
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
@@ -701,13 +787,60 @@ static int macb_phylink_connect(struct macb *bp)
return 0;
}
+static void macb_get_pcs_fixed_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
+}
+
/* based on au1000_eth. c*/
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
+ bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
+ bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;
+
bp->phylink_config.dev = &dev->dev;
bp->phylink_config.type = PHYLINK_NETDEV;
+ bp->phylink_config.mac_managed_pm = true;
+
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ bp->phylink_config.poll_fixed_state = true;
+ bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
+ }
+
+ bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ bp->phylink_config.supported_interfaces);
+
+ /* Determine what modes are supported */
+ if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
+ bp->phylink_config.mac_capabilities |= MAC_1000FD;
+ if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
+ bp->phylink_config.mac_capabilities |= MAC_1000HD;
+
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.supported_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_PCS)
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ bp->phylink_config.supported_interfaces);
+
+ if (bp->caps & MACB_CAPS_HIGH_SPEED) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ bp->phylink_config.supported_interfaces);
+ bp->phylink_config.mac_capabilities |= MAC_10000FD;
+ }
+ }
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
@@ -724,6 +857,20 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
+ /* If we have a child named mdio, probe it instead of looking for PHYs
+ * directly under the MAC node
+ */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ int ret = of_mdiobus_register(bp->mii_bus, child);
+
+ of_node_put(child);
+ return ret;
+ }
+
+ if (of_phy_is_fixed_link(np))
+ return mdiobus_register(bp->mii_bus);
+
/* Only create the PHY from the device tree if at least one PHY is
* described. Otherwise scan the entire MDIO bus. We do this to support
* old device tree that did not follow the best practices and did not
@@ -815,7 +962,7 @@ static int macb_halt_tx(struct macb *bp)
return -ETIMEDOUT;
}
-static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
if (tx_skb->mapping) {
if (tx_skb->mapped_as_page)
@@ -828,7 +975,7 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
if (tx_skb->skb) {
- dev_kfree_skb_any(tx_skb->skb);
+ napi_consume_skb(tx_skb->skb, budget);
tx_skb->skb = NULL;
}
}
@@ -881,12 +1028,13 @@ static void macb_tx_error_task(struct work_struct *work)
(unsigned int)(queue - bp->queues),
queue->tx_tail, queue->tx_head);
- /* Prevent the queue IRQ handlers from running: each of them may call
- * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
+ /* Prevent the queue NAPI TX poll from running, as it calls
+ * macb_tx_complete(), which in turn may call netif_wake_subqueue().
* As explained below, we have to halt the transmission before updating
* TBQP registers so we call netif_tx_stop_all_queues() to notify the
* network engine about the macb/gem being halted.
*/
+ napi_disable(&queue->napi_tx);
spin_lock_irqsave(&bp->lock, flags);
/* Make sure nobody is trying to queue up new packets */
@@ -914,7 +1062,7 @@ static void macb_tx_error_task(struct work_struct *work)
if (ctrl & MACB_BIT(TX_USED)) {
/* skb is set for the last buffer of the frame */
while (!skb) {
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
tail++;
tx_skb = macb_tx_skb(queue, tail);
skb = tx_skb->skb;
@@ -944,7 +1092,7 @@ static void macb_tx_error_task(struct work_struct *work)
desc->ctrl = ctrl | MACB_BIT(TX_USED);
}
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
/* Set end of TX queue */
@@ -974,27 +1122,50 @@ static void macb_tx_error_task(struct work_struct *work)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
spin_unlock_irqrestore(&bp->lock, flags);
+ napi_enable(&queue->napi_tx);
}
-static void macb_tx_interrupt(struct macb_queue *queue)
+static bool ptp_one_step_sync(struct sk_buff *skb)
{
- unsigned int tail;
- unsigned int head;
- u32 status;
- struct macb *bp = queue->bp;
- u16 queue_index = queue - bp->queues;
+ struct ptp_header *hdr;
+ unsigned int ptp_class;
+ u8 msgtype;
+
+ /* No need to parse packet if PTP TS is not involved */
+ if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
+ goto not_oss;
+
+ /* Identify and return whether PTP one step sync is being processed */
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ goto not_oss;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ goto not_oss;
- status = macb_readl(bp, TSR);
- macb_writel(bp, TSR, status);
+ if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
+ goto not_oss;
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ if (msgtype == PTP_MSGTYPE_SYNC)
+ return true;
- netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+not_oss:
+ return false;
+}
+
+static int macb_tx_complete(struct macb_queue *queue, int budget)
+{
+ struct macb *bp = queue->bp;
+ u16 queue_index = queue - bp->queues;
+ unsigned int tail;
+ unsigned int head;
+ int packets = 0;
+ spin_lock(&queue->tx_ptr_lock);
head = queue->tx_head;
- for (tail = queue->tx_tail; tail != head; tail++) {
+ for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
struct macb_tx_skb *tx_skb;
struct sk_buff *skb;
struct macb_dma_desc *desc;
@@ -1020,8 +1191,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP) &&
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ !ptp_one_step_sync(skb) &&
gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
@@ -1035,10 +1206,11 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue->stats.tx_packets++;
bp->dev->stats.tx_bytes += skb->len;
queue->stats.tx_bytes += skb->len;
+ packets++;
}
/* Now we can safely release resources */
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, budget);
/* skb is set only for the last buffer of the frame.
* WARNING: at this point skb has been freed by
@@ -1054,6 +1226,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
CIRC_CNT(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
netif_wake_subqueue(bp->dev, queue_index);
+ spin_unlock(&queue->tx_ptr_lock);
+
+ return packets;
}
static void gem_rx_refill(struct macb_queue *queue)
@@ -1071,7 +1246,6 @@ static void gem_rx_refill(struct macb_queue *queue)
/* Make hw descriptor updates visible to CPU */
rmb();
- queue->rx_prepared_head++;
desc = macb_rx_desc(queue, entry);
if (!queue->rx_skbuff[entry]) {
@@ -1110,6 +1284,7 @@ static void gem_rx_refill(struct macb_queue *queue)
dma_wmb();
desc->addr &= ~MACB_BIT(RX_USED);
}
+ queue->rx_prepared_head++;
}
/* Make descriptor updates visible to hardware */
@@ -1410,31 +1585,51 @@ static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
return received;
}
-static int macb_poll(struct napi_struct *napi, int budget)
+static bool macb_rx_pending(struct macb_queue *queue)
{
- struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
struct macb *bp = queue->bp;
- int work_done;
- u32 status;
+ unsigned int entry;
+ struct macb_dma_desc *desc;
- status = macb_readl(bp, RSR);
- macb_writel(bp, RSR, status);
+ entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+ desc = macb_rx_desc(queue, entry);
- netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ return (desc->addr & MACB_BIT(RX_USED)) != 0;
+}
+
+static int macb_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
+ struct macb *bp = queue->bp;
+ int work_done;
work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
- /* Packets received while interrupts were disabled */
- status = macb_readl(bp, RSR);
- if (status) {
+ netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, bp->rx_intr_mask);
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were received while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_rx_pending(queue)) {
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- napi_reschedule(napi);
- } else {
- queue_writel(queue, IER, bp->rx_intr_mask);
+ netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
+ napi_schedule(napi);
}
}
@@ -1443,11 +1638,95 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(unsigned long data)
+static void macb_tx_restart(struct macb_queue *queue)
+{
+ struct macb *bp = queue->bp;
+ unsigned int head_idx, tbqp;
+
+ spin_lock(&queue->tx_ptr_lock);
+
+ if (queue->tx_head == queue->tx_tail)
+ goto out_tx_ptr_unlock;
+
+ tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
+ tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
+ head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
+
+ if (tbqp == head_idx)
+ goto out_tx_ptr_unlock;
+
+ spin_lock_irq(&bp->lock);
+ macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
+
+out_tx_ptr_unlock:
+ spin_unlock(&queue->tx_ptr_lock);
+}
+
+static bool macb_tx_complete_pending(struct macb_queue *queue)
+{
+ bool retval = false;
+
+ spin_lock(&queue->tx_ptr_lock);
+ if (queue->tx_head != queue->tx_tail) {
+ /* Make hw descriptor updates visible to CPU */
+ rmb();
+
+ if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
+ retval = true;
+ }
+ spin_unlock(&queue->tx_ptr_lock);
+ return retval;
+}
+
+static int macb_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
+ struct macb *bp = queue->bp;
+ int work_done;
+
+ work_done = macb_tx_complete(queue, budget);
+
+ rmb(); // ensure txubr_pending is up to date
+ if (queue->txubr_pending) {
+ queue->txubr_pending = false;
+ netdev_vdbg(bp->dev, "poll: tx restart\n");
+ macb_tx_restart(queue);
+ }
+
+ netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
+ (unsigned int)(queue - bp->queues), work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ queue_writel(queue, IER, MACB_BIT(TCOMP));
+
+ /* Packet completions only seem to propagate to raise
+ * interrupts when interrupts are enabled at the time, so if
+ * packets were sent while interrupts were disabled,
+ * they will not cause another interrupt to be generated when
+ * interrupts are re-enabled.
+ * Check for this case here to avoid losing a wakeup. This can
+ * potentially race with the interrupt handler doing the same
+ * actions if an interrupt is raised just after enabling them,
+ * but this should be harmless.
+ */
+ if (macb_tx_complete_pending(queue)) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP));
+ netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
+ napi_schedule(napi);
+ }
+ }
+
+ return work_done;
+}
+
+static void macb_hresp_error_task(struct tasklet_struct *t)
{
- struct macb *bp = (struct macb *)data;
+ struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
struct net_device *dev = bp->dev;
- struct macb_queue *queue = bp->queues;
+ struct macb_queue *queue;
unsigned int q;
u32 ctrl;
@@ -1482,19 +1761,62 @@ static void macb_hresp_error_task(unsigned long data)
netif_tx_start_all_queues(dev);
}
-static void macb_tx_restart(struct macb_queue *queue)
+static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
- unsigned int head = queue->tx_head;
- unsigned int tail = queue->tx_tail;
+ struct macb_queue *queue = dev_id;
struct macb *bp = queue->bp;
+ u32 status;
- if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
- queue_writel(queue, ISR, MACB_BIT(TXUBR));
+ status = queue_readl(queue, ISR);
- if (head == tail)
- return;
+ if (unlikely(!status))
+ return IRQ_NONE;
- macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_lock(&bp->lock);
+
+ if (status & MACB_BIT(WOL)) {
+ queue_writel(queue, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(WOL));
+ pm_wakeup_event(&bp->pdev->dev, 0);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
+{
+ struct macb_queue *queue = dev_id;
+ struct macb *bp = queue->bp;
+ u32 status;
+
+ status = queue_readl(queue, ISR);
+
+ if (unlikely(!status))
+ return IRQ_NONE;
+
+ spin_lock(&bp->lock);
+
+ if (status & GEM_BIT(WOL)) {
+ queue_writel(queue, IDR, GEM_BIT(WOL));
+ gem_writel(bp, WOL, 0);
+ netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
+ (unsigned int)(queue - bp->queues),
+ (unsigned long)status);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, GEM_BIT(WOL));
+ pm_wakeup_event(&bp->pdev->dev, 0);
+ }
+
+ spin_unlock(&bp->lock);
+
+ return IRQ_HANDLED;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id)
@@ -1535,9 +1857,27 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
- if (napi_schedule_prep(&queue->napi)) {
+ if (napi_schedule_prep(&queue->napi_rx)) {
netdev_vdbg(bp->dev, "scheduling RX softirq\n");
- __napi_schedule(&queue->napi);
+ __napi_schedule(&queue->napi_rx);
+ }
+ }
+
+ if (status & (MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR))) {
+ queue_writel(queue, IDR, MACB_BIT(TCOMP));
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, MACB_BIT(TCOMP) |
+ MACB_BIT(TXUBR));
+
+ if (status & MACB_BIT(TXUBR)) {
+ queue->txubr_pending = true;
+ wmb(); // ensure softirq can see update
+ }
+
+ if (napi_schedule_prep(&queue->napi_tx)) {
+ netdev_vdbg(bp->dev, "scheduling TX softirq\n");
+ __napi_schedule(&queue->napi_tx);
}
}
@@ -1551,12 +1891,6 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
break;
}
- if (status & MACB_BIT(TCOMP))
- macb_tx_interrupt(queue);
-
- if (status & MACB_BIT(TXUBR))
- macb_tx_restart(queue);
-
/* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1762,7 +2096,8 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(TX_LSO, lso_ctrl);
ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
if ((bp->dev->features & NETIF_F_HW_CSUM) &&
- skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
+ skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
+ !ptp_one_step_sync(skb))
ctrl |= MACB_BIT(TX_NOCRC);
} else
/* Only set MSS/MFS on payload descriptors
@@ -1789,7 +2124,7 @@ dma_error:
for (i = queue->tx_head; i != tx_head; i++) {
tx_skb = macb_tx_skb(queue, i);
- macb_tx_unmap(bp, tx_skb);
+ macb_tx_unmap(bp, tx_skb, 0);
}
return 0;
@@ -1850,7 +2185,8 @@ static inline int macb_clear_csum(struct sk_buff *skb)
static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
{
- bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
+ bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
+ skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
@@ -1859,7 +2195,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
if (!(ndev->features & NETIF_F_HW_CSUM) ||
!((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
- skb_shinfo(*skb)->gso_size) /* Not available for GSO */
+ skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
return 0;
if (padlen <= 0) {
@@ -1910,10 +2246,9 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 queue_index = skb_get_queue_mapping(skb);
struct macb *bp = netdev_priv(dev);
struct macb_queue *queue = &bp->queues[queue_index];
- unsigned long flags;
unsigned int desc_cnt, nr_frags, frag_size, f;
unsigned int hdrlen;
- bool is_lso, is_udp = 0;
+ bool is_lso;
netdev_tx_t ret = NETDEV_TX_OK;
if (macb_clear_csum(skb)) {
@@ -1929,14 +2264,12 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
is_lso = (skb_shinfo(skb)->gso_size != 0);
if (is_lso) {
- is_udp = !!(ip_hdr(skb)->protocol == IPPROTO_UDP);
-
/* length of headers */
- if (is_udp)
+ if (ip_hdr(skb)->protocol == IPPROTO_UDP)
/* only queue eth + ip headers separately for UDP */
hdrlen = skb_transport_offset(skb);
else
- hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdrlen = skb_tcp_all_headers(skb);
if (skb_headlen(skb) < hdrlen) {
netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
/* if this is required, would need to copy to single buffer */
@@ -1969,16 +2302,16 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
}
- spin_lock_irqsave(&bp->lock, flags);
+ spin_lock_bh(&queue->tx_ptr_lock);
/* This is a hard error, log it. */
if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
bp->tx_ring_size) < desc_cnt) {
netif_stop_subqueue(dev, queue_index);
- spin_unlock_irqrestore(&bp->lock, flags);
netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
queue->tx_head, queue->tx_tail);
- return NETDEV_TX_BUSY;
+ ret = NETDEV_TX_BUSY;
+ goto unlock;
}
/* Map socket buffer for DMA transfer */
@@ -1991,13 +2324,15 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
skb_tx_timestamp(skb);
+ spin_lock_irq(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+ spin_unlock_irq(&bp->lock);
if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
netif_stop_subqueue(dev, queue_index);
unlock:
- spin_unlock_irqrestore(&bp->lock, flags);
+ spin_unlock_bh(&queue->tx_ptr_lock);
return ret;
}
@@ -2517,9 +2852,9 @@ static int macb_open(struct net_device *dev)
netdev_dbg(bp->dev, "open\n");
- err = pm_runtime_get_sync(&bp->pdev->dev);
+ err = pm_runtime_resume_and_get(&bp->pdev->dev);
if (err < 0)
- goto pm_exit;
+ return err;
/* RX buffers initialization */
macb_init_rx_buffer_size(bp, bufsz);
@@ -2531,26 +2866,41 @@ static int macb_open(struct net_device *dev)
goto pm_exit;
}
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_enable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
macb_init_hw(bp);
+ err = phy_power_on(bp->sgmii_phy);
+ if (err)
+ goto reset_hw;
+
err = macb_phylink_connect(bp);
if (err)
- goto pm_exit;
+ goto phy_off;
netif_tx_start_all_queues(dev);
if (bp->ptp_info)
bp->ptp_info->ptp_init(dev);
-pm_exit:
- if (err) {
- pm_runtime_put_sync(&bp->pdev->dev);
- return err;
- }
return 0;
+
+phy_off:
+ phy_power_off(bp->sgmii_phy);
+
+reset_hw:
+ macb_reset_hw(bp);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
+ macb_free_consistent(bp);
+pm_exit:
+ pm_runtime_put_sync(&bp->pdev->dev);
+ return err;
}
static int macb_close(struct net_device *dev)
@@ -2562,12 +2912,16 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- napi_disable(&queue->napi);
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
+ phy_power_off(bp->sgmii_phy);
+
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
netif_carrier_off(dev);
@@ -2627,6 +2981,9 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
struct gem_stats *hwstat = &bp->hw_stats.gem;
struct net_device_stats *nstat = &bp->dev->stats;
+ if (!netif_running(bp->dev))
+ return nstat;
+
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -2797,11 +3154,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= WAKE_MAGIC;
+
+ if (bp->wol & MACB_WOL_ENABLED)
+ wol->wolopts |= WAKE_MAGIC;
+ }
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2809,9 +3168,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct macb *bp = netdev_priv(netdev);
int ret;
+ /* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- if (!ret)
- return 0;
+ /* Don't manage WoL on MAC if handled by the PHY
+ * or if there's a failure in talking to the PHY
+ */
+ if (!ret || ret != -EOPNOTSUPP)
+ return ret;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -2844,7 +3207,9 @@ static int macb_set_link_ksettings(struct net_device *netdev,
}
static void macb_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
@@ -2856,7 +3221,9 @@ static void macb_get_ringparam(struct net_device *netdev,
}
static int macb_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
{
struct macb *bp = netdev_priv(netdev);
u32 new_rx_size, new_tx_size;
@@ -3023,6 +3390,9 @@ static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
bool cmp_b = false;
bool cmp_c = false;
+ if (!macb_is_gem(bp))
+ return;
+
tp4sp_v = &(fs->h_u.tcp_ip4_spec);
tp4sp_m = &(fs->m_u.tcp_ip4_spec);
@@ -3114,7 +3484,8 @@ static int gem_add_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
spin_lock_irqsave(&bp->rx_fs_lock, flags);
@@ -3167,8 +3538,8 @@ static int gem_del_flow_filter(struct net_device *netdev,
fs->flow_type, (int)fs->ring_cookie, fs->location,
htonl(fs->h_u.tcp_ip4_spec.ip4src),
htonl(fs->h_u.tcp_ip4_spec.ip4dst),
- htons(fs->h_u.tcp_ip4_spec.psrc),
- htons(fs->h_u.tcp_ip4_spec.pdst));
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
+ be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
gem_writel_n(bp, SCRT2, fs->location, 0);
@@ -3288,6 +3659,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
+ .get_wol = macb_get_wol,
+ .set_wol = macb_set_wol,
.get_link = ethtool_op_get_link,
.get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats,
@@ -3389,6 +3762,7 @@ static void macb_restore_features(struct macb *bp)
{
struct net_device *netdev = bp->dev;
netdev_features_t features = netdev->features;
+ struct ethtool_rx_fs_item *item;
/* TX checksum offload */
macb_set_txcsum_feature(bp, features);
@@ -3397,6 +3771,9 @@ static void macb_restore_features(struct macb *bp)
macb_set_rxcsum_feature(bp, features);
/* RX Flow Filters */
+ list_for_each_entry(item, &bp->rx_fs_list.list, list)
+ gem_prog_cmp_regs(bp, &item->fs);
+
macb_set_rxflow_feature(bp, features);
}
@@ -3406,7 +3783,7 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_start_xmit = macb_start_xmit,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_get_stats = macb_get_stats,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = macb_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
@@ -3434,6 +3811,11 @@ static void macb_configure_caps(struct macb *bp,
dcfg = gem_readl(bp, DCFG1);
if (GEM_BFEXT(IRQCOR, dcfg) == 0)
bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+ if (GEM_BFEXT(NO_PCS, dcfg) == 0)
+ bp->caps |= MACB_CAPS_PCS;
+ dcfg = gem_readl(bp, DCFG12);
+ if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
+ bp->caps |= MACB_CAPS_HIGH_SPEED;
dcfg = gem_readl(bp, DCFG2);
if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
bp->caps |= MACB_CAPS_FIFO_MODE;
@@ -3458,8 +3840,6 @@ static void macb_probe_queues(void __iomem *mem,
unsigned int *queue_mask,
unsigned int *num_queues)
{
- unsigned int hw_q;
-
*queue_mask = 0x1;
*num_queues = 1;
@@ -3473,13 +3853,22 @@ static void macb_probe_queues(void __iomem *mem,
return;
/* bit 0 is never set but queue 0 always exists */
- *queue_mask = readl_relaxed(mem + GEM_DCFG6) & 0xff;
+ *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
+ *num_queues = hweight32(*queue_mask);
+}
- *queue_mask |= 0x1;
+static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
+ struct clk *rx_clk, struct clk *tsu_clk)
+{
+ struct clk_bulk_data clks[] = {
+ { .clk = tsu_clk, },
+ { .clk = rx_clk, },
+ { .clk = pclk, },
+ { .clk = hclk, },
+ { .clk = tx_clk },
+ };
- for (hw_q = 1; hw_q < MACB_MAX_QUEUES; ++hw_q)
- if (*queue_mask & (1 << hw_q))
- (*num_queues)++;
+ clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
}
static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
@@ -3498,23 +3887,15 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
*hclk = devm_clk_get(&pdev->dev, "hclk");
}
- if (IS_ERR_OR_NULL(*pclk)) {
- err = PTR_ERR(*pclk);
- if (!err)
- err = -ENODEV;
-
- dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
- return err;
- }
+ if (IS_ERR_OR_NULL(*pclk))
+ return dev_err_probe(&pdev->dev,
+ IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
+ "failed to get pclk\n");
- if (IS_ERR_OR_NULL(*hclk)) {
- err = PTR_ERR(*hclk);
- if (!err)
- err = -ENODEV;
-
- dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
- return err;
- }
+ if (IS_ERR_OR_NULL(*hclk))
+ return dev_err_probe(&pdev->dev,
+ IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
+ "failed to get hclk\n");
*tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
if (IS_ERR(*tx_clk))
@@ -3597,7 +3978,9 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
- netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
+ spin_lock_init(&queue->tx_ptr_lock);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -3687,6 +4070,7 @@ static int macb_init(struct platform_device *pdev)
reg = gem_readl(bp, DCFG8);
bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
GEM_BFEXT(T2SCR, reg));
+ INIT_LIST_HEAD(&bp->rx_fs_list.list);
if (bp->max_tuples > 0) {
/* also needs one ethtype match to check IPv4 */
if (GEM_BFEXT(SCR2ETH, reg) > 0) {
@@ -3697,7 +4081,6 @@ static int macb_init(struct platform_device *pdev)
/* Filtering is supported in hw but don't enable it in kernel now */
dev->hw_features |= NETIF_F_NTUPLE;
/* init Rx flow definitions */
- INIT_LIST_HEAD(&bp->rx_fs_list.list);
bp->rx_fs_list.count = 0;
spin_lock_init(&bp->rx_fs_lock);
} else
@@ -3706,16 +4089,16 @@ static int macb_init(struct platform_device *pdev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
- if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
- val = GEM_BIT(RGMII);
+ if (phy_interface_mode_is_rgmii(bp->phy_interface))
+ val = bp->usrio->rgmii;
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(RMII);
+ val = bp->usrio->rmii;
else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
- val = MACB_BIT(MII);
+ val = bp->usrio->mii;
if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
- val |= MACB_BIT(CLKEN);
+ val |= bp->usrio->refclk;
macb_or_gem_writel(bp, USRIO, val);
}
@@ -3730,6 +4113,13 @@ static int macb_init(struct platform_device *pdev)
return 0;
}
+static const struct macb_usrio_config macb_default_usrio = {
+ .mii = MACB_BIT(MII),
+ .rmii = MACB_BIT(RMII),
+ .rgmii = GEM_BIT(RGMII),
+ .refclk = MACB_BIT(CLKEN),
+};
+
#if defined(CONFIG_OF)
/* 1518 rounded up */
#define AT91ETHER_MAX_RBUFF_SZ 0x600
@@ -3738,15 +4128,9 @@ static int macb_init(struct platform_device *pdev)
static struct sifive_fu540_macb_mgmt *mgmt;
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
{
- struct macb *lp = netdev_priv(dev);
struct macb_queue *q = &lp->queues[0];
- struct macb_dma_desc *desc;
- dma_addr_t addr;
- u32 ctl;
- int i;
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
@@ -3768,6 +4152,43 @@ static int at91ether_start(struct net_device *dev)
return -ENOMEM;
}
+ return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+
+ if (q->rx_ring) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ macb_dma_desc_get_size(lp),
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
+ }
+
+ if (q->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
+ }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ u32 ctl;
+ int i, ret;
+
+ ret = at91ether_alloc_coherent(lp);
+ if (ret)
+ return ret;
+
addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
desc = macb_rx_desc(q, i);
@@ -3789,9 +4210,39 @@ static int at91ether_start(struct net_device *dev)
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
return 0;
}
+static void at91ether_stop(struct macb *lp)
+{
+ u32 ctl;
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Free resources. */
+ at91ether_free_coherent(lp);
+}
+
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
@@ -3799,7 +4250,7 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
- ret = pm_runtime_get_sync(&lp->pdev->dev);
+ ret = pm_runtime_resume_and_get(&lp->pdev->dev);
if (ret < 0)
return ret;
@@ -3809,63 +4260,36 @@ static int at91ether_open(struct net_device *dev)
macb_set_hwaddr(lp);
- ret = at91ether_start(dev);
+ ret = at91ether_start(lp);
if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
+ goto pm_exit;
ret = macb_phylink_connect(lp);
if (ret)
- return ret;
+ goto stop;
netif_start_queue(dev);
return 0;
+
+stop:
+ at91ether_stop(lp);
+pm_exit:
+ pm_runtime_put_sync(&lp->pdev->dev);
+ return ret;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- struct macb_queue *q = &lp->queues[0];
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
netif_stop_queue(dev);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR *
- macb_dma_desc_get_size(lp),
- q->rx_ring, q->rx_ring_dma);
- q->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- q->rx_buffers, q->rx_buffers_dma);
- q->rx_buffers = NULL;
+ at91ether_stop(lp);
return pm_runtime_put(&lp->pdev->dev);
}
@@ -3877,14 +4301,16 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
struct macb *lp = netdev_priv(dev);
if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+ int desc = 0;
+
netif_stop_queue(dev);
/* Store packet information (to free when Tx completed) */
- lp->skb = skb;
- lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
+ lp->rm9200_txq[desc].skb = skb;
+ lp->rm9200_txq[desc].size = skb->len;
+ lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
netdev_err(dev, "%s: DMA mapping error\n", __func__);
@@ -3892,7 +4318,7 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
}
/* Set address of the data in the Transmit Address register */
- macb_writel(lp, TAR, lp->skb_physaddr);
+ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
/* Set length of the packet in the Transmit Control register */
macb_writel(lp, TCR, skb->len);
@@ -3955,6 +4381,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct macb *lp = netdev_priv(dev);
u32 intstatus, ctl;
+ unsigned int desc;
/* MAC Interrupt Status register indicates what interrupts are pending.
* It is automatically cleared once read.
@@ -3971,13 +4398,14 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
dev->stats.tx_errors++;
- if (lp->skb) {
- dev_consume_skb_irq(lp->skb);
- lp->skb = NULL;
- dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
- lp->skb_length, DMA_TO_DEVICE);
+ desc = 0;
+ if (lp->rm9200_txq[desc].skb) {
+ dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
+ lp->rm9200_txq[desc].skb = NULL;
+ dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
+ lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->skb_length;
+ dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
}
netif_wake_queue(dev);
}
@@ -4014,7 +4442,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
.ndo_get_stats = macb_get_stats,
.ndo_set_rx_mode = macb_set_rx_mode,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_do_ioctl = macb_ioctl,
+ .ndo_eth_ioctl = macb_ioctl,
.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = at91ether_poll_controller,
@@ -4130,8 +4558,10 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
return err;
mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
- if (!mgmt)
- return -ENOMEM;
+ if (!mgmt) {
+ err = -ENOMEM;
+ goto err_disable_clks;
+ }
init.name = "sifive-gemgxl-mgmt";
init.ops = &fu540_c000_ops;
@@ -4142,33 +4572,100 @@ static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
mgmt->hw.init = &init;
*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
- if (IS_ERR(*tx_clk))
- return PTR_ERR(*tx_clk);
+ if (IS_ERR(*tx_clk)) {
+ err = PTR_ERR(*tx_clk);
+ goto err_disable_clks;
+ }
err = clk_prepare_enable(*tx_clk);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
- else
+ *tx_clk = NULL;
+ goto err_disable_clks;
+ } else {
dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
+ }
return 0;
+
+err_disable_clks:
+ macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);
+
+ return err;
}
static int fu540_c000_init(struct platform_device *pdev)
{
- struct resource *res;
+ mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(mgmt->reg))
+ return PTR_ERR(mgmt->reg);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
- return -ENODEV;
+ return macb_init(pdev);
+}
- mgmt->reg = ioremap(res->start, resource_size(res));
- if (!mgmt->reg)
- return -ENOMEM;
+static int init_reset_optional(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct macb *bp = netdev_priv(dev);
+ int ret;
- return macb_init(pdev);
+ if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ /* Ensure PHY device used in SGMII mode is ready */
+ bp->sgmii_phy = devm_phy_optional_get(&pdev->dev, NULL);
+
+ if (IS_ERR(bp->sgmii_phy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(bp->sgmii_phy),
+ "failed to get SGMII PHY\n");
+
+ ret = phy_init(bp->sgmii_phy);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to init SGMII PHY\n");
+ }
+
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
+ /* Fully reset controller at hardware level if mapped in device tree */
+ ret = device_reset_optional(&pdev->dev);
+ if (ret) {
+ phy_exit(bp->sgmii_phy);
+ return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
+ }
+
+ ret = macb_init(pdev);
+
+err_out_phy_exit:
+ if (ret)
+ phy_exit(bp->sgmii_phy);
+
+ return ret;
}
+static const struct macb_usrio_config sama7g5_usrio = {
+ .mii = 0,
+ .rmii = 1,
+ .rgmii = 2,
+ .refclk = BIT(2),
+ .hdfctlen = BIT(6),
+};
+
static const struct macb_config fu540_c000_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP,
@@ -4176,19 +4673,22 @@ static const struct macb_config fu540_c000_config = {
.clk_init = fu540_c000_clk_init,
.init = fu540_c000_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config at91sam9260_config = {
.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3macb_config = {
- .caps = MACB_CAPS_SG_DISABLED
- | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .caps = MACB_CAPS_SG_DISABLED |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config pc302gem_config = {
@@ -4196,6 +4696,7 @@ static const struct macb_config pc302gem_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d2_config = {
@@ -4203,15 +4704,25 @@ static const struct macb_config sama5d2_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config sama5d29_config = {
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d3_config = {
- .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
- | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
+ .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config sama5d4_config = {
@@ -4219,28 +4730,32 @@ static const struct macb_config sama5d4_config = {
.dma_burst_length = 4,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config emac_config = {
.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config np4_config = {
.caps = MACB_CAPS_USRIO_DISABLED,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
- .init = macb_init,
+ .init = init_reset_optional,
.jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct macb_config zynq_config = {
@@ -4249,10 +4764,49 @@ static const struct macb_config zynq_config = {
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
+};
+
+static const struct macb_config mpfs_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .usrio = &macb_default_usrio,
+ .jumbo_max_len = 10240,
+};
+
+static const struct macb_config sama7g5_gem_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_MIIONRGMII,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
+};
+
+static const struct macb_config sama7g5_emac_config = {
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+ .usrio = &sama7g5_usrio,
+};
+
+static const struct macb_config versal_config = {
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ .dma_burst_length = 16,
+ .clk_init = macb_clk_init,
+ .init = init_reset_optional,
+ .jumbo_max_len = 10240,
+ .usrio = &macb_default_usrio,
};
static const struct of_device_id macb_dt_ids[] = {
- { .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
{ .compatible = "cdns,macb" },
{ .compatible = "cdns,np4-macb", .data = &np4_config },
@@ -4260,14 +4814,21 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
+ { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
- { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
- { .compatible = "cdns,zynq-gem", .data = &zynq_config },
+ { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, /* deprecated */
+ { .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
+ { .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
+ { .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
+ { .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
+ { .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config},
+ { .compatible = "xlnx,zynq-gem", .data = &zynq_config },
+ { .compatible = "xlnx,versal-gem", .data = &versal_config},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
@@ -4275,11 +4836,12 @@ MODULE_DEVICE_TABLE(of, macb_dt_ids);
static const struct macb_config default_gem_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
- MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_JUMBO |
+ MACB_CAPS_GEM_HAS_PTP,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
+ .usrio = &macb_default_usrio,
.jumbo_max_len = 10240,
};
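
For reference, the per-SoC macb_config entries above are consumed near the top of macb_probe(): the OF match data selects the config, and default_gem_config is the fallback when an entry carries no data. A condensed sketch of that selection, assuming the usual of_match_node() pattern (the helper name here is illustrative, not taken from the driver):

static const struct macb_config *macb_pick_config(struct platform_device *pdev)
{
	const struct macb_config *cfg = &default_gem_config;
	const struct of_device_id *match;

	/* of_match_node() safely returns NULL when there is no DT node */
	match = of_match_node(macb_dt_ids, pdev->dev.of_node);
	if (match && match->data)
		cfg = match->data;	/* e.g. &mpfs_config for "microchip,mpfs-macb" */

	return cfg;	/* caller picks up cfg->clk_init, cfg->init, cfg->usrio, ... */
}
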
@@ -4299,12 +4861,10 @@ static int macb_probe(struct platform_device *pdev)
struct net_device *dev;
struct resource *regs;
void __iomem *mem;
- const char *mac;
struct macb *bp;
int err, val;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mem = devm_ioremap_resource(&pdev->dev, regs);
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
if (IS_ERR(mem))
return PTR_ERR(mem);
@@ -4368,7 +4928,9 @@ static int macb_probe(struct platform_device *pdev)
bp->wol = 0;
if (of_get_property(np, "magic-packet", NULL))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+
+ bp->usrio = macb_config->usrio;
spin_lock_init(&bp->lock);
@@ -4377,7 +4939,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
@@ -4391,8 +4953,8 @@ static int macb_probe(struct platform_device *pdev)
/* MTU range: 68 - 1500 or 10240 */
dev->min_mtu = GEM_MTU_MIN_SIZE;
- if (bp->caps & MACB_CAPS_JUMBO)
- dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
+ if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
+ dev->max_mtu = bp->jumbo_max_len - ETH_HLEN - ETH_FCS_LEN;
else
dev->max_mtu = ETH_DATA_LEN;
@@ -4412,15 +4974,11 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- mac = of_get_mac_address(np);
- if (PTR_ERR(mac) == -EPROBE_DEFER) {
- err = -EPROBE_DEFER;
+ err = of_get_ethdev_address(np, bp->dev);
+ if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
- } else if (!IS_ERR_OR_NULL(mac)) {
- ether_addr_copy(bp->dev->dev_addr, mac);
- } else {
+ else if (err)
macb_get_hwaddr(bp);
- }
err = of_get_phy_mode(np, &interface);
if (err)
@@ -4429,8 +4987,6 @@ static int macb_probe(struct platform_device *pdev)
else
bp->phy_interface = interface;
- bp->speed = SPEED_UNKNOWN;
-
/* IP specific init */
err = init(pdev);
if (err)
@@ -4438,7 +4994,7 @@ static int macb_probe(struct platform_device *pdev)
err = macb_mii_init(bp);
if (err)
- goto err_out_free_netdev;
+ goto err_out_phy_exit;
netif_carrier_off(dev);
@@ -4448,8 +5004,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
- (unsigned long)bp);
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -4464,15 +5019,14 @@ err_out_unregister_mdio:
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
+err_out_phy_exit:
+ phy_exit(bp->sgmii_phy);
+
err_out_free_netdev:
free_netdev(dev);
err_disable_clocks:
- clk_disable_unprepare(tx_clk);
- clk_disable_unprepare(hclk);
- clk_disable_unprepare(pclk);
- clk_disable_unprepare(rx_clk);
- clk_disable_unprepare(tsu_clk);
+ macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
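
macb_clks_disable(), used in this error path and again in the remove and PM paths below, is introduced outside the hunks shown here. Because clk_disable_unprepare() ignores NULL clocks, a minimal sketch of such a helper is simply the five calls gathered in one place, which is also what lets the runtime-PM code further down pass NULL for clocks it wants to keep ticking (parameter names follow the call sites; the real helper may differ):

static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
			      struct clk *rx_clk, struct clk *tsu_clk)
{
	/* NULL clocks are silently skipped by the clk API */
	clk_disable_unprepare(tx_clk);
	clk_disable_unprepare(hclk);
	clk_disable_unprepare(pclk);
	clk_disable_unprepare(rx_clk);
	clk_disable_unprepare(tsu_clk);
}
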
@@ -4489,6 +5043,7 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
+ phy_exit(bp->sgmii_phy);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -4497,11 +5052,8 @@ static int macb_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
if (!pm_runtime_suspended(&pdev->dev)) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- clk_disable_unprepare(bp->tsu_clk);
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk,
+ bp->rx_clk, bp->tsu_clk);
pm_runtime_set_suspended(&pdev->dev);
}
phylink_destroy(bp->phylink);
@@ -4515,41 +5067,88 @@ static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- struct macb_queue *queue = bp->queues;
+ struct macb_queue *queue;
unsigned long flags;
unsigned int q;
+ int err;
if (!netif_running(netdev))
return 0;
if (bp->wol & MACB_WOL_ENABLED) {
- macb_writel(bp, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
- enable_irq_wake(bp->queues[0].irq);
- netif_device_detach(netdev);
- } else {
- netif_device_detach(netdev);
+ spin_lock_irqsave(&bp->lock, flags);
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_disable(&queue->napi);
+ ++q, ++queue) {
+ /* Disable all interrupts */
+ queue_writel(queue, IDR, -1);
+ queue_readl(queue, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(queue, ISR, -1);
+ }
+ /* Change interrupt handler and
+ * enable WoL IRQ on queue 0
+ */
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+ if (macb_is_gem(bp)) {
+ err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+ queue_writel(bp->queues, IER, GEM_BIT(WOL));
+ gem_writel(bp, WOL, MACB_BIT(MAG));
+ } else {
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+ queue_writel(bp->queues, IER, MACB_BIT(WOL));
+ macb_writel(bp, WOL, MACB_BIT(MAG));
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ enable_irq_wake(bp->queues[0].irq);
+ }
+
+ netif_device_detach(netdev);
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ napi_disable(&queue->napi_rx);
+ napi_disable(&queue->napi_tx);
+ }
+
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock();
phylink_stop(bp->phylink);
+ phy_exit(bp->sgmii_phy);
rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags);
+ }
- if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
- bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+ bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
- if (netdev->hw_features & NETIF_F_NTUPLE)
- bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
- }
+ if (netdev->hw_features & NETIF_F_NTUPLE)
+ bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
- netif_carrier_off(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_suspend(dev);
return 0;
}
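
gem_wol_interrupt() and macb_wol_interrupt(), requested while entering WoL suspend above, are likewise added outside these hunks. A sketch of the GEM flavour, assuming its only job is to acknowledge the magic-packet interrupt and report the wakeup; the MACB variant would use MACB_BIT(WOL) and macb_writel() instead, mirroring the request path above:

static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 status = queue_readl(queue, ISR);

	if (unlikely(!status))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	if (status & GEM_BIT(WOL)) {
		/* Mask and clear the WoL event, then flag the wakeup source */
		queue_writel(queue, IDR, GEM_BIT(WOL));
		gem_writel(bp, WOL, 0);
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, -1);
		pm_wakeup_event(&bp->dev->dev, 0);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
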
@@ -4558,38 +5157,77 @@ static int __maybe_unused macb_resume(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- struct macb_queue *queue = bp->queues;
+ struct macb_queue *queue;
+ unsigned long flags;
unsigned int q;
+ int err;
if (!netif_running(netdev))
return 0;
- pm_runtime_force_resume(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) {
- macb_writel(bp, IDR, MACB_BIT(WOL));
- macb_writel(bp, WOL, 0);
- disable_irq_wake(bp->queues[0].irq);
- } else {
- macb_writel(bp, NCR, MACB_BIT(MPE));
-
- if (netdev->hw_features & NETIF_F_NTUPLE)
- gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+ spin_lock_irqsave(&bp->lock, flags);
+ /* Disable WoL */
+ if (macb_is_gem(bp)) {
+ queue_writel(bp->queues, IDR, GEM_BIT(WOL));
+ gem_writel(bp, WOL, 0);
+ } else {
+ queue_writel(bp->queues, IDR, MACB_BIT(WOL));
+ macb_writel(bp, WOL, 0);
+ }
+ /* Clear ISR on queue 0 */
+ queue_readl(bp->queues, ISR);
+ if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+ queue_writel(bp->queues, ISR, -1);
+ /* Replace interrupt handler on queue 0 */
+ devm_free_irq(dev, bp->queues[0].irq, bp->queues);
+ err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
+ IRQF_SHARED, netdev->name, bp->queues);
+ if (err) {
+ dev_err(dev,
+ "Unable to request IRQ %d (error %d)\n",
+ bp->queues[0].irq, err);
+ spin_unlock_irqrestore(&bp->lock, flags);
+ return err;
+ }
+ spin_unlock_irqrestore(&bp->lock, flags);
- if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
- macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+ disable_irq_wake(bp->queues[0].irq);
- for (q = 0, queue = bp->queues; q < bp->num_queues;
- ++q, ++queue)
- napi_enable(&queue->napi);
+ /* Make sure the phy is stopped before moving
+ * on to the common restore path
+ */
rtnl_lock();
- phylink_start(bp->phylink);
+ phylink_stop(bp->phylink);
rtnl_unlock();
}
+ for (q = 0, queue = bp->queues; q < bp->num_queues;
+ ++q, ++queue) {
+ napi_enable(&queue->napi_rx);
+ napi_enable(&queue->napi_tx);
+ }
+
+ if (netdev->hw_features & NETIF_F_NTUPLE)
+ gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
+
+ if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
+ macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
+
+ macb_writel(bp, NCR, MACB_BIT(MPE));
macb_init_hw(bp);
macb_set_rx_mode(netdev);
macb_restore_features(bp);
+ rtnl_lock();
+ if (!device_may_wakeup(&bp->dev->dev))
+ phy_init(bp->sgmii_phy);
+
+ phylink_start(bp->phylink);
+ rtnl_unlock();
+
netif_device_attach(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_init(netdev);
@@ -4602,13 +5240,10 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
- clk_disable_unprepare(bp->tx_clk);
- clk_disable_unprepare(bp->hclk);
- clk_disable_unprepare(bp->pclk);
- clk_disable_unprepare(bp->rx_clk);
- }
- clk_disable_unprepare(bp->tsu_clk);
+ if (!(device_may_wakeup(dev)))
+ macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
+ else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
+ macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);
return 0;
}
@@ -4618,13 +5253,15 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
clk_prepare_enable(bp->rx_clk);
+ clk_prepare_enable(bp->tsu_clk);
+ } else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
+ clk_prepare_enable(bp->tsu_clk);
}
- clk_prepare_enable(bp->tsu_clk);
return 0;
}