Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  838
1 file changed, 401 insertions(+), 437 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 06dd51f47cfd..c7c9e5f162e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -35,6 +35,7 @@
#include <linux/seq_file.h>
#endif /* CONFIG_DEBUG_FS */
#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
#include <net/pkt_cls.h>
#include "stmmac_ptp.h"
#include "stmmac.h"
@@ -318,21 +319,6 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
}
/**
- * stmmac_hw_fix_mac_speed - callback for speed selection
- * @priv: driver private structure
- * Description: on some platforms (e.g. ST), some HW system configuration
- * registers have to be set according to the link speed negotiated.
- */
-static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
-{
- struct net_device *ndev = priv->dev;
- struct phy_device *phydev = ndev->phydev;
-
- if (likely(priv->plat->fix_mac_speed))
- priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
-}
-
-/**
* stmmac_enable_eee_mode - check and enter in LPI mode
* @priv: driver private structure
* Description: this function is to verify and enter in LPI mode in case of
@@ -395,14 +381,7 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
*/
bool stmmac_eee_init(struct stmmac_priv *priv)
{
- struct net_device *ndev = priv->dev;
- int interface = priv->plat->interface;
- bool ret = false;
-
- if ((interface != PHY_INTERFACE_MODE_MII) &&
- (interface != PHY_INTERFACE_MODE_GMII) &&
- !phy_interface_mode_is_rgmii(interface))
- goto out;
+ int tx_lpi_timer = priv->tx_lpi_timer;
/* Using PCS we cannot deal with the phy registers at this stage
* so we do not support extra features like EEE.
@@ -410,52 +389,35 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
(priv->hw->pcs == STMMAC_PCS_TBI) ||
(priv->hw->pcs == STMMAC_PCS_RTBI))
- goto out;
-
- /* MAC core supports the EEE feature. */
- if (priv->dma_cap.eee) {
- int tx_lpi_timer = priv->tx_lpi_timer;
-
- /* Check if the PHY supports EEE */
- if (phy_init_eee(ndev->phydev, 1)) {
- /* To manage at run-time if the EEE cannot be supported
- * anymore (for example because the lp caps have been
- * changed).
- * In that case the driver disable own timers.
- */
- mutex_lock(&priv->lock);
- if (priv->eee_active) {
- netdev_dbg(priv->dev, "disable EEE\n");
- del_timer_sync(&priv->eee_ctrl_timer);
- stmmac_set_eee_timer(priv, priv->hw, 0,
- tx_lpi_timer);
- }
- priv->eee_active = 0;
- mutex_unlock(&priv->lock);
- goto out;
- }
- /* Activate the EEE and start timers */
- mutex_lock(&priv->lock);
- if (!priv->eee_active) {
- priv->eee_active = 1;
- timer_setup(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer, 0);
- mod_timer(&priv->eee_ctrl_timer,
- STMMAC_LPI_T(eee_timer));
-
- stmmac_set_eee_timer(priv, priv->hw,
- STMMAC_DEFAULT_LIT_LS, tx_lpi_timer);
- }
- /* Set HW EEE according to the speed */
- stmmac_set_eee_pls(priv, priv->hw, ndev->phydev->link);
+ return false;
+
+ /* Check if MAC core supports the EEE feature. */
+ if (!priv->dma_cap.eee)
+ return false;
+
+ mutex_lock(&priv->lock);
- ret = true;
+ /* Check if it needs to be deactivated */
+ if (!priv->eee_active) {
+ if (priv->eee_enabled) {
+ netdev_dbg(priv->dev, "disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0, tx_lpi_timer);
+ }
mutex_unlock(&priv->lock);
+ return false;
+ }
- netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ if (priv->eee_active && !priv->eee_enabled) {
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ tx_lpi_timer);
}
-out:
- return ret;
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ return true;
}
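
With this rework stmmac_eee_init() no longer talks to the PHY itself: it only arms or tears down the LPI timer to match priv->eee_active, which is now owned by the phylink link callbacks (stmmac_mac_link_up()/stmmac_mac_link_down() further down in this diff). A simplified sketch of the resulting control flow; the wrapper name is invented for illustration, everything else comes from this patch:

	/* Sketch: the link callbacks drive the reworked helper. */
	static void eee_on_link_change(struct stmmac_priv *priv,
				       struct phy_device *phy, bool link_up)
	{
		if (link_up)
			/* phy_init_eee() returns 0 when both link partners
			 * agreed on EEE, a negative errno otherwise.
			 */
			priv->eee_active = phy_init_eee(phy, 1) >= 0;
		else
			priv->eee_active = false;

		/* Arms the ctrl timer when active, tears it down otherwise */
		priv->eee_enabled = stmmac_eee_init(priv);
	}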
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
@@ -838,97 +800,171 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-/**
- * stmmac_adjust_link - adjusts the link parameters
- * @dev: net device structure
- * Description: this is the helper called by the physical abstraction layer
- * drivers to communicate the phy link status. According the speed and duplex
- * this driver can invoke registered glue-logic as well.
- * It also invoke the eee initialization because it could happen when switch
- * on different networks (that are eee capable).
- */
-static void stmmac_adjust_link(struct net_device *dev)
+static void stmmac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
{
- struct stmmac_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- bool new_state = false;
-
- if (!phydev)
- return;
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int tx_cnt = priv->plat->tx_queues_to_use;
+ int max_speed = priv->plat->max_speed;
- mutex_lock(&priv->lock);
+ phylink_set(mac_supported, 10baseT_Half);
+ phylink_set(mac_supported, 10baseT_Full);
+ phylink_set(mac_supported, 100baseT_Half);
+ phylink_set(mac_supported, 100baseT_Full);
+
+ phylink_set(mac_supported, Autoneg);
+ phylink_set(mac_supported, Pause);
+ phylink_set(mac_supported, Asym_Pause);
+ phylink_set_port_modes(mac_supported);
+
+ if (priv->plat->has_gmac ||
+ priv->plat->has_gmac4 ||
+ priv->plat->has_xgmac) {
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
+ }
+
+ /* Cut down 1G if asked to */
+ if ((max_speed > 0) && (max_speed < 1000)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_xgmac) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 5000baseT_Full);
+ phylink_set(mac_supported, 10000baseSR_Full);
+ phylink_set(mac_supported, 10000baseLR_Full);
+ phylink_set(mac_supported, 10000baseER_Full);
+ phylink_set(mac_supported, 10000baseLRM_Full);
+ phylink_set(mac_supported, 10000baseT_Full);
+ phylink_set(mac_supported, 10000baseKX4_Full);
+ phylink_set(mac_supported, 10000baseKR_Full);
+ }
+
+ /* Half-Duplex can only work with a single queue */
+ if (tx_cnt > 1) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ bitmap_and(supported, supported, mac_supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_andnot(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mac_supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_andnot(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
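
The two-bitmap dance above deserves a gloss: mac_supported accumulates every mode the MAC can handle, mask accumulates modes to strip, and the result is supported &= mac_supported followed by supported &= ~mask (likewise for state->advertising). A worked example with illustrative values:

	/* Illustrative only: max_speed < 1000 knocks out a mode the MAC
	 * would otherwise support.
	 */
	phylink_set(mac_supported, 100baseT_Full);
	phylink_set(mac_supported, 1000baseT_Full);
	phylink_set(mask, 1000baseT_Full);	/* speed capped below 1G */

	bitmap_and(supported, supported, mac_supported,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);	/* keep MAC modes */
	bitmap_andnot(supported, supported, mask,
		      __ETHTOOL_LINK_MODE_MASK_NBITS);	/* drop masked modes */
	/* -> 1000baseT_Full is gone, 100baseT_Full survives (if the PHY
	 *    offered it in the first place).
	 */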
- if (phydev->link) {
- u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+static int stmmac_mac_link_state(struct phylink_config *config,
+ struct phylink_link_state *state)
+{
+ return -EOPNOTSUPP;
+}
- /* Now we make sure that we can be in full duplex mode.
- * If not, we operate in half-duplex mode. */
- if (phydev->duplex != priv->oldduplex) {
- new_state = true;
- if (!phydev->duplex)
- ctrl &= ~priv->hw->link.duplex;
- else
- ctrl |= priv->hw->link.duplex;
- priv->oldduplex = phydev->duplex;
- }
- /* Flow Control operation */
- if (phydev->pause)
- stmmac_mac_flow_ctrl(priv, phydev->duplex);
-
- if (phydev->speed != priv->speed) {
- new_state = true;
- ctrl &= ~priv->hw->link.speed_mask;
- switch (phydev->speed) {
- case SPEED_1000:
- ctrl |= priv->hw->link.speed1000;
- break;
- case SPEED_100:
- ctrl |= priv->hw->link.speed100;
- break;
- case SPEED_10:
- ctrl |= priv->hw->link.speed10;
- break;
- default:
- netif_warn(priv, link, priv->dev,
- "broken speed: %d\n", phydev->speed);
- phydev->speed = SPEED_UNKNOWN;
- break;
- }
- if (phydev->speed != SPEED_UNKNOWN)
- stmmac_hw_fix_mac_speed(priv);
- priv->speed = phydev->speed;
- }
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ u32 ctrl;
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl &= ~priv->hw->link.speed_mask;
- if (!priv->oldlink) {
- new_state = true;
- priv->oldlink = true;
+ if (state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (state->speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_5000:
+ ctrl |= priv->hw->link.xgmii.speed5000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.xgmii.speed2500;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (state->speed) {
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
+ break;
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
+ break;
+ default:
+ return;
}
- } else if (priv->oldlink) {
- new_state = true;
- priv->oldlink = false;
- priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
}
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
+ priv->speed = state->speed;
- mutex_unlock(&priv->lock);
+ if (priv->plat->fix_mac_speed)
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, state->speed);
- if (phydev->is_pseudo_fixed_link)
- /* Stop PHY layer to call the hook to adjust the link in case
- * of a switch is attached to the stmmac driver.
- */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ if (!state->duplex)
+ ctrl &= ~priv->hw->link.duplex;
else
- /* At this stage, init the EEE if supported.
- * Never called in case of fixed_link.
- */
+ ctrl |= priv->hw->link.duplex;
+
+ /* Flow Control operation */
+ if (state->pause)
+ stmmac_mac_flow_ctrl(priv, state->duplex);
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+}
+
+static void stmmac_mac_an_restart(struct phylink_config *config)
+{
+ /* Not Supported */
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface,
+ struct phy_device *phy)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+ priv->eee_active = phy_init_eee(phy, 1) >= 0;
priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, true);
+ }
}
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .validate = stmmac_validate,
+ .mac_link_state = stmmac_mac_link_state,
+ .mac_config = stmmac_mac_config,
+ .mac_an_restart = stmmac_mac_an_restart,
+ .mac_link_down = stmmac_mac_link_down,
+ .mac_link_up = stmmac_mac_link_up,
+};
+
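
For orientation, this table is the MAC half of the phylink contract; returning -EOPNOTSUPP from .mac_link_state above simply tells phylink that this MAC cannot report in-band link state. The driver-side lifecycle is roughly the following sketch (the concrete calls appear in stmmac_phy_setup(), stmmac_open() and stmmac_release() in this patch):

	pl = phylink_create(&priv->phylink_config, fwnode, phy_mode,
			    &stmmac_phylink_mac_ops);	/* at probe */
	phylink_of_phy_connect(pl, node, 0);	/* at open: attach the PHY */
	phylink_start(pl);	/* may trigger mac_config() + mac_link_up() */
	...
	phylink_stop(pl);	/* at stop: triggers mac_link_down() */
	phylink_disconnect_phy(pl);
	phylink_destroy(pl);	/* at remove */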
/**
* stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
* @priv: driver private structure
@@ -965,79 +1001,48 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 tx_cnt = priv->plat->tx_queues_to_use;
- struct phy_device *phydev;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
- char bus_id[MII_BUS_ID_SIZE];
- int interface = priv->plat->interface;
- int max_speed = priv->plat->max_speed;
- priv->oldlink = false;
- priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
+ struct device_node *node;
+ int ret;
- if (priv->plat->phy_node) {
- phydev = of_phy_connect(dev, priv->plat->phy_node,
- &stmmac_adjust_link, 0, interface);
- } else {
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ node = priv->plat->phylink_node;
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
- phy_id_fmt);
+ if (node)
+ ret = phylink_of_phy_connect(priv->phylink, node, 0);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
- interface);
- }
+ /* Some DT bindings do not set up the PHY handle. Let's try to
+ * parse it manually
+ */
+ if (!node || ret) {
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
- if (IS_ERR_OR_NULL(phydev)) {
- netdev_err(priv->dev, "Could not attach to PHY\n");
- if (!phydev)
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
return -ENODEV;
+ }
- return PTR_ERR(phydev);
+ ret = phylink_connect_phy(priv->phylink, phydev);
}
- /* Stop Advertising 1000BASE Capability if interface is not GMII */
- if ((interface == PHY_INTERFACE_MODE_MII) ||
- (interface == PHY_INTERFACE_MODE_RMII) ||
- (max_speed < 1000 && max_speed > 0))
- phy_set_max_speed(phydev, SPEED_100);
+ return ret;
+}
- /*
- * Half-duplex mode not supported with multiqueue
- * half-duplex can only works with single queue
- */
- if (tx_cnt > 1) {
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_100baseT_Half_BIT);
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- }
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int mode = priv->plat->interface;
+ struct phylink *phylink;
- /*
- * Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if (!priv->plat->phy_node && phydev->phy_id == 0) {
- phy_disconnect(phydev);
- return -ENODEV;
- }
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
- /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid
- * subsequent PHY polling, make sure we force a link transition if
- * we have a UP/DOWN/UP transition
- */
- if (phydev->is_pseudo_fixed_link)
- phydev->irq = PHY_POLL;
+ phylink = phylink_create(&priv->phylink_config, fwnode,
+ mode, &stmmac_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
- phy_attached_info(phydev);
+ priv->phylink = phylink;
return 0;
}
@@ -1192,26 +1197,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- struct sk_buff *skb;
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
- if (!skb) {
- netdev_err(priv->dev,
- "%s: Rx init fails; skb is NULL\n", __func__);
+ buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->page)
return -ENOMEM;
- }
- rx_q->rx_skbuff[i] = skb;
- rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
- netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
- dev_kfree_skb_any(skb);
- return -EINVAL;
- }
-
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[i]);
+ buf->addr = page_pool_get_dma_addr(buf->page);
+ stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
@@ -1227,13 +1220,11 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
- if (rx_q->rx_skbuff[i]) {
- dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_q->rx_skbuff[i]);
- }
- rx_q->rx_skbuff[i] = NULL;
+ if (buf->page)
+ page_pool_put_page(rx_q->page_pool, buf->page, false);
+ buf->page = NULL;
}
/**
@@ -1316,10 +1307,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
queue);
if (ret)
goto err_init_rx_buffers;
-
- netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
- rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
- (unsigned int)rx_q->rx_skbuff_dma[i]);
}
rx_q->cur_rx = 0;
@@ -1493,8 +1480,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
- kfree(rx_q->rx_skbuff_dma);
- kfree(rx_q->rx_skbuff);
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool) {
+ page_pool_request_shutdown(rx_q->page_pool);
+ page_pool_destroy(rx_q->page_pool);
+ }
}
}
@@ -1546,20 +1536,29 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct page_pool_params pp_params = { 0 };
rx_q->queue_index = queue;
rx_q->priv_data = priv;
- rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
- sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!rx_q->rx_skbuff_dma)
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = DMA_RX_SIZE;
+ pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
goto err_dma;
+ }
- rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!rx_q->rx_skbuff)
+ rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
goto err_dma;
if (priv->extend_desc) {
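
The page_pool replaces the old per-buffer dma_map_single()/dma_unmap_single() pairs: with PP_FLAG_DMA_MAP the pool maps each page once when the page enters the pool, so the fast path merely reads back the already-established mapping. A condensed sketch of the buffer lifecycle under these parameters:

	struct page *page = page_pool_dev_alloc_pages(rx_q->page_pool);
	dma_addr_t addr = page_pool_get_dma_addr(page);	/* mapped by the pool */
	/* ... hardware DMAs into the page, payload is copied to an skb ... */
	page_pool_recycle_direct(rx_q->page_pool, page);	/* NAPI fast path */
	/* or, outside NAPI context / on teardown: */
	page_pool_put_page(rx_q->page_pool, page, false);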
@@ -2049,14 +2048,15 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
struct stmmac_channel *ch = &priv->channel[chan];
if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
- napi_schedule_irqoff(&ch->rx_napi);
+ if (napi_schedule_prep(&ch->rx_napi)) {
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ __napi_schedule_irqoff(&ch->rx_napi);
+ status |= handle_tx;
+ }
}
- if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
- stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use))
napi_schedule_irqoff(&ch->tx_napi);
- }
return status;
}
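
The napi_schedule_prep()/__napi_schedule_irqoff() split closes a race: only the context that actually wins NAPI_STATE_SCHED may mask the DMA interrupt, so a concurrently completing poller can no longer be left with the IRQ disabled and no poll scheduled. As a pattern:

	/* Claim NAPI first, mask the device interrupt only on success. */
	if (napi_schedule_prep(&ch->rx_napi)) {
		/* we own NAPI_STATE_SCHED; nobody else is polling */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
		__napi_schedule_irqoff(&ch->rx_napi);
	}
	/* prep failed: NAPI already scheduled or running, nothing to do */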
@@ -2118,10 +2118,10 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- dwmac_mmc_intr_all_mask(priv->mmcaddr);
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->mmcaddr, mode);
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
netdev_info(priv->dev, "No MAC Management Counters available\n");
@@ -2154,8 +2154,8 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
if (!is_valid_ether_addr(priv->dev->dev_addr))
eth_hw_addr_random(priv->dev);
- netdev_info(priv->dev, "device MAC address %pM\n",
- priv->dev->dev_addr);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
}
}
@@ -2262,20 +2262,21 @@ static void stmmac_tx_timer(struct timer_list *t)
}
/**
- * stmmac_init_tx_coalesce - init tx mitigation options.
+ * stmmac_init_coalesce - init mitigation options.
* @priv: driver private structure
* Description:
- * This inits the transmit coalesce parameters: i.e. timer rate,
+ * This inits the coalesce parameters: i.e. timer rate,
* timer handler and default threshold used for enabling the
* interrupt on completion bit.
*/
-static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
{
u32 tx_channel_count = priv->plat->tx_queues_to_use;
u32 chan;
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
+ priv->rx_coal_frames = STMMAC_RX_FRAMES;
for (chan = 0; chan < tx_channel_count; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2561,9 +2562,9 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
if (priv->use_riwt) {
- ret = stmmac_rx_watchdog(priv, priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
+ ret = stmmac_rx_watchdog(priv, priv->ioaddr, MIN_DMA_RIWT, rx_cnt);
if (!ret)
- priv->rx_riwt = MAX_DMA_RIWT;
+ priv->rx_riwt = MIN_DMA_RIWT;
}
if (priv->hw->pcs)
@@ -2645,10 +2646,9 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}
- stmmac_init_tx_coalesce(priv);
+ stmmac_init_coalesce(priv);
- if (dev->phydev)
- phy_start(dev->phydev);
+ phylink_start(priv->phylink);
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
@@ -2695,8 +2695,7 @@ lpiirq_error:
wolirq_error:
free_irq(dev->irq, dev);
irq_error:
- if (dev->phydev)
- phy_stop(dev->phydev);
+ phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
del_timer_sync(&priv->tx_queue[chan].txtimer);
@@ -2705,9 +2704,7 @@ irq_error:
init_error:
free_dma_desc_resources(priv);
dma_desc_error:
- if (dev->phydev)
- phy_disconnect(dev->phydev);
-
+ phylink_disconnect_phy(priv->phylink);
return ret;
}
@@ -2726,10 +2723,8 @@ static int stmmac_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (dev->phydev) {
- phy_stop(dev->phydev);
- phy_disconnect(dev->phydev);
- }
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
stmmac_stop_all_queues(priv);
@@ -2772,7 +2767,7 @@ static int stmmac_release(struct net_device *dev)
* This function fills descriptor and request new descriptors according to
* buffer length to fill
*/
-static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
@@ -2783,11 +2778,18 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
tmp_len = total_len;
while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
- desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
TSO_MAX_BUFF_SIZE : tmp_len;
@@ -2833,11 +2835,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry, des;
+ unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
+ dma_addr_t des;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2894,14 +2897,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[first_entry].buf = des;
tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
- first->des0 = cpu_to_le32(des);
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
- /* Fill start of payload in buff2 of first descriptor */
- if (pay_len)
- first->des1 = cpu_to_le32(des + proto_hdr_len);
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
- /* If needed take extra descriptors to fill the remaining payload */
- tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ }
stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -2947,12 +2955,15 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage tx mitigation */
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
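
The inverted condition is easier to read the other way around: request an interrupt-on-completion (and reset the frame counter) either when the coalesce threshold is reached or when the packet wants a hardware TX timestamp on a GMAC4+ core, since the timestamp must be reaped promptly; otherwise keep deferring completions via the SW timer. An equivalent, un-inverted sketch:

	bool need_ts_irq = priv->synopsys_id >= DWMAC_CORE_4_00 &&
			   (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			   priv->hwts_tx_en;

	if (tx_q->tx_count_frames >= priv->tx_coal_frames || need_ts_irq) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);	/* IRQ on this descriptor */
		priv->xstats.tx_set_ic_bit++;
	} else {
		stmmac_tx_timer_arm(priv, queue);	/* defer via timer */
	}

The same condition reappears in stmmac_xmit() below.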
@@ -3028,12 +3039,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int i, csum_insertion = 0, is_jumbo = 0;
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
- int entry;
- unsigned int first_entry;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
unsigned int enh_desc;
- unsigned int des;
+ dma_addr_t des;
+ int entry;
tx_q = &priv->tx_queue[queue];
@@ -3042,17 +3053,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- /*
- * There is no way to determine the number of TSO
- * capable Queues. Let's use always the Queue 0
- * because if TSO is supported then at least this
- * one will be capable.
- */
- skb_set_queue_mapping(skb, 0);
-
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3166,12 +3168,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* element in case of no SG.
*/
tx_q->tx_count_frames += nfrags + 1;
- if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
+ if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
+ !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
+ (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ stmmac_tx_timer_arm(priv, queue);
+ } else {
+ tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, desc);
priv->xstats.tx_set_ic_bit++;
- tx_q->tx_count_frames = 0;
- } else {
- stmmac_tx_timer_arm(priv, queue);
}
skb_tx_timestamp(skb);
@@ -3275,59 +3280,38 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
- int bfsize = priv->dma_buf_sz;
-
while (dirty-- > 0) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
struct dma_desc *p;
+ bool use_rx_wd;
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
p = rx_q->dma_rx + entry;
- if (likely(!rx_q->rx_skbuff[entry])) {
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
- if (unlikely(!skb)) {
- /* so for a while no zero-copy! */
- rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
- if (unlikely(net_ratelimit()))
- dev_err(priv->device,
- "fail to alloc skb entry %d\n",
- entry);
- break;
- }
-
- rx_q->rx_skbuff[entry] = skb;
- rx_q->rx_skbuff_dma[entry] =
- dma_map_single(priv->device, skb->data, bfsize,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device,
- rx_q->rx_skbuff_dma[entry])) {
- netdev_err(priv->dev, "Rx DMA map failed\n");
- dev_kfree_skb(skb);
+ if (!buf->page) {
+ buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->page)
break;
- }
-
- stmmac_set_desc_addr(priv, p, rx_q->rx_skbuff_dma[entry]);
- stmmac_refill_desc3(priv, rx_q, p);
-
- if (rx_q->rx_zeroc_thresh > 0)
- rx_q->rx_zeroc_thresh--;
-
- netif_dbg(priv, rx_status, priv->dev,
- "refill entry #%d\n", entry);
}
- dma_wmb();
- stmmac_set_rx_owner(priv, p, priv->use_riwt);
+ buf->addr = page_pool_get_dma_addr(buf->page);
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_refill_desc3(priv, rx_q, p);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames %= priv->rx_coal_frames;
+ use_rx_wd = priv->use_riwt && rx_q->rx_count_frames;
dma_wmb();
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
}
rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
}
@@ -3346,9 +3330,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
unsigned int count = 0;
- bool xmac;
-
- xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3362,11 +3343,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ struct dma_desc *np, *p;
int entry, status;
- struct dma_desc *p;
- struct dma_desc *np;
entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
@@ -3396,20 +3378,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_rx_extended_status(priv, &priv->dev->stats,
&priv->xstats, rx_q->dma_erx + entry);
if (unlikely(status == discard_frame)) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
- if (priv->hwts_rx_en && !priv->extend_desc) {
- /* DESC2 & DESC3 will be overwritten by device
- * with timestamp value, hence reinitialize
- * them in stmmac_rx_refill() function so that
- * device can reuse it.
- */
- dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
- rx_q->rx_skbuff[entry] = NULL;
- dma_unmap_single(priv->device,
- rx_q->rx_skbuff_dma[entry],
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
- }
+ buf->page = NULL;
} else {
struct sk_buff *skb;
int frame_len;
@@ -3449,58 +3420,20 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
frame_len, status);
}
- /* The zero-copy is always used for all the sizes
- * in case of GMAC4 because it needs
- * to refill the used descriptors, always.
- */
- if (unlikely(!xmac &&
- ((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(rx_q)))) {
- skb = netdev_alloc_skb_ip_align(priv->dev,
- frame_len);
- if (unlikely(!skb)) {
- if (net_ratelimit())
- dev_warn(priv->device,
- "packet dropped\n");
- priv->dev->stats.rx_dropped++;
- continue;
- }
-
- dma_sync_single_for_cpu(priv->device,
- rx_q->rx_skbuff_dma
- [entry], frame_len,
- DMA_FROM_DEVICE);
- skb_copy_to_linear_data(skb,
- rx_q->
- rx_skbuff[entry]->data,
- frame_len);
-
- skb_put(skb, frame_len);
- dma_sync_single_for_device(priv->device,
- rx_q->rx_skbuff_dma
- [entry], frame_len,
- DMA_FROM_DEVICE);
- } else {
- skb = rx_q->rx_skbuff[entry];
- if (unlikely(!skb)) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
- priv->dev->stats.rx_dropped++;
- continue;
- }
- prefetch(skb->data - NET_IP_ALIGN);
- rx_q->rx_skbuff[entry] = NULL;
- rx_q->rx_zeroc_thresh++;
-
- skb_put(skb, frame_len);
- dma_unmap_single(priv->device,
- rx_q->rx_skbuff_dma[entry],
- priv->dma_buf_sz,
- DMA_FROM_DEVICE);
+ skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
+ if (unlikely(!skb)) {
+ priv->dev->stats.rx_dropped++;
+ continue;
}
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ frame_len, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, page_address(buf->page),
+ frame_len);
+ skb_put(skb, frame_len);
+ dma_sync_single_for_device(priv->device, buf->addr,
+ frame_len, DMA_FROM_DEVICE);
+
if (netif_msg_pktdata(priv)) {
netdev_dbg(priv->dev, "frame received (%dbytes)",
frame_len);
@@ -3520,6 +3453,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
napi_gro_receive(&ch->rx_napi, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
@@ -3562,8 +3499,8 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
work_done = min(work_done, budget);
- if (work_done < budget && napi_complete_done(napi, work_done))
- stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ if (work_done < budget)
+ napi_complete_done(napi, work_done);
/* Force transmission restart */
tx_q = &priv->tx_queue[chan];
@@ -3786,6 +3723,7 @@ static void stmmac_poll_controller(struct net_device *dev)
*/
static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -3795,9 +3733,7 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!dev->phydev)
- return -EINVAL;
- ret = phy_mii_ioctl(dev->phydev, rq, cmd);
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_set(dev, rq);
@@ -3833,23 +3769,7 @@ static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return ret;
}
-static int stmmac_setup_tc_block(struct stmmac_priv *priv,
- struct tc_block_offload *f)
-{
- if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
- return -EOPNOTSUPP;
-
- switch (f->command) {
- case TC_BLOCK_BIND:
- return tcf_block_cb_register(f->block, stmmac_setup_tc_block_cb,
- priv, priv, f->extack);
- case TC_BLOCK_UNBIND:
- tcf_block_cb_unregister(f->block, stmmac_setup_tc_block_cb, priv);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-}
+static LIST_HEAD(stmmac_block_cb_list);
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
@@ -3858,7 +3778,10 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
switch (type) {
case TC_SETUP_BLOCK:
- return stmmac_setup_tc_block(priv, type_data);
+ return flow_block_cb_setup_simple(type_data,
+ &stmmac_block_cb_list,
+ stmmac_setup_tc_block_cb,
+ priv, priv, true);
case TC_SETUP_QDISC_CBS:
return stmmac_tc_setup_cbs(priv, priv, type_data);
default:
@@ -3866,6 +3789,22 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's always use Queue 0
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
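
Hoisting the TSO clamp into .ndo_select_queue lets the stack pick queue 0 before any txq lock is taken, instead of stmmac_xmit() remapping the skb afterwards (the skb_set_queue_mapping() call removed from stmmac_xmit() earlier in this diff). A hypothetical caller-side check, purely for illustration:

	u16 q = stmmac_select_queue(dev, skb, NULL);
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
		WARN_ON(q != 0);		/* TSO traffic pinned to queue 0 */
	WARN_ON(q >= dev->real_num_tx_queues);	/* fallback stays in range */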
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4082,6 +4021,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
@@ -4154,6 +4094,12 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
priv->hw->pmt = priv->plat->pmt;
+ if (priv->dma_cap.hash_tb_sz) {
+ priv->hw->multicast_filter_bins =
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
+ priv->hw->mcast_bits_log2 =
+ ilog2(priv->hw->multicast_filter_bins);
+ }
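
The sizing arithmetic: dma_cap.hash_tb_sz is the encoded hash-table size, BIT(hash_tb_sz) << 5 expands it to a bin count (32 * 2^hash_tb_sz), and mcast_bits_log2 caches the log2 for the hash-filter code. Worked out:

	/* hash_tb_sz = 1 -> BIT(1) << 5 =  64 bins, mcast_bits_log2 = 6 */
	/* hash_tb_sz = 2 -> BIT(2) << 5 = 128 bins, mcast_bits_log2 = 7 */
	/* hash_tb_sz = 3 -> BIT(3) << 5 = 256 bins, mcast_bits_log2 = 8 */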
/* TXCOE doesn't work in thresh DMA mode */
if (priv->plat->force_thresh_dma_mode)
@@ -4231,9 +4177,8 @@ int stmmac_dvr_probe(struct device *device,
u32 queue, maxq;
int ret = 0;
- ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
- MTL_MAX_TX_QUEUES,
- MTL_MAX_RX_QUEUES);
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
if (!ndev)
return -ENOMEM;
@@ -4265,8 +4210,7 @@ int stmmac_dvr_probe(struct device *device,
priv->wq = create_singlethread_workqueue("stmmac_wq");
if (!priv->wq) {
dev_err(priv->device, "failed to create workqueue\n");
- ret = -ENOMEM;
- goto error_wq;
+ return -ENOMEM;
}
INIT_WORK(&priv->service_task, stmmac_service_task);
@@ -4313,6 +4257,24 @@ int stmmac_dvr_probe(struct device *device,
priv->tso = true;
dev_info(priv->device, "TSO feature enabled\n");
}
+
+ if (priv->dma_cap.addr64) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.addr64));
+ if (!ret) {
+ dev_info(priv->device, "Using %d bits DMA width\n",
+ priv->dma_cap.addr64);
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.addr64 = 32;
+ }
+ }
+
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
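
The DMA-mask negotiation added above follows the usual pattern: try the width the hardware advertises, fall back to 32 bits if the platform refuses it, and record the width actually in effect so the descriptor code (e.g. stmmac_tso_allocator() earlier in this diff) knows whether a plain le32 write of des0/des1 suffices. In generic form:

	/* Sketch, assuming hw_width holds the advertised addressing width. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(hw_width))) {
		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
			return -EIO;	/* no usable DMA addressing at all */
		hw_width = 32;		/* remember the effective width */
	}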
@@ -4390,6 +4352,12 @@ int stmmac_dvr_probe(struct device *device,
}
}
+ ret = stmmac_phy_setup(priv);
+ if (ret) {
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+ goto error_phy_setup;
+ }
+
ret = register_netdev(ndev);
if (ret) {
dev_err(priv->device, "%s: ERROR %i registering the device\n",
@@ -4407,6 +4375,8 @@ int stmmac_dvr_probe(struct device *device,
return ret;
error_netdev_register:
+ phylink_destroy(priv->phylink);
+error_phy_setup:
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
@@ -4422,8 +4392,6 @@ error_mdio_register:
}
error_hw_init:
destroy_workqueue(priv->wq);
-error_wq:
- free_netdev(ndev);
return ret;
}
@@ -4450,6 +4418,7 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
clk_disable_unprepare(priv->plat->pclk);
@@ -4460,7 +4429,6 @@ int stmmac_dvr_remove(struct device *dev)
stmmac_mdio_unregister(ndev);
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
- free_netdev(ndev);
return 0;
}
@@ -4481,8 +4449,7 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
- if (ndev->phydev)
- phy_stop(ndev->phydev);
+ phylink_stop(priv->phylink);
mutex_lock(&priv->lock);
@@ -4507,9 +4474,7 @@ int stmmac_suspend(struct device *dev)
}
mutex_unlock(&priv->lock);
- priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
- priv->oldduplex = DUPLEX_UNKNOWN;
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
@@ -4584,7 +4549,7 @@ int stmmac_resume(struct device *dev)
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
- stmmac_init_tx_coalesce(priv);
+ stmmac_init_coalesce(priv);
stmmac_set_rx_mode(ndev);
stmmac_enable_all_queues(priv);
@@ -4593,8 +4558,7 @@ int stmmac_resume(struct device *dev)
mutex_unlock(&priv->lock);
- if (ndev->phydev)
- phy_start(ndev->phydev);
+ phylink_start(priv->phylink);
return 0;
}