Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac/stmmac_main.c')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1288
1 file changed, 754 insertions(+), 534 deletions(-)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index da8306f60730..8273e6a175c8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -74,8 +74,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
-#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX 256
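The bulk of this patch mechanically redirects ring-parameter accesses through the new priv->dma_conf container. For orientation, the structure introduced alongside this change in stmmac.h looks roughly like this (sketch; field order approximate):

/* Sketch of the container added by this series (see stmmac.h) */
struct stmmac_dma_conf {
	unsigned int dma_buf_sz;

	/* RX Queue */
	struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
	unsigned int dma_rx_size;

	/* TX Queue */
	struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
	unsigned int dma_tx_size;
};

Keeping the sizes, the buffer length and the queues themselves in one struct is what later allows a whole replacement configuration to be staged before being swapped in (see stmmac_setup_dma_desc() below).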
@@ -130,8 +130,13 @@ static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
#ifdef CONFIG_DEBUG_FS
static const struct net_device_ops stmmac_netdev_ops;
@@ -229,7 +234,7 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
/* synchronize_rcu() needed for pending XDP buffers to drain */
for (queue = 0; queue < rx_queues_cnt; queue++) {
- rx_q = &priv->rx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
if (rx_q->xsk_pool) {
synchronize_rcu();
break;
@@ -355,13 +360,13 @@ static void print_pkt(unsigned char *buf, int len)
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
u32 avail;
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -373,13 +378,13 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
*/
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
u32 dirty;
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
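Both helpers are standard circular-ring accounting: cur and dirty wrap modulo the ring size, and one slot is deliberately kept unused so a full ring can be told apart from an empty one. A standalone restatement of the TX case (illustrative only, not driver code):

/* Free slots between consumer (dirty) and producer (cur) in a ring of
 * `size` entries; one slot is sacrificed to disambiguate full/empty.
 */
static unsigned int ring_avail(unsigned int dirty, unsigned int cur,
			       unsigned int size)
{
	if (dirty > cur)
		return dirty - cur - 1;
	return size - cur + dirty - 1;
}

With size = 512, an idle ring (cur == dirty == 0) reports 511 free slots, never 512.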
@@ -400,23 +405,24 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function verifies and enters LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
/* check if all TX queues have the work finished */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -448,8 +454,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
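Previously the callback re-armed itself unconditionally, so the timer kept firing even once the MAC was idle in LPI; with the -EBUSY return it only re-arms while TX work is actually outstanding. For context, the timer is created and first armed from stmmac_eee_init(), roughly (sketch, simplified):

/* Sketch of the setup side (simplified from stmmac_eee_init()) */
timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));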
/**
@@ -518,14 +524,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
return true;
}
-static inline u32 stmmac_cdc_adjust(struct stmmac_priv *priv)
-{
- /* Correct the clk domain crossing(CDC) error */
- if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
- return (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
- return 0;
-}
-
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
* @p : descriptor pointer
@@ -557,7 +555,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
}
if (found) {
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -594,7 +592,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
- ns -= stmmac_cdc_adjust(priv);
+ ns -= priv->plat->cdc_error_adj;
netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
shhwtstamp = skb_hwtstamps(skb);
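Dropping stmmac_cdc_adjust() removes a 64-bit division from every timestamp; the clock-domain-crossing error is instead precomputed once into plat->cdc_error_adj, presumably when the PTP clock is brought up, along the lines of:

/* Sketch: the 2-cycle CDC error is two periods of the PTP reference
 * clock, in ns; compute it once instead of per packet.
 */
if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
	priv->plat->cdc_error_adj =
		(2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;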
@@ -644,10 +642,6 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
__func__, config.flags, config.tx_type, config.rx_filter);
- /* reserved for future extensions */
- if (config.flags)
- return -EINVAL;
-
if (config.tx_type != HWTSTAMP_TX_OFF &&
config.tx_type != HWTSTAMP_TX_ON)
return -ERANGE;
@@ -843,19 +837,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
struct timespec64 now;
u32 sec_inc = 0;
u64 temp = 0;
- int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
- if (ret < 0) {
- netdev_warn(priv->dev,
- "failed to enable PTP reference clock: %pe\n",
- ERR_PTR(ret));
- return ret;
- }
-
stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
priv->systime_flags = systime_flags;
@@ -899,6 +884,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -921,8 +909,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -946,105 +932,15 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
-static void stmmac_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
+static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- int tx_cnt = priv->plat->tx_queues_to_use;
- int max_speed = priv->plat->max_speed;
-
- phylink_set(mac_supported, 10baseT_Half);
- phylink_set(mac_supported, 10baseT_Full);
- phylink_set(mac_supported, 100baseT_Half);
- phylink_set(mac_supported, 100baseT_Full);
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
-
- phylink_set(mac_supported, Autoneg);
- phylink_set(mac_supported, Pause);
- phylink_set(mac_supported, Asym_Pause);
- phylink_set_port_modes(mac_supported);
-
- /* Cut down 1G if asked to */
- if ((max_speed > 0) && (max_speed < 1000)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- } else if (priv->plat->has_gmac4) {
- if (!max_speed || max_speed >= 2500) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- } else if (priv->plat->has_xgmac) {
- if (!max_speed || (max_speed >= 2500)) {
- phylink_set(mac_supported, 2500baseT_Full);
- phylink_set(mac_supported, 2500baseX_Full);
- }
- if (!max_speed || (max_speed >= 5000)) {
- phylink_set(mac_supported, 5000baseT_Full);
- }
- if (!max_speed || (max_speed >= 10000)) {
- phylink_set(mac_supported, 10000baseSR_Full);
- phylink_set(mac_supported, 10000baseLR_Full);
- phylink_set(mac_supported, 10000baseER_Full);
- phylink_set(mac_supported, 10000baseLRM_Full);
- phylink_set(mac_supported, 10000baseT_Full);
- phylink_set(mac_supported, 10000baseKX4_Full);
- phylink_set(mac_supported, 10000baseKR_Full);
- }
- if (!max_speed || (max_speed >= 25000)) {
- phylink_set(mac_supported, 25000baseCR_Full);
- phylink_set(mac_supported, 25000baseKR_Full);
- phylink_set(mac_supported, 25000baseSR_Full);
- }
- if (!max_speed || (max_speed >= 40000)) {
- phylink_set(mac_supported, 40000baseKR4_Full);
- phylink_set(mac_supported, 40000baseCR4_Full);
- phylink_set(mac_supported, 40000baseSR4_Full);
- phylink_set(mac_supported, 40000baseLR4_Full);
- }
- if (!max_speed || (max_speed >= 50000)) {
- phylink_set(mac_supported, 50000baseCR2_Full);
- phylink_set(mac_supported, 50000baseKR2_Full);
- phylink_set(mac_supported, 50000baseSR2_Full);
- phylink_set(mac_supported, 50000baseKR_Full);
- phylink_set(mac_supported, 50000baseSR_Full);
- phylink_set(mac_supported, 50000baseCR_Full);
- phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
- phylink_set(mac_supported, 50000baseDR_Full);
- }
- if (!max_speed || (max_speed >= 100000)) {
- phylink_set(mac_supported, 100000baseKR4_Full);
- phylink_set(mac_supported, 100000baseSR4_Full);
- phylink_set(mac_supported, 100000baseCR4_Full);
- phylink_set(mac_supported, 100000baseLR4_ER4_Full);
- phylink_set(mac_supported, 100000baseKR2_Full);
- phylink_set(mac_supported, 100000baseSR2_Full);
- phylink_set(mac_supported, 100000baseCR2_Full);
- phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
- phylink_set(mac_supported, 100000baseDR2_Full);
- }
- }
- /* Half-Duplex can only work with single queue */
- if (tx_cnt > 1) {
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 1000baseT_Half);
- }
-
- linkmode_and(supported, supported, mac_supported);
- linkmode_andnot(supported, supported, mask);
-
- linkmode_and(state->advertising, state->advertising, mac_supported);
- linkmode_andnot(state->advertising, state->advertising, mask);
+ if (!priv->hw->xpcs)
+ return NULL;
- /* If PCS is supported, check which modes it supports. */
- if (priv->hw->xpcs)
- xpcs_validate(priv->hw->xpcs, supported, state);
+ return &priv->hw->xpcs->pcs;
}
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
@@ -1090,10 +986,10 @@ static void stmmac_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
- u32 ctrl;
+ u32 old_ctrl, ctrl;
- ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
- ctrl &= ~priv->hw->link.speed_mask;
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
if (interface == PHY_INTERFACE_MODE_USXGMII) {
switch (speed) {
@@ -1168,7 +1064,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
if (tx_pause && rx_pause)
stmmac_mac_flow_ctrl(priv, duplex);
- writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
stmmac_mac_set(priv, priv->ioaddr, true);
if (phy && priv->dma_cap.eee) {
@@ -1183,7 +1080,8 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
- .validate = stmmac_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
.mac_link_up = stmmac_mac_link_up,
@@ -1225,18 +1123,20 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- struct device_node *node;
+ struct fwnode_handle *fwnode;
int ret;
- node = priv->plat->phylink_node;
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
- if (node)
- ret = phylink_of_phy_connect(priv->phylink, node, 0);
+ if (fwnode)
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
/* Some DT bindings do not set-up the PHY handle. Let's try to
* manually parse it
*/
- if (!node || ret) {
+ if (!fwnode || ret) {
int addr = priv->plat->phy_addr;
struct phy_device *phydev;
@@ -1263,12 +1163,12 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
{
struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int max_speed = priv->plat->max_speed;
int mode = priv->plat->phy_interface;
struct phylink *phylink;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
- priv->phylink_config.pcs_poll = true;
if (priv->plat->mdio_bus_data)
priv->phylink_config.ovr_an_inband =
mdio_bus_data->xpcs_an_inband;
@@ -1276,19 +1176,57 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
if (!fwnode)
fwnode = dev_fwnode(priv->device);
+ /* Set the platform/firmware specified interface mode */
+ __set_bit(mode, priv->phylink_config.supported_interfaces);
+
+ /* If we have an xpcs, it defines which PHY interfaces are supported. */
+ if (priv->hw->xpcs)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ if (!max_speed || max_speed >= 1000)
+ priv->phylink_config.mac_capabilities |= MAC_1000;
+
+ if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (!max_speed || max_speed >= 5000)
+ priv->phylink_config.mac_capabilities |= MAC_5000FD;
+ if (!max_speed || max_speed >= 10000)
+ priv->phylink_config.mac_capabilities |= MAC_10000FD;
+ if (!max_speed || max_speed >= 25000)
+ priv->phylink_config.mac_capabilities |= MAC_25000FD;
+ if (!max_speed || max_speed >= 40000)
+ priv->phylink_config.mac_capabilities |= MAC_40000FD;
+ if (!max_speed || max_speed >= 50000)
+ priv->phylink_config.mac_capabilities |= MAC_50000FD;
+ if (!max_speed || max_speed >= 100000)
+ priv->phylink_config.mac_capabilities |= MAC_100000FD;
+ }
+
+ /* Half-Duplex can only work with single queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ priv->phylink_config.mac_managed_pm = true;
+
phylink = phylink_create(&priv->phylink_config, fwnode,
mode, &stmmac_phylink_mac_ops);
if (IS_ERR(phylink))
return PTR_ERR(phylink);
- if (priv->hw->xpcs)
- phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
-
priv->phylink = phylink;
return 0;
}
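Taken together, the hunks above switch the driver to phylink's generic validation: instead of hand-building link-mode masks, the MAC declares mac_capabilities and supported_interfaces up front and lets phylink_generic_validate() intersect them with what the PHY/PCS offer; mac_select_pcs() likewise replaces the manual phylink_set_pcs() call. The whole contract reduces to (hypothetical minimal driver; my_mac_ops is illustrative):

/* Sketch: everything phylink_generic_validate() consumes */
priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					MAC_10 | MAC_100 | MAC_1000;
__set_bit(PHY_INTERFACE_MODE_RGMII,
	  priv->phylink_config.supported_interfaces);

phylink = phylink_create(&priv->phylink_config, fwnode,
			 PHY_INTERFACE_MODE_RGMII, &my_mac_ops);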
-static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_cnt = priv->plat->rx_queues_to_use;
unsigned int desc_size;
@@ -1297,7 +1235,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
/* Display RX rings */
for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
pr_info("\tRX Queue %u rings\n", queue);
@@ -1310,12 +1248,13 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
}
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
}
-static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
unsigned int desc_size;
@@ -1324,7 +1263,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
/* Display TX rings */
for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
pr_info("\tTX Queue %d rings\n", queue);
@@ -1339,18 +1278,19 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
tx_q->dma_tx_phy, desc_size);
}
}
-static void stmmac_display_rings(struct stmmac_priv *priv)
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Display RX ring */
- stmmac_display_rx_rings(priv);
+ stmmac_display_rx_rings(priv, dma_conf);
/* Display TX ring */
- stmmac_display_tx_rings(priv);
+ stmmac_display_tx_rings(priv, dma_conf);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1374,44 +1314,50 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
/**
* stmmac_clear_rx_descriptors - clear RX descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: this function is called to clear the RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
/* Clear the RX descriptors */
- for (i = 0; i < priv->dma_rx_size; i++)
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == priv->dma_rx_size - 1),
- priv->dma_buf_sz);
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
}
/**
* stmmac_clear_tx_descriptors - clear tx descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index.
* Description: this function is called to clear the TX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
/* Clear the TX descriptors */
- for (i = 0; i < priv->dma_tx_size; i++) {
- int last = (i == (priv->dma_tx_size - 1));
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1428,10 +1374,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_clear_descriptors - clear descriptors
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* Description: this function is called to clear the TX and RX descriptors
* in case of both basic and extended descriptors are used.
*/
-static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
@@ -1439,16 +1387,17 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
/* Clear the RX descriptors */
for (queue = 0; queue < rx_queue_cnt; queue++)
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
/* Clear the TX descriptors */
for (queue = 0; queue < tx_queue_cnt; queue++)
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
}
/**
* stmmac_init_rx_buffers - init the RX descriptor buffer.
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @p: descriptor pointer
* @i: descriptor index
* @flags: gfp flag
@@ -1456,21 +1405,27 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
* Description: this function is called to allocate a receive buffer, perform
* the DMA mapping and init the descriptor.
*/
-static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
int i, gfp_t flags, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
return -ENOMEM;
buf->page_offset = stmmac_rx_offset(priv);
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
return -ENOMEM;
@@ -1484,7 +1439,7 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
stmmac_set_desc_addr(priv, p, buf->addr);
- if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
return 0;
@@ -1493,12 +1448,13 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
/**
* stmmac_free_rx_buffer - free RX dma buffers
* @priv: private structure
- * @queue: RX queue index
+ * @rx_q: RX queue
* @i: buffer index.
*/
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page)
@@ -1513,12 +1469,15 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* stmmac_free_tx_buffer - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* @i: buffer index.
*/
-static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
if (tx_q->tx_skbuff_dma[i].buf &&
tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
@@ -1557,23 +1516,28 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
/**
* dma_free_rx_skbufs - free RX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++)
- stmmac_free_rx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
}
-static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
- gfp_t flags)
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct dma_desc *p;
int ret;
@@ -1582,7 +1546,7 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
else
p = rx_q->dma_rx + i;
- ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
queue);
if (ret)
return ret;
@@ -1596,14 +1560,17 @@ static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
/**
* dma_free_rx_xskbufs - free RX dma buffers from XSK pool
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (!buf->xdp)
@@ -1614,12 +1581,14 @@ static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
}
}
-static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf;
dma_addr_t dma_addr;
struct dma_desc *p;
@@ -1654,22 +1623,25 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* @flags: gfp flag.
* Description: this function initializes the DMA RX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
int ret;
netif_dbg(priv, probe, priv->dev,
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
@@ -1696,36 +1668,35 @@ static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t f
/* RX XDP ZC buffer pool may not be populated, e.g.
* xdpsock TX-only.
*/
- stmmac_alloc_rx_buffers_zc(priv, queue);
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
} else {
- ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
if (ret < 0)
return -ENOMEM;
}
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 1);
+ dma_conf->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
rx_q->dma_rx_phy,
- priv->dma_rx_size, 0);
+ dma_conf->dma_rx_size, 0);
}
return 0;
}
-static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_count = priv->plat->rx_queues_to_use;
- u32 queue;
+ int queue;
int ret;
/* RX INITIALIZATION */
@@ -1733,7 +1704,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"SKB addresses:\nskb\t\tskb data\tdma data\n");
for (queue = 0; queue < rx_count; queue++) {
- ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
if (ret)
goto err_init_rx_buffers;
}
@@ -1742,19 +1713,16 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
err_init_rx_buffers:
while (queue >= 0) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
- if (queue == 0)
- break;
-
queue--;
}
@@ -1764,14 +1732,17 @@ err_init_rx_buffers:
/**
* __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
* @priv: driver private structure
- * @queue : TX queue index
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
* Description: this function initializes the DMA TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
netif_dbg(priv, probe, priv->dev,
@@ -1783,16 +1754,16 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 1);
+ dma_conf->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
tx_q->dma_tx_phy,
- priv->dma_tx_size, 0);
+ dma_conf->dma_tx_size, 0);
}
tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
- for (i = 0; i < priv->dma_tx_size; i++) {
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1811,16 +1782,11 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
tx_q->tx_skbuff[i] = NULL;
}
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
-
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
-
return 0;
}
-static int init_dma_tx_desc_rings(struct net_device *dev)
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 tx_queue_cnt;
@@ -1829,7 +1795,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_queue_cnt = priv->plat->tx_queues_to_use;
for (queue = 0; queue < tx_queue_cnt; queue++)
- __init_dma_tx_desc_rings(priv, queue);
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
return 0;
}
@@ -1837,26 +1803,29 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
+ * @dma_conf: structure to take the dma data
* @flags: gfp flag.
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- ret = init_dma_rx_desc_rings(dev, flags);
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
if (ret)
return ret;
- ret = init_dma_tx_desc_rings(dev);
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, dma_conf);
if (netif_msg_hw(priv))
- stmmac_display_rings(priv);
+ stmmac_display_rings(priv, dma_conf);
return ret;
}
@@ -1864,17 +1833,20 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
/**
* dma_free_tx_skbufs - free TX dma buffers
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
int i;
tx_q->xsk_frames_done = 0;
- for (i = 0; i < priv->dma_tx_size; i++)
- stmmac_free_tx_buffer(priv, queue, i);
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
@@ -1893,34 +1865,37 @@ static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
u32 queue;
for (queue = 0; queue < tx_queue_cnt; queue++)
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
}
/**
* __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
*/
-static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
/* Release the DMA RX socket buffers */
if (rx_q->xsk_pool)
- dma_free_rx_xskbufs(priv, queue);
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
else
- dma_free_rx_skbufs(priv, queue);
+ dma_free_rx_skbufs(priv, dma_conf, queue);
rx_q->buf_alloc_num = 0;
rx_q->xsk_pool = NULL;
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, priv->dma_rx_size *
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1932,29 +1907,33 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
page_pool_destroy(rx_q->page_pool);
}
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
/* Free RX queue resources */
for (queue = 0; queue < rx_count; queue++)
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
}
/**
* __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
*/
-static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
/* Release the DMA TX socket buffers */
- dma_free_tx_skbufs(priv, queue);
+ dma_free_tx_skbufs(priv, dma_conf, queue);
if (priv->extend_desc) {
size = sizeof(struct dma_extended_desc);
@@ -1967,7 +1946,7 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
addr = tx_q->dma_tx;
}
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1975,28 +1954,32 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
kfree(tx_q->tx_skbuff);
}
-static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
/* Free TX queue resources */
for (queue = 0; queue < tx_count; queue++)
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
}
/**
* __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: RX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
bool xdp_prog = stmmac_xdp_is_enabled(priv);
struct page_pool_params pp_params = { 0 };
@@ -2008,8 +1991,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
- pp_params.pool_size = priv->dma_rx_size;
- num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
pp_params.dev = priv->device;
@@ -2024,7 +2007,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return ret;
}
- rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
@@ -2032,7 +2015,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2041,7 +2024,7 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- priv->dma_rx_size *
+ dma_conf->dma_rx_size *
sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
@@ -2066,7 +2049,8 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
@@ -2074,7 +2058,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
/* RX queues buffers and DMA */
for (queue = 0; queue < rx_count; queue++) {
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2082,7 +2066,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2090,28 +2074,31 @@ err_dma:
/**
* __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* @queue: TX queue index
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
size_t size;
void *addr;
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
return -ENOMEM;
- tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -2124,7 +2111,7 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
else
size = sizeof(struct dma_desc);
- size *= priv->dma_tx_size;
+ size *= dma_conf->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -2141,7 +2128,8 @@ static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
return 0;
}
-static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
u32 tx_count = priv->plat->tx_queues_to_use;
u32 queue;
@@ -2149,7 +2137,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
/* TX queues buffers and DMA */
for (queue = 0; queue < tx_count; queue++) {
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
if (ret)
goto err_dma;
}
@@ -2157,27 +2145,29 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
return 0;
err_dma:
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
/**
* alloc_dma_desc_resources - alloc TX/RX resources.
* @priv: private structure
+ * @dma_conf: structure to take the dma data
* Description: according to which descriptor can be used (extend or basic)
* this function allocates the resources for TX and RX paths. In case of
* reception, for example, it pre-allocates the RX socket buffer in order to
* allow zero-copy mechanism.
*/
-static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* RX Allocation */
- int ret = alloc_dma_rx_desc_resources(priv);
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
if (ret)
return ret;
- ret = alloc_dma_tx_desc_resources(priv);
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
return ret;
}
@@ -2185,16 +2175,18 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
/**
* free_dma_desc_resources - free dma desc resources
* @priv: private structure
+ * @dma_conf: structure to take the dma data
*/
-static void free_dma_desc_resources(struct stmmac_priv *priv)
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
{
/* Release the DMA TX socket buffers */
- free_dma_tx_desc_resources(priv);
+ free_dma_tx_desc_resources(priv, dma_conf);
/* Release the DMA RX socket buffers later
* to ensure all pending XDP_TX buffers are returned.
*/
- free_dma_rx_desc_resources(priv);
+ free_dma_rx_desc_resources(priv, dma_conf);
}
/**
@@ -2266,6 +2258,23 @@ static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
stmmac_stop_tx(priv, priv->ioaddr, chan);
}
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
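This helper pairs with the stmmac_disable_dma_irq() calls added to stmmac_init_dma_engine() below: DMA interrupts are now left masked through hardware setup and presumably only unmasked once NAPI is running, e.g. at the end of the open/resume paths:

/* Sketch of the intended ordering (assumed call sites) */
stmmac_enable_all_queues(priv);		/* NAPI instances enabled first */
stmmac_enable_all_dma_irq(priv);	/* ...then unmask DMA interrupts */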
/**
* stmmac_start_all_dma - start all RX and TX DMA channels
* @priv: driver private structure
@@ -2351,7 +2360,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/* configure all channels */
for (chan = 0; chan < rx_channels_count; chan++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
u32 buf_size;
qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
@@ -2366,7 +2375,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
chan);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
chan);
}
}
@@ -2382,7 +2391,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2390,7 +2399,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
bool work_done = true;
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
budget = min(budget, stmmac_tx_avail(priv, queue));
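txq_trans_cond_update() replaces the raw store to nq->trans_start; it only writes when the timestamp actually changes, sparing a hot cache line on busy queues. Its netdevice.h implementation is approximately:

/* Approximation of txq_trans_cond_update() from <linux/netdevice.h> */
static inline void txq_trans_cond_update(struct netdev_queue *txq)
{
	unsigned long now = jiffies;

	if (READ_ONCE(txq->trans_start) != now)
		WRITE_ONCE(txq->trans_start, now);
}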
@@ -2457,7 +2466,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
@@ -2474,6 +2483,21 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
return !!budget && work_done;
}
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
@@ -2483,7 +2507,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
*/
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
@@ -2496,7 +2520,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
entry = tx_q->dirty_tx;
/* Try to clean all TX complete frame in 1 shot */
- while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
struct xdp_frame *xdpf;
struct sk_buff *skb;
struct dma_desc *p;
@@ -2539,6 +2563,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* ... verify the status error condition */
if (unlikely(status & tx_err)) {
priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
} else {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -2596,7 +2622,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2636,8 +2662,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -2661,17 +2687,14 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
*/
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_stop_tx_dma(priv, chan);
- dma_free_tx_skbufs(priv, chan);
- stmmac_clear_tx_descriptors(priv, chan);
- tx_q->dirty_tx = 0;
- tx_q->cur_tx = 0;
- tx_q->mss = 0;
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+ stmmac_reset_tx_queue(priv, chan);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
stmmac_start_tx_dma(priv, chan);
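The open-coded index/MSS resets give way to the helpers forward-declared at the top of this patch; their bodies, added elsewhere in the file, presumably amount to:

/* Sketch of the factored-out reset (TX side; RX is analogous) */
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->cur_tx = 0;
	tx_q->dirty_tx = 0;
	tx_q->mss = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}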
@@ -2731,8 +2754,8 @@ static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
&priv->xstats, chan, dir);
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
struct stmmac_channel *ch = &priv->channel[chan];
struct napi_struct *rx_napi;
struct napi_struct *tx_napi;
@@ -2789,21 +2812,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
for (chan = 0; chan < tx_channel_count; chan++) {
if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- (tc <= 256)) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status[chan] == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
@@ -2905,12 +2914,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
/* DMA CSR Channel configuration */
- for (chan = 0; chan < dma_csr_ch; chan++)
+ for (chan = 0; chan < dma_csr_ch; chan++) {
stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
/* DMA RX Channel Configuration */
for (chan = 0; chan < rx_channels_count; chan++) {
- rx_q = &priv->rx_queue[chan];
+ rx_q = &priv->dma_conf.rx_queue[chan];
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, chan);
@@ -2924,7 +2935,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
/* DMA TX Channel Configuration */
for (chan = 0; chan < tx_channels_count; chan++) {
- tx_q = &priv->tx_queue[chan];
+ tx_q = &priv->dma_conf.tx_queue[chan];
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, chan);
@@ -2939,7 +2950,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
hrtimer_start(&tx_q->txtimer,
STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
@@ -2989,7 +3000,7 @@ static void stmmac_init_coalesce(struct stmmac_priv *priv)
u32 chan;
for (chan = 0; chan < tx_channel_count; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
@@ -3011,12 +3022,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (priv->dma_tx_size - 1), chan);
+ (priv->dma_conf.dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (priv->dma_rx_size - 1), chan);
+ (priv->dma_conf.dma_rx_size - 1), chan);
}
/**
@@ -3241,7 +3252,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3251,7 +3262,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3308,14 +3319,22 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
}
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
+
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
/* Convert the timer from msec to usec */
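Renaming init_ptp to ptp_register reflects the new split: timestamping is (re)initialized on every hw_setup, but the PTP clock is registered, and its reference clock enabled, only on the first bring-up. The expected call sites (sketch):

ret = stmmac_hw_setup(dev, true);	/* stmmac_open(): enable clk + register */
ret = stmmac_hw_setup(ndev, false);	/* stmmac_resume(): re-init only */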
@@ -3343,7 +3362,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* Enable TSO */
if (priv->tso) {
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
/* TSO and TBS cannot co-exist */
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3365,7 +3384,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
/* TBS */
for (chan = 0; chan < tx_cnt; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
@@ -3409,7 +3428,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->tx_irq[j] > 0) {
irq_set_affinity_hint(priv->tx_irq[j], NULL);
- free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
}
}
irq_idx = priv->plat->rx_queues_to_use;
@@ -3418,7 +3437,7 @@ static void stmmac_free_irq(struct net_device *dev,
for (j = irq_idx - 1; j >= 0; j--) {
if (priv->rx_irq[j] > 0) {
irq_set_affinity_hint(priv->rx_irq[j], NULL);
- free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
}
}
@@ -3553,7 +3572,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
ret = request_irq(priv->rx_irq[i],
stmmac_msi_intr_rx,
- 0, int_name, &priv->rx_queue[i]);
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc rx-%d MSI %d (error: %d)\n",
@@ -3578,7 +3597,7 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
ret = request_irq(priv->tx_irq[i],
stmmac_msi_intr_tx,
- 0, int_name, &priv->tx_queue[i]);
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
if (unlikely(ret < 0)) {
netdev_err(priv->dev,
"%s: alloc tx-%d MSI %d (error: %d)\n",
@@ -3665,27 +3684,99 @@ static int stmmac_request_irq(struct net_device *dev)
}
/**
- * stmmac_open - open entry point of the driver
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ * @priv: driver private structure
+ * @mtu: MTU to setup the dma queue and buf with
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queue and init them.
+ * Return value:
+ * the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Choose the tx/rx size from the already defined one in the
+ * priv struct (if defined).
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
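The helper above follows the kernel's ERR_PTR convention: callers must test
the return value with IS_ERR() rather than against NULL. A minimal caller
sketch under that convention (mirroring what the stmmac_open() wrapper later
in this patch does; error handling reduced):

        struct stmmac_dma_conf *dma_conf;

        dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
        if (IS_ERR(dma_conf))
                return PTR_ERR(dma_conf);       /* -ENOMEM or a ring-init error */

        /* ... hand dma_conf to the open path, which copies it by value ... */
        kfree(dma_conf);                        /* the live copy is priv->dma_conf */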
+/**
+ * __stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
+ * @dma_conf : structure holding the DMA configuration and queue data
* Description:
* This function is the open entry point of the driver.
* Return value:
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-int stmmac_open(struct net_device *dev)
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
{
struct stmmac_priv *priv = netdev_priv(dev);
int mode = priv->plat->phy_interface;
- int bfsize = 0;
u32 chan;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI &&
@@ -3704,44 +3795,20 @@ int stmmac_open(struct net_device *dev)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
- if (bfsize < 0)
- bfsize = 0;
-
- if (bfsize < BUF_SIZE_16KiB)
- bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
-
- priv->dma_buf_sz = bfsize;
- buf_sz = bfsize;
-
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- if (!priv->dma_tx_size)
- priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
- if (!priv->dma_rx_size)
- priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
- /* Earlier check for TBS */
- for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
- int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
-
- /* Setup per-TXQ tbs flag before TX descriptor alloc */
- tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
- }
-
- ret = alloc_dma_desc_resources(priv);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
- __func__);
- goto dma_desc_error;
- }
+ stmmac_reset_queues_param(priv);
- ret = init_dma_desc_rings(dev, GFP_KERNEL);
- if (ret < 0) {
- netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
- __func__);
- goto init_error;
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
}
ret = stmmac_hw_setup(dev, true);
@@ -3762,6 +3829,7 @@ int stmmac_open(struct net_device *dev)
stmmac_enable_all_queues(priv);
netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
return 0;
@@ -3769,18 +3837,32 @@ irq_error:
phylink_stop(priv->phylink);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
stmmac_hw_teardown(dev);
init_error:
- free_dma_desc_resources(priv);
-dma_desc_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
phylink_disconnect_phy(priv->phylink);
init_phy_error:
pm_runtime_put(priv->device);
return ret;
}
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ return ret;
+}
+
static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
{
set_bit(__FPE_REMOVING, &priv->fpe_task_state);
@@ -3797,13 +3879,11 @@ static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
* Description:
* This is the stop entry point of the driver.
*/
-int stmmac_release(struct net_device *dev)
+static int stmmac_release(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 chan;
- netif_tx_disable(dev);
-
if (device_may_wakeup(priv->device))
phylink_speed_down(priv->phylink, false);
/* Stop and disconnect the PHY */
@@ -3813,7 +3893,9 @@ int stmmac_release(struct net_device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ netif_tx_disable(dev);
/* Free the IRQ lines */
stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
@@ -3827,11 +3909,15 @@ int stmmac_release(struct net_device *dev)
stmmac_stop_all_dma(priv);
/* Release and free the Rx/Tx resources */
- free_dma_desc_resources(priv);
+ free_dma_desc_resources(priv, &priv->dma_conf);
/* Disable the MAC Rx/Tx */
stmmac_mac_set(priv, priv->ioaddr, false);
+ /* Power down the SerDes if present */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
netif_carrier_off(dev);
stmmac_release_ptp(priv);
@@ -3871,7 +3957,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
return true;
}
@@ -3889,7 +3975,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
int total_len, bool last_segment, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct dma_desc *desc;
u32 buff_size;
int tmp_len;
@@ -3900,7 +3986,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
dma_addr_t curr_addr;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3928,7 +4014,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
int desc_size;
if (likely(priv->extend_desc))
@@ -3990,7 +4076,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
int i;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -3998,7 +4084,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
hdr = sizeof(struct udphdr);
} else {
- proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ proto_hdr_len = skb_tcp_all_headers(skb);
hdr = tcp_hdrlen(skb);
}
@@ -4030,7 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
- priv->dma_tx_size);
+ priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -4142,7 +4228,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -4230,7 +4316,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int entry, first_tx;
dma_addr_t des;
- tx_q = &priv->tx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4293,7 +4379,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -4364,7 +4450,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -4479,9 +4565,13 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
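The hunk above makes the refill path pass an explicit gfp mask to the page
pool, so controllers whose DMA engine decodes at most 32 address bits receive
pages from ZONE_DMA32. A reduced sketch of the selection logic (stmmac names;
priv->dma_cap.addr64 holds the supported DMA address width in bits):

        gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;  /* refill runs in NAPI context */
        struct page *page;

        if (priv->dma_cap.addr64 <= 32)         /* 32-bit DMA mask at most */
                gfp |= GFP_DMA32;               /* keep buffers below 4 GiB */

        page = page_pool_alloc_pages(rx_q->page_pool, gfp);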
@@ -4494,13 +4584,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
p = rx_q->dma_rx + entry;
if (!buf->page) {
- buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->page)
break;
}
if (priv->sph && !buf->sec_page) {
- buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
if (!buf->sec_page)
break;
@@ -4529,7 +4619,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -4557,12 +4647,12 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
/* First descriptor, not last descriptor and not split header */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
/* First descriptor and last descriptor and not split header */
- return min_t(unsigned int, priv->dma_buf_sz, plen);
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
}
static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
@@ -4578,7 +4668,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
/* Not last descriptor */
if (status & rx_not_ls)
- return priv->dma_buf_sz;
+ return priv->dma_conf.dma_buf_sz;
plen = stmmac_get_rx_frame_len(priv, p, coe);
@@ -4589,7 +4679,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
dma_addr_t dma_addr;
@@ -4652,7 +4742,7 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
stmmac_enable_dma_transmission(priv, priv->ioaddr);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
tx_q->cur_tx = entry;
return STMMAC_XDP_TX;
@@ -4689,7 +4779,7 @@ static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
if (res == STMMAC_XDP_TX)
@@ -4722,7 +4812,7 @@ static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
res = STMMAC_XDP_REDIRECT;
break;
default:
- bpf_warn_invalid_xdp_action(act);
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
fallthrough;
case XDP_ABORTED:
trace_xdp_exception(priv->dev, prog, act);
@@ -4826,7 +4916,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int entry = rx_q->dirty_rx;
struct dma_desc *rx_desc = NULL;
bool ret = true;
@@ -4869,7 +4959,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
dma_wmb();
stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
}
if (rx_desc) {
@@ -4884,7 +4974,7 @@ static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
unsigned int next_entry = rx_q->cur_rx;
@@ -4906,7 +4996,7 @@ static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -4953,7 +5043,7 @@ read_again:
/* Prefetch the next RX descriptor */
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5000,16 +5090,8 @@ read_again:
buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
len += buf1_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
buf1_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
}
@@ -5074,7 +5156,7 @@ read_again:
*/
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
int status = 0, coe = priv->hw->rx_csum;
@@ -5087,7 +5169,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
int buf_sz;
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
- buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -5101,7 +5183,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
desc_size = sizeof(struct dma_desc);
}
- stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
rx_q->dma_rx_phy, desc_size);
}
while (count < limit) {
@@ -5145,7 +5227,7 @@ read_again:
break;
rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
- priv->dma_rx_size);
+ priv->dma_conf.dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -5186,16 +5268,8 @@ read_again:
buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
len += buf2_len;
- /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
- * Type frames (LLC/LLC-SNAP)
- *
- * llc_snap is never checked in GMAC >= 4, so this ACS
- * feature is always disabled and packets need to be
- * stripped manually.
- */
- if (likely(!(status & rx_not_ls)) &&
- (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
- unlikely(status != llc_snap))) {
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
if (buf2_len) {
buf2_len -= ETH_FCS_LEN;
len -= ETH_FCS_LEN;
@@ -5280,7 +5354,7 @@ read_again:
buf1_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->page, buf->page_offset, buf1_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->page);
@@ -5292,7 +5366,7 @@ read_again:
buf2_len, dma_dir);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
buf->sec_page, 0, buf2_len,
- priv->dma_buf_sz);
+ priv->dma_conf.dma_buf_sz);
/* Data payload appended into SKB */
page_pool_release_page(rx_q->page_pool, buf->sec_page);
@@ -5476,18 +5550,15 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
struct stmmac_priv *priv = netdev_priv(dev);
int txfifosz = priv->plat->tx_fifo_size;
+ struct stmmac_dma_conf *dma_conf;
const int mtu = new_mtu;
+ int ret;
if (txfifosz == 0)
txfifosz = priv->dma_cap.tx_fifo_size;
txfifosz /= priv->plat->tx_queues_to_use;
- if (netif_running(dev)) {
- netdev_err(priv->dev, "must be stopped to change its MTU\n");
- return -EBUSY;
- }
-
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
return -EINVAL;
@@ -5499,8 +5570,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
return -EINVAL;
- dev->mtu = mtu;
+ if (netif_running(dev)) {
+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
+ /* Try to allocate the new DMA conf with the new mtu */
+ dma_conf = stmmac_setup_dma_desc(priv, mtu);
+ if (IS_ERR(dma_conf)) {
+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
+ mtu);
+ return PTR_ERR(dma_conf);
+ }
+
+ stmmac_release(dev);
+
+ ret = __stmmac_open(dev, dma_conf);
+ kfree(dma_conf);
+ if (ret) {
+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ return ret;
+ }
+
+ stmmac_set_rx_mode(dev);
+ }
+ dev->mtu = mtu;
netdev_update_features(dev);
return 0;
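Note the ordering in the hunk above: the replacement DMA configuration is
allocated before stmmac_release() tears anything down, so a failed
stmmac_setup_dma_desc() leaves the interface running with its old rings, and
stmmac_set_rx_mode() is re-applied because the restart reprograms the MAC
filters. From userspace, the visible change is that the MTU can now be
updated on a live interface, e.g. (hypothetical interface name):

        ip link set dev eth0 mtu 9000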
@@ -5734,11 +5826,13 @@ static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = tx_q->queue_index;
struct stmmac_priv *priv;
int status;
- priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5753,21 +5847,7 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
if (unlikely(status & tx_hard_error_bump_tc)) {
/* Try to bump up the dma threshold on this failure */
- if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
- tc <= 256) {
- tc += 64;
- if (priv->plat->force_thresh_dma_mode)
- stmmac_set_dma_operation_mode(priv,
- tc,
- tc,
- chan);
- else
- stmmac_set_dma_operation_mode(priv,
- tc,
- SF_DMA_MODE,
- chan);
- priv->xstats.threshold = tc;
- }
+ stmmac_bump_dma_threshold(priv, chan);
} else if (unlikely(status == tx_hard_error)) {
stmmac_tx_err(priv, chan);
}
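With the queues now embedded in dma_conf, the MSI handlers recover the
private structure via two container_of() steps, as above. A standalone sketch
of the pattern (reduced types; the variable array index in the member
designator relies on the same compiler extension the kernel's container_of()
already depends on):

        #include <linux/container_of.h>

        struct tx_queue { int queue_index; /* ... */ };
        struct dma_conf { struct tx_queue tx_queue[8]; /* MTL_MAX_TX_QUEUES */ };
        struct priv     { /* ... */ struct dma_conf dma_conf; };

        static struct priv *priv_from_txq(struct tx_queue *tx_q)
        {
                int chan = tx_q->queue_index;
                struct dma_conf *conf =
                        container_of(tx_q, struct dma_conf, tx_queue[chan]);

                return container_of(conf, struct priv, dma_conf);
        }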
@@ -5778,10 +5858,12 @@ static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
int chan = rx_q->queue_index;
struct stmmac_priv *priv;
- priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
if (unlikely(!data)) {
netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
@@ -5812,10 +5894,10 @@ static void stmmac_poll_controller(struct net_device *dev)
if (priv->plat->multi_msi_en) {
for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
} else {
disable_irq(dev->irq);
stmmac_interrupt(dev->irq, dev);
@@ -5933,11 +6015,9 @@ static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
struct stmmac_priv *priv = netdev_priv(ndev);
int ret = 0;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
ret = eth_mac_addr(ndev, addr);
if (ret)
@@ -5996,34 +6076,34 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
return 0;
for (queue = 0; queue < rx_count; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
seq_printf(seq, "RX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
}
}
for (queue = 0; queue < tx_count; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
seq_printf(seq, "TX Queue %d:\n", queue);
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
}
}
@@ -6267,11 +6347,9 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
bool is_double = false;
int ret;
- ret = pm_runtime_get_sync(priv->device);
- if (ret < 0) {
- pm_runtime_put_noidle(priv->device);
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
return ret;
- }
if (be16_to_cpu(proto) == ETH_P_8021AD)
is_double = true;
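Several paths in this patch (open, set_mac_address, the VLAN filter ops)
convert pm_runtime_get_sync() to pm_runtime_resume_and_get(). The newer
helper drops the usage counter itself when the resume fails, removing the
error-path pm_runtime_put_noidle() boilerplate:

        /* before: usage counter must be dropped manually on failure */
        ret = pm_runtime_get_sync(priv->device);
        if (ret < 0) {
                pm_runtime_put_noidle(priv->device);
                return ret;
        }

        /* after: the helper drops the counter itself on failure */
        ret = pm_runtime_resume_and_get(priv->device);
        if (ret < 0)
                return ret;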
@@ -6327,7 +6405,7 @@ static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
__netif_tx_lock(nq, cpu);
/* Avoids TX time-out as we are sharing with slow path */
- nq->trans_start = jiffies;
+ txq_trans_cond_update(nq);
for (i = 0; i < num_frames; i++) {
int res;
@@ -6359,31 +6437,32 @@ void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_rx_dma(priv, queue);
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
u32 buf_size;
int ret;
- ret = __alloc_dma_rx_desc_resources(priv, queue);
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc RX desc.\n");
return;
}
- ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
if (ret) {
- __free_dma_rx_desc_resources(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init RX desc.\n");
return;
}
- stmmac_clear_rx_descriptors(priv, queue);
+ stmmac_reset_rx_queue(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
rx_q->dma_rx_phy, rx_q->queue_index);
@@ -6400,7 +6479,7 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
rx_q->queue_index);
} else {
stmmac_set_dma_bfsize(priv, priv->ioaddr,
- priv->dma_buf_sz,
+ priv->dma_conf.dma_buf_sz,
rx_q->queue_index);
}
@@ -6421,30 +6500,31 @@ void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
stmmac_stop_tx_dma(priv, queue);
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned long flags;
int ret;
- ret = __alloc_dma_tx_desc_resources(priv, queue);
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
if (ret) {
netdev_err(priv->dev, "Failed to alloc TX desc.\n");
return;
}
- ret = __init_dma_tx_desc_rings(priv, queue);
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
if (ret) {
- __free_dma_tx_desc_resources(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
netdev_err(priv->dev, "Failed to init TX desc.\n");
return;
}
- stmmac_clear_tx_descriptors(priv, queue);
+ stmmac_reset_tx_queue(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
tx_q->dma_tx_phy, tx_q->queue_index);
@@ -6463,6 +6543,143 @@ void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
spin_unlock_irqrestore(&ch->lock, flags);
}
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv, &priv->dma_conf);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->dma_conf.rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_conf.dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->dma_conf.tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ /* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
+dma_desc_error:
+ return ret;
+}
+
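stmmac_xdp_open() uses the usual kernel error-unwind idiom: each failure
jumps to a label that releases only what was set up before it, in reverse
order. The shape, reduced to a sketch with hypothetical step names:

        static int open_sketch(struct net_device *dev)
        {
                int ret;

                ret = step_alloc();             /* hypothetical helpers */
                if (ret)
                        goto dma_desc_error;

                ret = step_init();
                if (ret)
                        goto init_error;

                ret = step_request_irq();
                if (ret)
                        goto irq_error;

                return 0;

        irq_error:
                step_teardown_hw();             /* undo step_init */
        init_error:
                step_free();                    /* undo step_alloc */
        dma_desc_error:
                return ret;
        }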
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -6475,18 +6692,18 @@ int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
return -ENETDOWN;
if (!stmmac_xdp_is_enabled(priv))
- return -ENXIO;
+ return -EINVAL;
if (queue >= priv->plat->rx_queues_to_use ||
queue >= priv->plat->tx_queues_to_use)
return -EINVAL;
- rx_q = &priv->rx_queue[queue];
- tx_q = &priv->tx_queue[queue];
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
ch = &priv->channel[queue];
if (!rx_q->xsk_pool && !tx_q->xsk_pool)
- return -ENXIO;
+ return -EINVAL;
if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
/* EQoS does not have per-DMA channel SW interrupt,
@@ -6671,19 +6888,16 @@ static void stmmac_napi_add(struct net_device *dev)
spin_lock_init(&ch->lock);
if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
}
if (queue < priv->plat->tx_queues_to_use) {
- netif_tx_napi_add(dev, &ch->tx_napi,
- stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx);
}
if (queue < priv->plat->rx_queues_to_use &&
queue < priv->plat->tx_queues_to_use) {
netif_napi_add(dev, &ch->rxtx_napi,
- stmmac_napi_poll_rxtx,
- NAPI_POLL_WEIGHT);
+ stmmac_napi_poll_rxtx);
}
}
}
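The NAPI registrations track an upstream API change: netif_napi_add() lost
its weight parameter (NAPI_POLL_WEIGHT is now implied) and
netif_tx_napi_add() became netif_napi_add_tx(). Side by side:

        /* before */
        netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, NAPI_POLL_WEIGHT);
        netif_tx_napi_add(dev, &ch->tx_napi, stmmac_napi_poll_tx, NAPI_POLL_WEIGHT);

        /* after */
        netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
        netif_napi_add_tx(dev, &ch->tx_napi, stmmac_napi_poll_tx);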
@@ -6738,8 +6952,8 @@ int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
if (netif_running(dev))
stmmac_release(dev);
- priv->dma_rx_size = rx_size;
- priv->dma_tx_size = tx_size;
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
if (netif_running(dev))
ret = stmmac_open(dev);
@@ -6931,7 +7145,7 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
- if (priv->dma_cap.sphen) {
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
ndev->hw_features |= NETIF_F_GRO;
priv->sph_cap = true;
priv->sph = priv->sph_cap;
@@ -7038,16 +7252,17 @@ int stmmac_dvr_probe(struct device *device,
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- dev_err(priv->device,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err_probe(priv->device, ret,
+ "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
@@ -7074,18 +7289,13 @@ int stmmac_dvr_probe(struct device *device,
goto error_netdev_register;
}
- if (priv->plat->serdes_powerup) {
- ret = priv->plat->serdes_powerup(ndev,
- priv->plat->bsp_priv);
-
- if (ret < 0)
- goto error_serdes_powerup;
- }
-
#ifdef CONFIG_DEBUG_FS
stmmac_init_fs(ndev);
#endif
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
/* Let pm_runtime_put() disable the clocks.
* If CONFIG_PM is not enabled, the clocks will stay powered.
*/
@@ -7093,8 +7303,6 @@ int stmmac_dvr_probe(struct device *device,
return ret;
-error_serdes_powerup:
- unregister_netdev(ndev);
error_netdev_register:
phylink_destroy(priv->phylink);
error_xpcs_setup:
@@ -7125,6 +7333,8 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+ pm_runtime_get_sync(dev);
+
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
@@ -7143,8 +7353,6 @@ int stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
@@ -7152,6 +7360,9 @@ int stmmac_dvr_remove(struct device *dev)
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
@@ -7179,7 +7390,7 @@ int stmmac_suspend(struct device *dev)
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
- hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
if (priv->eee_enabled) {
priv->tx_path_in_lpi_mode = false;
@@ -7228,6 +7439,25 @@ int stmmac_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+}
+
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
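Factoring the per-queue resets into helpers lets stmmac_enable_tx_queue()
reuse them; note that stmmac_reset_tx_queue() also calls
netdev_tx_reset_queue(), which zeroes the queue's byte-queue-limit (BQL)
accounting so completions from before the reset are not charged against new
traffic. The BQL calls pair up as follows (sketch, names from the core netdev
API):

        struct netdev_queue *txq = netdev_get_tx_queue(priv->dev, queue);

        netdev_tx_sent_queue(txq, skb->len);            /* xmit path */
        netdev_tx_completed_queue(txq, pkts, bytes);    /* tx-clean path */
        netdev_tx_reset_queue(txq);                     /* ring reset: zero BQL */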
/**
* stmmac_reset_queues_param - reset queue parameters
* @priv: device pointer
@@ -7238,22 +7468,11 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
- for (queue = 0; queue < rx_cnt; queue++) {
- struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
- rx_q->cur_rx = 0;
- rx_q->dirty_rx = 0;
- }
-
- for (queue = 0; queue < tx_cnt; queue++) {
- struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
-
- tx_q->cur_tx = 0;
- tx_q->dirty_tx = 0;
- tx_q->mss = 0;
+ for (queue = 0; queue < rx_cnt; queue++)
+ stmmac_reset_rx_queue(priv, queue);
- netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
- }
+ for (queue = 0; queue < tx_cnt; queue++)
+ stmmac_reset_tx_queue(priv, queue);
}
/**
@@ -7313,7 +7532,7 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_free_tx_skbufs(priv);
- stmmac_clear_descriptors(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
stmmac_hw_setup(ndev, false);
stmmac_init_coalesce(priv);
@@ -7322,6 +7541,7 @@ int stmmac_resume(struct device *dev)
stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
mutex_unlock(&priv->lock);
rtnl_unlock();
@@ -7338,7 +7558,7 @@ static int __init stmmac_cmdline_opt(char *str)
char *opt;
if (!str || !*str)
- return -EINVAL;
+ return 1;
while ((opt = strsep(&str, ",")) != NULL) {
if (!strncmp(opt, "debug:", 6)) {
if (kstrtoint(opt + 6, 0, &debug))
@@ -7369,11 +7589,11 @@ static int __init stmmac_cmdline_opt(char *str)
goto err;
}
}
- return 0;
+ return 1;
err:
pr_err("%s: ERROR broken module parameter conversion", __func__);
- return -EINVAL;
+ return 1;
}
__setup("stmmaceth=", stmmac_cmdline_opt);