Diffstat:
 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 599
 1 file changed, 538 insertions(+), 61 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 358119d98358..1767c60056c5 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -91,6 +91,16 @@ static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
return cpu % priv->nthreads;
}
+static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+ writel(data, priv->cm3_base + offset);
+}
+
+static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
+{
+ return readl(priv->cm3_base + offset);
+}
+
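
These two helpers are the only access path to the CM3 SRAM shared with the flow-control firmware; all of the flow-control code added below builds read-modify-write sequences on top of them. A minimal sketch of that pattern, assuming only the helpers above and a register offset defined in mvpp2.h (the mvpp2_cm3_set_bit() name is hypothetical, shown for illustration):

    /* Set a bit in a CM3-shared register via read-modify-write.
     * Callers must hold priv->mss_spinlock (added by this patch)
     * when the register is also written by the firmware.
     */
    static void mvpp2_cm3_set_bit(struct mvpp2 *priv, u32 offset, u32 bit)
    {
            u32 val = mvpp2_cm3_read(priv, offset);

            val |= bit;
            mvpp2_cm3_write(priv, offset, val);
    }
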
static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
enum dma_data_direction dma_dir)
@@ -319,7 +329,7 @@ static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
unsigned int nrxqs;
- if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
return 1;
/* According to the PPv2.2 datasheet and our experiments on
@@ -384,7 +394,7 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
if (!IS_ALIGNED(size, 16))
return -EINVAL;
- /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
+ /* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
* bytes per buffer pointer
*/
if (priv->hw_version == MVPP21)
@@ -413,6 +423,19 @@ static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
val |= MVPP2_BM_START_MASK;
+
+ val &= ~MVPP2_BM_LOW_THRESH_MASK;
+ val &= ~MVPP2_BM_HIGH_THRESH_MASK;
+
+ /* Set the 8-pool BPPI thresholds for MVPP23 */
+ if (priv->hw_version == MVPP23) {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
+ } else {
+ val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
+ val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
+ }
+
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
bm_pool->size = size;
@@ -446,7 +469,7 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
u32 val;
u32 dma_addr_highbits, phys_addr_highbits;
@@ -581,6 +604,16 @@ err_unroll_pools:
return err;
}
+/* Routine to enable PPv23 8-pool mode */
+static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
+ val |= MVPP23_BM_8POOL_MODE;
+ mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
+}
+
static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
@@ -634,6 +667,9 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
if (!priv->bm_pools)
return -ENOMEM;
+ if (priv->hw_version == MVPP23)
+ mvpp23_bm_set_8pool_mode(priv);
+
err = mvpp2_bm_pools_init(dev, priv);
if (err < 0)
return err;
@@ -731,6 +767,210 @@ static void *mvpp2_buf_alloc(struct mvpp2_port *port,
return data;
}
+/* Routine to enable flow control for the port's RXQs */
+static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, host_id, q;
+ int fq = port->first_rxq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow control enable bit to prevent a race between FW and
+ * kernel. If Flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Set same Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set stop and start Flow control RXQ thresholds */
+ val = MSS_THRESHOLD_START;
+ val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+ /* Set RXQ port ID */
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ /* Calculate RXQ host ID:
+ * In Single queue mode: Host ID equals the Host ID used for
+ * the shared RX interrupt
+ * In Multi queue mode: Host ID equals RXQ ID / number of
+ * CoS queues
+ * In Single resource mode: Host ID is always 0
+ */
+ if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
+ host_id = port->nqvecs;
+ else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
+ host_id = q;
+ else
+ host_id = 0;
+
+ /* Set RXQ host ID */
+ val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify Firmware that the Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
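
The enable/disable routines here and below all follow the same three-step firmware handshake: park the enable bit, rewrite the CM3 config space, then restore the saved enable state together with the update-command bit. A sketch of that shared skeleton, assuming the MSS_FC_COM_REG bits from this patch (the mvpp2_fc_update() wrapper is hypothetical, shown only to make the pattern explicit):

    static void mvpp2_fc_update(struct mvpp2 *priv,
                                void (*reconfig)(struct mvpp2 *priv))
    {
            unsigned long flags;
            u32 val, cm3_state;

            spin_lock_irqsave(&priv->mss_spinlock, flags);

            /* 1. Clear the enable bit so FW stops applying the config */
            val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
            cm3_state = val & FLOW_CONTROL_ENABLE_BIT;
            val &= ~FLOW_CONTROL_ENABLE_BIT;
            mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

            /* 2. Rewrite the flow-control config space */
            reconfig(priv);

            /* 3. Restore enable state and ask FW to reload the config */
            val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
            val |= FLOW_CONTROL_UPDATE_COMMAND_BIT | cm3_state;
            mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

            spin_unlock_irqrestore(&priv->mss_spinlock, flags);
    }
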
+/* Routine to disable flow control for the port's RXQs */
+static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
+{
+ int val, cm3_state, q;
+ unsigned long flags;
+ int fq = port->first_rxq;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow control enable bit to prevent a race between FW and
+ * kernel. If Flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Disable Flow control for all RXQs */
+ for (q = 0; q < port->nrxqs; q++) {
+ /* Set threshold 0 to disable Flow control */
+ val = 0;
+ val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
+ mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);
+
+ val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
+
+ val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
+ + MSS_RXQ_ASS_HOSTID_OFFS));
+
+ mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
+ }
+
+ /* Notify Firmware that the Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Routine to disable/enable flow control for a BM pool */
+static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
+ struct mvpp2_bm_pool *pool,
+ bool en)
+{
+ int val, cm3_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->priv->mss_spinlock, flags);
+
+ /* Clear the Flow control enable bit to prevent a race between FW and
+ * kernel. If Flow control was enabled, it will be re-enabled below.
+ */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
+ val &= ~FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ /* Check if the BM pool should be enabled/disabled */
+ if (en) {
+ /* Set BM pool start and stop thresholds per port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val |= MSS_BUF_POOL_PORT_OFFS(port->id);
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ val |= MSS_THRESHOLD_STOP;
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ } else {
+ /* Remove BM pool from the port */
+ val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
+ val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);
+
+ /* Zero BM pool start and stop thresholds to disable pool
+ * flow control if pool empty (not used by any port)
+ */
+ if (!pool->buf_num) {
+ val &= ~MSS_BUF_POOL_START_MASK;
+ val &= ~MSS_BUF_POOL_STOP_MASK;
+ }
+
+ mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
+ }
+
+ /* Notify Firmware that the Flow control config space is ready for update */
+ val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ val |= cm3_state;
+ mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);
+
+ spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
+}
+
+/* Disable/enable flow control for BM pools on all ports */
+static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
+{
+ struct mvpp2_port *port;
+ int i, j;
+
+ for (i = 0; i < priv->port_count; i++) {
+ port = priv->port_list[i];
+ if (port->priv->percpu_pools) {
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
+ port->tx_fc & en);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
+ }
+ }
+}
+
+static int mvpp2_enable_global_fc(struct mvpp2 *priv)
+{
+ int val, timeout = 0;
+
+ /* Enable global flow control. At this stage global
+ * flow control is enabled, but still disabled per port.
+ */
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+ val |= FLOW_CONTROL_ENABLE_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ /* Check if the Firmware is running; disable FC if not */
+ val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
+ mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);
+
+ while (timeout < MSS_FC_MAX_TIMEOUT) {
+ val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
+
+ if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
+ return 0;
+ usleep_range(10, 20);
+ timeout++;
+ }
+
+ priv->global_tx_fc = false;
+ return -EOPNOTSUPP;
+}
+
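
The bounded poll above can also be expressed with read_poll_timeout() from <linux/iopoll.h>; a sketch under the assumption that MSS_FC_MAX_TIMEOUT iterations of usleep_range(10, 20) correspond to a budget on the order of tens of milliseconds (the 100000us figure below is illustrative, not taken from the driver):

    u32 val;
    int err;

    /* Poll MSS_FC_COM_REG until FW clears the update-command bit */
    err = read_poll_timeout(mvpp2_cm3_read, val,
                            !(val & FLOW_CONTROL_UPDATE_COMMAND_BIT),
                            10, 100000, false, priv, MSS_FC_COM_REG);
    if (err)
            priv->global_tx_fc = false;     /* FW not running */
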
/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
dma_addr_t buf_dma_addr,
@@ -742,7 +982,7 @@ static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
if (test_bit(thread, &port->priv->lock_map))
spin_lock_irqsave(&port->bm_lock[thread], flags);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
u32 val = 0;
if (sizeof(dma_addr_t) == 8)
@@ -1061,6 +1301,16 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
new_long_pool = MVPP2_BM_LONG;
if (new_long_pool != port->pool_long->id) {
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port,
+ port->pool_short,
+ false);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ false);
+ }
+
/* Remove port from old short & long pool */
port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
port->pool_long->pkt_size);
@@ -1078,6 +1328,25 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
mvpp2_swf_bm_pool_init(port);
mvpp2_set_hw_csum(port, new_long_pool);
+
+ if (port->tx_fc) {
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ mvpp2_bm_pool_update_fc(port, port->pool_long,
+ true);
+ else
+ mvpp2_bm_pool_update_fc(port, port->pool_short,
+ true);
+ }
+
+ /* Update L4 checksum when jumbo is enabled/disabled on the port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
out_set:
@@ -1133,14 +1402,19 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
static void mvpp2_interrupts_mask(void *arg)
{
struct mvpp2_port *port = arg;
+ int cpu = smp_processor_id();
+ u32 thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}
/* Unmask the current thread's Rx/Tx interrupts.
@@ -1150,20 +1424,25 @@ static void mvpp2_interrupts_mask(void *arg)
static void mvpp2_interrupts_unmask(void *arg)
{
struct mvpp2_port *port = arg;
- u32 val;
+ int cpu = smp_processor_id();
+ u32 val, thread;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (cpu >= port->priv->nthreads)
return;
+ thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_thread_write(port->priv,
- mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+ mvpp2_thread_write(port->priv, thread,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, thread,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
static void
@@ -1172,7 +1451,7 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
u32 val;
int i;
- if (port->priv->hw_version != MVPP22)
+ if (port->priv->hw_version == MVPP21)
return;
if (mask)
@@ -1188,6 +1467,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
mvpp2_thread_write(port->priv, v->sw_thread_id,
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+ mvpp2_thread_write(port->priv, v->sw_thread_id,
+ MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+ MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}
}
@@ -1199,7 +1481,7 @@ static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
- return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
+ return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}
/* Port configuration routines */
@@ -1231,9 +1513,9 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII;
+ val |= GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val |= GENCONF_CTRL0_PORT1_RGMII_MII;
+ val |= GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
@@ -1250,9 +1532,9 @@ static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
if (port->gop_id > 1) {
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val &= ~GENCONF_CTRL0_PORT0_RGMII;
+ val &= ~GENCONF_CTRL0_PORT2_RGMII;
else if (port->gop_id == 3)
- val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
+ val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}
}
@@ -1280,6 +1562,49 @@ static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}
+static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 val;
+
+ val = readl(fca + MVPP22_FCA_CONTROL_REG);
+ val &= ~MVPP22_FCA_ENABLE_PERIODIC;
+ if (en)
+ val |= MVPP22_FCA_ENABLE_PERIODIC;
+ writel(val, fca + MVPP22_FCA_CONTROL_REG);
+}
+
+static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
+{
+ struct mvpp2 *priv = port->priv;
+ void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
+ u32 lsb, msb;
+
+ lsb = timer & MVPP22_FCA_REG_MASK;
+ msb = timer >> MVPP22_FCA_REG_SIZE;
+
+ writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
+ writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
+}
+
+/* Set Flow Control timer x100 faster than pause quanta to ensure that link
+ * partner won't send traffic if port is in XOFF mode.
+ */
+static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
+{
+ u32 timer;
+
+ timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
+ * FC_QUANTA;
+
+ mvpp22_gop_fca_enable_periodic(port, false);
+
+ mvpp22_gop_fca_set_timer(port, timer);
+
+ mvpp22_gop_fca_enable_periodic(port, true);
+}
+
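
For a concrete feel of the formula above, a worked example with assumed values (tclk = 333 MHz is an assumption here; the real FC_CLK_DIVIDER and FC_QUANTA constants live in mvpp2.h):

    /*
     * timer = (tclk / (USEC_PER_SEC * FC_CLK_DIVIDER)) * FC_QUANTA
     *
     * Assuming tclk = 333000000, FC_CLK_DIVIDER = 100, FC_QUANTA = 0xFFFF:
     *   333000000 / (1000000 * 100) = 3
     *   3 * 0xFFFF = 196605 clock ticks per periodic pause frame,
     * i.e. pause frames are re-emitted far faster than the advertised
     * pause quanta expire, as the comment above requires.
     */
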
static int mvpp22_gop_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
@@ -1324,6 +1649,8 @@ static int mvpp22_gop_init(struct mvpp2_port *port)
val |= GENCONF_SOFT_RESET1_GOP;
regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
+ mvpp22_gop_fca_set_periodic_timer(port);
+
unsupported_conf:
return 0;
@@ -1817,7 +2144,7 @@ static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
MVPP2_GMAC_PORT_RESET_MASK;
writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
+ if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
writel(val, port->base + MVPP22_XLG_CTRL0_REG);
@@ -1830,7 +2157,7 @@ static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -1851,7 +2178,7 @@ static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
void __iomem *mpcs, *xpcs;
u32 val;
- if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
+ if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
return;
mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
@@ -2287,7 +2614,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
int queue;
/* If the thread isn't used, don't do anything */
- if (smp_processor_id() > port->priv->nthreads)
+ if (smp_processor_id() >= port->priv->nthreads)
return;
for (queue = 0; queue < port->ntxqs; queue++) {
@@ -2348,6 +2675,20 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
}
}
+/* Set the number of non-occupied descriptors threshold */
+static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq)
+{
+ u32 val;
+
+ mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+ val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+ val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+ val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+ mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
/* Set the number of packets that will be received before Rx interrupt
* will be generated by HW.
*/
@@ -2611,6 +2952,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
mvpp2_rx_pkts_coal_set(port, rxq);
mvpp2_rx_time_coal_set(port, rxq);
+ /* Set the number of non-occupied descriptors threshold */
+ mvpp2_set_rxq_free_tresh(port, rxq);
+
/* Add number of descriptors ready for receiving packets */
mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
@@ -2928,6 +3272,9 @@ static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
for (queue = 0; queue < port->nrxqs; queue++)
mvpp2_rxq_deinit(port, port->rxqs[queue]);
+
+ if (port->tx_fc)
+ mvpp2_rxq_disable_fc(port);
}
/* Init all Rx queues for port */
@@ -2940,6 +3287,10 @@ static int mvpp2_setup_rxqs(struct mvpp2_port *port)
if (err)
goto err_cleanup;
}
+
+ if (port->tx_fc)
+ mvpp2_rxq_enable_fc(port);
+
return 0;
err_cleanup:
@@ -3563,17 +3914,17 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
frag_size = bm_pool->frag_size;
if (xdp_prog) {
- xdp.data_hard_start = data;
- xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
- xdp.data_end = xdp.data + rx_bytes;
- xdp.frame_sz = PAGE_SIZE;
+ struct xdp_rxq_info *xdp_rxq;
if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
- xdp.rxq = &rxq->xdp_rxq_short;
+ xdp_rxq = &rxq->xdp_rxq_short;
else
- xdp.rxq = &rxq->xdp_rxq_long;
+ xdp_rxq = &rxq->xdp_rxq_long;
- xdp_set_data_meta_invalid(&xdp);
+ xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
+ xdp_prepare_buff(&xdp, data,
+ MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
+ rx_bytes, false);
ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
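
The replacement above tracks the kernel-wide conversion to the xdp_init_buff()/xdp_prepare_buff() helpers from include/net/xdp.h: the frame size and rxq are recorded once, and data/data_end/data_meta are derived per packet instead of being assigned by hand. In isolation, the new call sequence looks like this (names as used in the hunk above):

    struct xdp_buff xdp;

    xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
    /* data = hard_start + headroom, data_end = data + rx_bytes,
     * metadata marked unsupported (last argument false)
     */
    xdp_prepare_buff(&xdp, data, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
                     rx_bytes, false);
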
@@ -3684,7 +4035,7 @@ static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
struct mvpp2_tx_desc *desc)
{
/* We only need to clear the low bits */
- if (port->priv->hw_version != MVPP21)
+ if (port->priv->hw_version >= MVPP22)
desc->pp22.ptp_descriptor &=
cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
}
@@ -4196,7 +4547,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
/* Enable interrupts on all threads */
mvpp2_interrupts_enable(port);
- if (port->priv->hw_version == MVPP22)
+ if (port->priv->hw_version >= MVPP22)
mvpp22_mode_reconfigure(port);
if (port->phylink) {
@@ -4239,6 +4590,8 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
new_rx_pending = MVPP2_MAX_RXD_MAX;
+ else if (ring->rx_pending < MSS_THRESHOLD_START)
+ new_rx_pending = MSS_THRESHOLD_START;
else if (!IS_ALIGNED(ring->rx_pending, 16))
new_rx_pending = ALIGN(ring->rx_pending, 16);
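
The new branch guarantees the ring can hold at least MSS_THRESHOLD_START descriptors, since that is the occupancy at which flow control stops the sender; note the else-if chain applies exactly one adjustment per call. A small worked illustration of the alignment arm (the values are illustrative):

    /* ALIGN(x, a) rounds up to the next multiple of a:
     *   ALIGN(100, 16) == 112
     *   ALIGN(112, 16) == 112 (already aligned, IS_ALIGNED() is true)
     */
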
@@ -4346,9 +4699,10 @@ static void mvpp2_irqs_deinit(struct mvpp2_port *port)
}
}
-static bool mvpp22_rss_is_supported(void)
+static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
{
- return queue_mode == MVPP2_QDIST_MULTI_MODE;
+ return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
+ !(port->flags & MVPP2_F_LOOPBACK);
}
static int mvpp2_open(struct net_device *dev)
@@ -4412,7 +4766,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->port_irq) {
+ if (priv->hw_version >= MVPP22 && port->port_irq) {
err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
@@ -4579,6 +4933,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
*/
static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
{
+ bool change_percpu = (percpu != priv->percpu_pools);
int numbufs = MVPP2_BM_POOLS_NUM, i;
struct mvpp2_port *port = NULL;
bool status[MVPP2_MAX_PORTS];
@@ -4594,6 +4949,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
if (priv->percpu_pools)
numbufs = port->nrxqs * 2;
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, false);
+
for (i = 0; i < numbufs; i++)
mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
@@ -4608,6 +4966,9 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
mvpp2_open(port->dev);
}
+ if (change_percpu)
+ mvpp2_bm_pool_update_priv_fc(priv, true);
+
return 0;
}
@@ -5153,7 +5514,7 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0, i, loc = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5188,7 +5549,7 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
switch (info->cmd) {
@@ -5209,7 +5570,9 @@ static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
- return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
}
static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
@@ -5218,7 +5581,7 @@ static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (indir)
@@ -5236,7 +5599,7 @@ static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5257,7 +5620,7 @@ static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
struct mvpp2_port *port = netdev_priv(dev);
int ret = 0;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (rss_context >= MVPP22_N_RSS_TABLES)
return -EINVAL;
@@ -5279,7 +5642,7 @@ static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!mvpp22_rss_is_supported())
+ if (!mvpp22_rss_is_supported(port))
return -EOPNOTSUPP;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
@@ -5464,7 +5827,7 @@ static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
return;
}
- /* Handle the more complicated PPv2.2 case */
+ /* Handle the more complicated PPv2.2 and PPv2.3 case */
for (i = 0; i < port->nqvecs; i++) {
struct mvpp2_queue_vector *qv = port->qvecs + i;
@@ -5596,7 +5959,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_oversize_rxq_set(port);
mvpp2_cls_port_config(port);
- if (mvpp22_rss_is_supported())
+ if (mvpp22_rss_is_supported(port))
mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
@@ -5641,7 +6004,7 @@ static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
/* Checks if the port dt description has the required Tx interrupts:
* - PPv2.1: there are no such interrupts.
- * - PPv2.2:
+ * - PPv2.2 and PPv2.3:
* - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
* - The new ones have: "hifX" with X in [0..8]
*
@@ -5883,6 +6246,11 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
+ if (port->priv->global_tx_fc) {
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ }
+
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
@@ -5973,7 +6341,7 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_FLOW_CTRL_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -6060,7 +6428,7 @@ static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
MVPP2_GMAC_PORT_RESET_MASK,
MVPP2_GMAC_PORT_RESET_MASK);
- if (port->priv->hw_version == MVPP22) {
+ if (port->priv->hw_version >= MVPP22) {
mvpp22_gop_mask_irq(port);
phy_power_off(port->comphy);
@@ -6114,7 +6482,7 @@ static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- if (port->priv->hw_version == MVPP22 &&
+ if (port->priv->hw_version >= MVPP22 &&
port->phy_interface != interface) {
port->phy_interface = interface;
@@ -6162,6 +6530,7 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
u32 val;
+ int i;
if (mvpp2_is_xlg(interface)) {
if (!phylink_autoneg_inband(mode)) {
@@ -6212,6 +6581,23 @@ static void mvpp2_mac_link_up(struct phylink_config *config,
val);
}
+ if (port->priv->global_tx_fc) {
+ port->tx_fc = tx_pause;
+ if (tx_pause)
+ mvpp2_rxq_enable_fc(port);
+ else
+ mvpp2_rxq_disable_fc(port);
+ if (port->priv->percpu_pools) {
+ for (i = 0; i < port->nrxqs; i++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
+ } else {
+ mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
+ mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
+ }
+ if (port->priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
+ }
+
mvpp2_port_enable(port);
mvpp2_egress_enable(port);
@@ -6478,7 +6864,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
NETIF_F_HW_VLAN_CTAG_FILTER;
- if (mvpp22_rss_is_supported()) {
+ if (mvpp22_rss_is_supported(port)) {
dev->hw_features |= NETIF_F_RXHASH;
dev->features |= NETIF_F_NTUPLE;
}
@@ -6629,7 +7015,7 @@ static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
}
-/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2.
+/* Initialize Rx FIFO's: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB fixed space must be assigned for the loopback port.
 * Redistribute remaining available 44kB space among all active ports.
* Guarantee minimum 32kB for 10G port and 8kB for port 1, capable of 2.5G
@@ -6678,6 +7064,55 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
+/* Configure Rx FIFO Flow control thresholds */
+static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
+{
+ int port, val;
+
+ /* Port 0: maximum speed 10Gb/s port
+ * required by spec: RX FIFO threshold 9KB
+ * Port 1: maximum speed 5Gb/s port
+ * required by spec: RX FIFO threshold 4KB
+ * Port 2: maximum speed 1Gb/s port
+ * required by spec: RX FIFO threshold 2KB
+ */
+
+ /* Without loopback port */
+ for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
+ if (port == 0) {
+ val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else if (port == 1) {
+ val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ } else {
+ val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
+ << MVPP2_RX_FC_TRSH_OFFS;
+ val &= MVPP2_RX_FC_TRSH_MASK;
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+ }
+ }
+}
+
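
The three branches above differ only in the threshold constant, so the same configuration could be written table-driven; a hypothetical equivalent, assuming the MVPP23_PORTx_FIFO_TRSH constants from mvpp2.h:

    static void mvpp23_rx_fifo_fc_set_tresh(struct mvpp2 *priv)
    {
            /* Thresholds indexed by port, loopback port excluded */
            static const u32 tresh[MVPP2_MAX_PORTS - 1] = {
                    MVPP23_PORT0_FIFO_TRSH, /* 10Gb/s port, 9KB */
                    MVPP23_PORT1_FIFO_TRSH, /* 5Gb/s port, 4KB */
                    MVPP23_PORT2_FIFO_TRSH, /* 1Gb/s port, 2KB */
            };
            int port;
            u32 val;

            for (port = 0; port < ARRAY_SIZE(tresh); port++) {
                    val = (tresh[port] / MVPP2_RX_FC_TRSH_UNIT)
                            << MVPP2_RX_FC_TRSH_OFFS;
                    val &= MVPP2_RX_FC_TRSH_MASK;
                    mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
            }
    }
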
+/* Enable/disable Rx FIFO Flow control */
+void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
+{
+ int val;
+
+ val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
+
+ if (en)
+ val |= MVPP2_RX_FC_EN;
+ else
+ val &= ~MVPP2_RX_FC_EN;
+
+ mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
+}
+
static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
{
int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
@@ -6686,9 +7121,9 @@ static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
}
-/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2.
- * 3kB fixed space must be assigned for the loopback port.
- * Redistribute remaining available 16kB space among all active ports.
+/* Initialize TX FIFO's: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
+ * 1kB fixed space must be assigned for the loopback port.
+ * Redistribute remaining available 18kB space among all active ports.
* The 10G interface should use 10kB (which is maximum possible size
* per single port).
*/
@@ -6699,9 +7134,9 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
int size_remainder;
int port, size;
- /* The loopback requires fixed 3kB of the FIFO space assignment. */
+ /* The loopback requires fixed 1kB of the FIFO space assignment. */
mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
- MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ MVPP22_TX_FIFO_DATA_SIZE_1KB);
port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
/* Set TX FIFO size to 0 for inactive ports. */
@@ -6709,7 +7144,7 @@ static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
mvpp22_tx_fifo_set_hw(priv, port, 0);
/* Assign remaining TX FIFO space among all active ports. */
- size_remainder = MVPP22_TX_FIFO_DATA_SIZE_16KB;
+ size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
remaining_ports_count = hweight_long(port_map);
for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
@@ -6794,7 +7229,7 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
if (dram_target_info)
mvpp2_conf_mbus_windows(dram_target_info, priv);
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
mvpp2_axi_init(priv);
/* Disable HW PHY polling */
@@ -6829,6 +7264,8 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
} else {
mvpp22_rx_fifo_init(priv);
mvpp22_tx_fifo_init(priv);
+ if (priv->hw_version == MVPP23)
+ mvpp23_rx_fifo_fc_set_tresh(priv);
}
if (priv->hw_version == MVPP21)
@@ -6854,6 +7291,25 @@ static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_get_sram(struct platform_device *pdev,
+ struct mvpp2 *priv)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (!res) {
+ if (has_acpi_companion(&pdev->dev))
+ dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
+ else
+ dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
+ return 0;
+ }
+
+ priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
+
+ return PTR_ERR_OR_ZERO(priv->cm3_base);
+}
+
static int mvpp2_probe(struct platform_device *pdev)
{
const struct acpi_device_id *acpi_id;
@@ -6910,9 +7366,18 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->iface_base))
return PTR_ERR(priv->iface_base);
+
+ /* Map CM3 SRAM */
+ err = mvpp2_get_sram(pdev, priv);
+ if (err)
+ dev_warn(&pdev->dev, "Fail to alloc CM3 SRAM\n");
+
+ /* Enable global Flow Control only if the SRAM handle is not NULL */
+ if (priv->cm3_base)
+ priv->global_tx_fc = true;
}
- if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
+ if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
priv->sysctrl_base =
syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"marvell,system-controller");
@@ -6925,7 +7390,7 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
- if (priv->hw_version == MVPP22 &&
+ if (priv->hw_version >= MVPP22 &&
mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
priv->percpu_pools = 1;
@@ -6970,7 +7435,7 @@ static int mvpp2_probe(struct platform_device *pdev)
if (err < 0)
goto err_pp_clk;
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
if (IS_ERR(priv->mg_clk)) {
err = PTR_ERR(priv->mg_clk);
@@ -7011,7 +7476,7 @@ static int mvpp2_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (priv->hw_version == MVPP22) {
+ if (priv->hw_version >= MVPP22) {
err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
if (err)
goto err_axi_clk;
@@ -7031,6 +7496,12 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->port_map |= BIT(i);
}
+ if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
+ priv->hw_version = MVPP23;
+
+ /* Init mss lock */
+ spin_lock_init(&priv->mss_spinlock);
+
/* Initialize network controller */
err = mvpp2_init(pdev, priv);
if (err < 0) {
@@ -7070,6 +7541,12 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_port_probe;
}
+ if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
+ err = mvpp2_enable_global_fc(priv);
+ if (err)
+ dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
+ }
+
mvpp2_dbgfs_init(priv, pdev->name);
platform_set_drvdata(pdev, priv);
@@ -7086,10 +7563,10 @@ err_axi_clk:
clk_disable_unprepare(priv->axi_clk);
err_mg_core_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_core_clk);
err_mg_clk:
- if (priv->hw_version == MVPP22)
+ if (priv->hw_version >= MVPP22)
clk_disable_unprepare(priv->mg_clk);
err_gop_clk:
clk_disable_unprepare(priv->gop_clk);