Diffstat (limited to 'drivers/net/ethernet/stmicro')
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig             |   1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h            |  16
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c     |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c |  14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c       |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c     |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h          | 132
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c     | 787
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c    | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c      |  41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c              |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h              |  47
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h               |   9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c          | 192
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h            |  23
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c    | 105
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c       | 435
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c       |   4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c        | 173
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c   |  29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c        |   3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c  | 516
22 files changed, 2490 insertions, 167 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 2325b40dff6e..338e25a6374e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -200,6 +200,7 @@ endif
config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
+ depends on COMMON_CLK
---help---
This selects the platform specific bus support for the stmmac driver.
This driver was tested on XLINX XC2V3000 FF1152AMT0221
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index ed872eed1cab..49aa56ca09cc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -75,6 +75,7 @@ struct stmmac_extra_stats {
unsigned long rx_missed_cntr;
unsigned long rx_overflow_cntr;
unsigned long rx_vlan;
+ unsigned long rx_split_hdr_pkt_n;
/* Tx/Rx IRQ error info */
unsigned long tx_undeflow_irq;
unsigned long tx_process_stopped_irq;
@@ -354,6 +355,11 @@ struct dma_features {
unsigned int frpbs;
unsigned int frpes;
unsigned int addr64;
+ unsigned int rssen;
+ unsigned int vlhash;
+ unsigned int sphen;
+ unsigned int vlins;
+ unsigned int dvlan;
};
/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
@@ -381,6 +387,16 @@ struct dma_features {
#define JUMBO_LEN 9000
+/* Receive Side Scaling */
+#define STMMAC_RSS_HASH_KEY_SIZE 40
+#define STMMAC_RSS_MAX_TABLE_SIZE 256
+
+/* VLAN */
+#define STMMAC_VLAN_NONE 0x0
+#define STMMAC_VLAN_REMOVE 0x1
+#define STMMAC_VLAN_INSERT 0x2
+#define STMMAC_VLAN_REPLACE 0x3
+
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
index 6ce3a7fb41ab..527f93320a5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -62,12 +62,10 @@ static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
{
int phy_mode;
- struct resource *res;
void __iomem *ctl_block;
struct anarion_gmac *gmac;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ctl_block = devm_ioremap_resource(&pdev->dev, res);
+ ctl_block = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(ctl_block)) {
dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
PTR_ERR(ctl_block));
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index 3a14cdd01f5f..dd9967aeda22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -333,6 +333,9 @@ static void *tegra_eqos_probe(struct platform_device *pdev,
usleep_range(2000, 4000);
gpiod_set_value(eqos->reset, 0);
+ /* MDIO bus was already reset just above */
+ data->mdio_bus_data->needs_reset = false;
+
eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
if (IS_ERR(eqos->rst)) {
err = PTR_ERR(eqos->rst);
@@ -415,7 +418,6 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
const struct dwc_eth_dwmac_data *data;
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
void *priv;
int ret;
@@ -428,17 +430,11 @@ static int dwc_eth_dwmac_probe(struct platform_device *pdev)
* resource initialization is done in the glue logic.
*/
stmmac_res.irq = platform_get_irq(pdev, 0);
- if (stmmac_res.irq < 0) {
- if (stmmac_res.irq != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "IRQ configuration information not found\n");
-
+ if (stmmac_res.irq < 0)
return stmmac_res.irq;
- }
stmmac_res.wol_irq = stmmac_res.irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- stmmac_res.addr = devm_ioremap_resource(&pdev->dev, res);
+ stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(stmmac_res.addr))
return PTR_ERR(stmmac_res.addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
index 88eb16954627..bbc16b5a410a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -46,7 +46,6 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
struct meson_dwmac *dwmac;
- struct resource *res;
int ret;
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
@@ -63,8 +62,7 @@ static int meson6_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->reg = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->reg)) {
ret = PTR_ERR(dwmac->reg);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 786ca4a7bf36..9cda29e4b89d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -308,7 +308,6 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
struct stmmac_resources stmmac_res;
- struct resource *res;
struct meson8b_dwmac *dwmac;
int ret;
@@ -332,8 +331,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
ret = -EINVAL;
goto err_remove_config_dt;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- dwmac->regs = devm_ioremap_resource(&pdev->dev, res);
+ dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dwmac->regs)) {
ret = PTR_ERR(dwmac->regs);
goto err_remove_config_dt;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 3174b701aa90..7357b8bdc128 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -32,6 +32,9 @@
#define XGMAC_CONFIG_ARPEN BIT(31)
#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT 12
+#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
#define XGMAC_CONFIG_S2KP BIT(11)
#define XGMAC_CONFIG_LM BIT(10)
#define XGMAC_CONFIG_IPC BIT(9)
@@ -44,6 +47,7 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_VTFE BIT(16)
#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
@@ -51,6 +55,19 @@
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
#define XGMAC_MAX_HASH_TABLE 8
+#define XGMAC_VLAN_TAG 0x00000050
+#define XGMAC_VLAN_EDVLP BIT(26)
+#define XGMAC_VLAN_VTHM BIT(25)
+#define XGMAC_VLAN_DOVLTC BIT(20)
+#define XGMAC_VLAN_ESVL BIT(18)
+#define XGMAC_VLAN_ETV BIT(16)
+#define XGMAC_VLAN_VID GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE 0x00000058
+#define XGMAC_VLAN_INCL 0x00000060
+#define XGMAC_VLAN_VLTI BIT(20)
+#define XGMAC_VLAN_CSVL BIT(19)
+#define XGMAC_VLAN_VLC GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT 16
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -59,6 +76,7 @@
#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_LPIIS BIT(5)
#define XGMAC_PMTIS BIT(4)
#define XGMAC_INT_EN 0x000000b4
#define XGMAC_TSIE BIT(12)
@@ -76,19 +94,34 @@
#define XGMAC_RWKPKTEN BIT(2)
#define XGMAC_MGKPKTEN BIT(1)
#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_LPI_CTRL 0x000000d0
+#define XGMAC_TXCGE BIT(21)
+#define XGMAC_LPITXA BIT(19)
+#define XGMAC_PLS BIT(17)
+#define XGMAC_LPITXEN BIT(16)
+#define XGMAC_RLPIEX BIT(3)
+#define XGMAC_RLPIEN BIT(2)
+#define XGMAC_TLPIEX BIT(1)
+#define XGMAC_TLPIEN BIT(0)
+#define XGMAC_LPI_TIMER_CTRL 0x000000d4
#define XGMAC_HW_FEATURE0 0x0000011c
#define XGMAC_HWFEAT_SAVLANINS BIT(27)
#define XGMAC_HWFEAT_RXCOESEL BIT(16)
#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_EEESEL BIT(13)
#define XGMAC_HWFEAT_TSSEL BIT(12)
#define XGMAC_HWFEAT_AVSEL BIT(11)
#define XGMAC_HWFEAT_RAVSEL BIT(10)
#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MMCSEL BIT(8)
#define XGMAC_HWFEAT_MGKSEL BIT(7)
#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_VLHASH BIT(4)
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_SPHEN BIT(17)
#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
@@ -98,6 +131,16 @@
#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN BIT(13)
+#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
+#define XGMAC_MAC_FSM_CONTROL 0x00000158
+#define XGMAC_PRTYEN BIT(1)
+#define XGMAC_TMOUTEN BIT(0)
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
@@ -108,14 +151,45 @@
#define XGMAC_DCS_SHIFT 16
#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_RSS_CTRL 0x00000c80
+#define XGMAC_UDP4TE BIT(3)
+#define XGMAC_TCP4TE BIT(2)
+#define XGMAC_IP2TE BIT(1)
+#define XGMAC_RSSE BIT(0)
+#define XGMAC_RSS_ADDR 0x00000c88
+#define XGMAC_RSSIA_SHIFT 8
+#define XGMAC_ADDRT BIT(2)
+#define XGMAC_CT BIT(1)
+#define XGMAC_OB BIT(0)
+#define XGMAC_RSS_DATA 0x00000c8c
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
#define XGMAC_TXTSSTSLO GENMASK(30, 0)
#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+#define XGMAC_PPS_CONTROL 0x00000d70
+#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x) ((x) * 8)
+#define XGMAC_PPSx_MASK(x) \
+ GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val) \
+ GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+ ((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val) \
+ GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+ ((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START 0x2
+#define XGMAC_PPSCMD_STOP 0x5
+#define XGMAC_PPSEN0 BIT(4)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0 BIT(31)
+#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10)
/* MTL Registers */
#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_FRPE BIT(15)
#define XGMAC_ETSALG GENMASK(6, 5)
#define XGMAC_WRR (0x0 << 5)
#define XGMAC_WFQ (0x1 << 5)
@@ -124,8 +198,32 @@
#define XGMAC_MTL_INT_STATUS 0x00001020
#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
-#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 3, (x) * 8)
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_QDDMACH BIT(7)
+#define XGMAC_TC_PRTY_MAP0 0x00001040
+#define XGMAC_TC_PRTY_MAP1 0x00001044
+#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
+#define XGMAC_RXPI BIT(31)
+#define XGMAC_NPE GENMASK(23, 16)
+#define XGMAC_NVE GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
+#define XGMAC_STARTBUSY BIT(31)
+#define XGMAC_WRRDN BIT(16)
+#define XGMAC_ADDR GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
+#define XGMAC_MTL_ECC_CONTROL 0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
+#define XGMAC_MEUIS BIT(1)
+#define XGMAC_MECIS BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
+#define XGMAC_RPCEIE BIT(12)
+#define XGMAC_ECEIE BIT(8)
+#define XGMAC_RXCEIE BIT(4)
+#define XGMAC_TXCEIE BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
#define XGMAC_TQS GENMASK(25, 16)
#define XGMAC_TQS_SHIFT 16
@@ -164,6 +262,7 @@
#define XGMAC_RXOVFIS BIT(16)
#define XGMAC_ABPSIS BIT(1)
#define XGMAC_TXUNFIS BIT(0)
+#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4)
/* DMA Registers */
#define XGMAC_DMA_MODE 0x00003000
@@ -190,7 +289,18 @@
#define XGMAC_TDPS GENMASK(29, 0)
#define XGMAC_RX_EDMA_CTRL 0x00003044
#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
+#define XGMAC_MCSIS BIT(31)
+#define XGMAC_MSUIS BIT(29)
+#define XGMAC_MSCIS BIT(28)
+#define XGMAC_DEUIS BIT(1)
+#define XGMAC_DECIS BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
+#define XGMAC_DCEIE BIT(1)
+#define XGMAC_TCEIE BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_SPH BIT(24)
#define XGMAC_PBLx8 BIT(16)
#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
#define XGMAC_TxPBL GENMASK(21, 16)
@@ -230,12 +340,17 @@
#define XGMAC_TBU BIT(2)
#define XGMAC_TPS BIT(1)
#define XGMAC_TI BIT(0)
+#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
/* Descriptors */
+#define XGMAC_TDES2_IVT GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT 16
#define XGMAC_TDES2_IOC BIT(31)
#define XGMAC_TDES2_TTSE BIT(30)
#define XGMAC_TDES2_B2L GENMASK(29, 16)
#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_VTIR GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT 14
#define XGMAC_TDES2_B1L GENMASK(13, 0)
#define XGMAC_TDES3_OWN BIT(31)
#define XGMAC_TDES3_CTXT BIT(30)
@@ -244,18 +359,33 @@
#define XGMAC_TDES3_CPC GENMASK(27, 26)
#define XGMAC_TDES3_CPC_SHIFT 26
#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_SAIC GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT 23
#define XGMAC_TDES3_THL GENMASK(22, 19)
#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT 18
#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_IVLTV BIT(17)
#define XGMAC_TDES3_CIC GENMASK(17, 16)
#define XGMAC_TDES3_CIC_SHIFT 16
#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV BIT(16)
+#define XGMAC_TDES3_VT GENMASK(15, 0)
#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES2_HL GENMASK(9, 0)
#define XGMAC_RDES3_OWN BIT(31)
#define XGMAC_RDES3_CTXT BIT(30)
#define XGMAC_RDES3_IOC BIT(30)
#define XGMAC_RDES3_LD BIT(28)
#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_RSV BIT(26)
+#define XGMAC_RDES3_L34T GENMASK(23, 20)
+#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_L34T_IP4TCP 0x1
+#define XGMAC_L34T_IP4UDP 0x2
+#define XGMAC_L34T_IP6TCP 0x9
+#define XGMAC_L34T_IP6UDP 0xA
#define XGMAC_RDES3_ES BIT(15)
#define XGMAC_RDES3_PL GENMASK(13, 0)
#define XGMAC_RDES3_TSD BIT(6)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 85c68b7ee8c6..e534a3aaf4a3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -6,7 +6,9 @@
#include <linux/bitrev.h>
#include <linux/crc32.h>
+#include <linux/iopoll.h>
#include "stmmac.h"
+#include "stmmac_ptp.h"
#include "dwxgmac2.h"
static void dwxgmac2_core_init(struct mac_device_info *hw,
@@ -118,6 +120,23 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
writel(value, ioaddr + reg);
}
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSTC(queue);
+ value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
+
+ writel(value, ioaddr + reg);
+}
+
static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
u32 rx_alg)
{
@@ -144,7 +163,9 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
u32 tx_alg)
{
void __iomem *ioaddr = hw->pcsr;
+ bool ets = true;
u32 value;
+ int i;
value = readl(ioaddr + XGMAC_MTL_OPMODE);
value &= ~XGMAC_ETSALG;
@@ -160,10 +181,28 @@ static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
value |= XGMAC_DWRR;
break;
default:
+ ets = false;
break;
}
writel(value, ioaddr + XGMAC_MTL_OPMODE);
+
+ /* Set ETS if desired */
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ value &= ~XGMAC_TSA;
+ if (ets)
+ value |= XGMAC_ETS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ }
+}
+
+static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
}
static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
@@ -200,11 +239,21 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
}
+static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
void __iomem *ioaddr = hw->pcsr;
u32 stat, en;
+ int ret = 0;
en = readl(ioaddr + XGMAC_INT_EN);
stat = readl(ioaddr + XGMAC_INT_STATUS);
@@ -216,7 +265,24 @@ static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
readl(ioaddr + XGMAC_PMT);
}
- return 0;
+ if (stat & XGMAC_LPIIS) {
+ u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ if (lpi & XGMAC_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (lpi & XGMAC_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ return ret;
}
static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
@@ -309,6 +375,53 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ value |= XGMAC_LPITXEN | XGMAC_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= XGMAC_TXCGE;
+
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ if (link)
+ value |= XGMAC_PLS;
+ else
+ value &= ~XGMAC_PLS;
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+ writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
+}
+
static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
int mcbitslog2)
{
@@ -402,36 +515,694 @@ static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
writel(value, ioaddr + XGMAC_RX_CONFIG);
}
+static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
+ u32 val)
+{
+ u32 ctrl = 0;
+
+ writel(val, ioaddr + XGMAC_RSS_DATA);
+ ctrl |= idx << XGMAC_RSSIA_SHIFT;
+ ctrl |= is_key ? XGMAC_ADDRT : 0x0;
+ ctrl |= XGMAC_OB;
+ writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
+
+ return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
+ !(ctrl & XGMAC_OB), 100, 10000);
+}
+
+static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 *key = (u32 *)cfg->key;
+ int i, ret;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RSS_CTRL);
+ if (!cfg->enable) {
+ value &= ~XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+ }
+
+ for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_rxq; i++)
+ dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
+
+ value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
+ value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
+ value &= ~XGMAC_VLAN_DOVLTC;
+ value &= ~XGMAC_VLAN_VID;
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ }
+}
+
+struct dwxgmac3_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name,
+ const struct dwxgmac3_error_desc *desc,
+ unsigned long field_offset,
+ struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
+ { true, "MTPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
+ { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
+ { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { true, "CPI", "Control Register Parity Check Error" },
+};
+
+static void dwxgmac3_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MAC",
+ dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MTL",
+ dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "DCES", "DMA DCACHE Memory Error" },
+ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
+ { true, "DUES", "DMA DCACHE Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+}
+
+static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
+{
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ /* 1. Enable Safety Features */
+ writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+ value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
+ value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
+ value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+ value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
+ value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+
+ /* Only ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
+ value |= XGMAC_PRTYEN; /* FSM Parity Feature */
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
+ return 0;
+}
+
+static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr,
+ unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
+ corr = false;
+ if (err) {
+ dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
+ (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
+ corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
+ if (err) {
+ dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
+
+static const struct dwxgmac3_error {
+ const struct dwxgmac3_error_desc *desc;
+} dwxgmac3_all_errors[] = {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
+};
+
+static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count,
+ const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
+ return -EINVAL;
+ if (!dwxgmac3_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwxgmac3_all_errors[module].desc[offset].desc;
+ return 0;
+}
+
+static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
+
+ val &= ~XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+
+ return 0;
+}
+
+static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + XGMAC_MTL_OPMODE);
+ val |= XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & XGMAC_ADDR;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Write OP */
+ val |= XGMAC_WRRDN;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Start Write */
+ val |= XGMAC_STARTBUSY;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct stmmac_tc_entry *
+dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
+ unsigned int count, u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries by reverse order */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update all pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_PPSEN0;
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
const struct stmmac_ops dwxgmac210_ops = {
.core_init = dwxgmac2_core_init,
.set_mac = dwxgmac2_set_mac,
.rx_ipc = dwxgmac2_rx_ipc,
.rx_queue_enable = dwxgmac2_rx_queue_enable,
.rx_queue_prio = dwxgmac2_rx_queue_prio,
- .tx_queue_prio = NULL,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
.rx_queue_routing = NULL,
.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
- .set_mtl_tx_queue_weight = NULL,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
.config_cbs = dwxgmac2_config_cbs,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dump_regs,
.host_irq_status = dwxgmac2_host_irq_status,
.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
.flow_ctrl = dwxgmac2_flow_ctrl,
.pmt = dwxgmac2_pmt,
.set_umac_addr = dwxgmac2_set_umac_addr,
.get_umac_addr = dwxgmac2_get_umac_addr,
- .set_eee_mode = NULL,
- .reset_eee_mode = NULL,
- .set_eee_timer = NULL,
- .set_eee_pls = NULL,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
.pcs_ctrl_ane = NULL,
.pcs_rane = NULL,
.pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
.set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
};
int dwxgmac2_setup(struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c4c45402b8f8..ae48154f933c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -26,16 +26,17 @@ static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
{
unsigned int rdes3 = le32_to_cpu(p->des3);
- int ret = good_frame;
if (unlikely(rdes3 & XGMAC_RDES3_OWN))
return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
return discard_frame;
- if (unlikely(rdes3 & XGMAC_RDES3_ES))
- ret = discard_frame;
- return ret;
+ return good_frame;
}
static int dwxgmac2_get_tx_len(struct dma_desc *p)
@@ -55,7 +56,7 @@ static void dwxgmac2_set_tx_owner(struct dma_desc *p)
static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
- p->des3 = cpu_to_le32(XGMAC_RDES3_OWN);
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
if (!disable_rx_ic)
p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
@@ -98,11 +99,17 @@ static int dwxgmac2_rx_check_timestamp(void *desc)
unsigned int rdes3 = le32_to_cpu(p->des3);
bool desc_valid, ts_valid;
+ dma_rmb();
+
desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
- if (likely(desc_valid && ts_valid))
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
return 0;
+ }
+
return -EINVAL;
}
@@ -113,13 +120,10 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
unsigned int rdes3 = le32_to_cpu(p->des3);
int ret = -EBUSY;
- if (likely(rdes3 & XGMAC_RDES3_CDA)) {
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
ret = dwxgmac2_rx_check_timestamp(next_desc);
- if (ret)
- return ret;
- }
- return ret;
+ return !ret;
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
@@ -144,7 +148,7 @@ static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
- tdes3 = tot_pkt_len & XGMAC_TDES3_FL;
+ tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
if (is_fs)
tdes3 |= XGMAC_TDES3_FD;
else
@@ -254,6 +258,86 @@ static void dwxgmac2_clear(struct dma_desc *p)
p->des3 = 0;
}
+static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 ptype;
+
+ if (rdes3 & XGMAC_RDES3_RSV) {
+ ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+
+ switch (ptype) {
+ case XGMAC_L34T_IP4TCP:
+ case XGMAC_L34T_IP4UDP:
+ case XGMAC_L34T_IP6TCP:
+ case XGMAC_L34T_IP6UDP:
+ *type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ *type = PKT_HASH_TYPE_L3;
+ break;
+ }
+
+ *hash = le32_to_cpu(p->des1);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+ return 0;
+}
+
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+}
+
+static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+
+ des &= XGMAC_TDES2_IVT;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
+ des &= XGMAC_TDES3_IVTIR;
+ p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
+}
+
+static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= XGMAC_TDES2_VTIR_SHIFT;
+ p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+}
+
const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.tx_status = dwxgmac2_get_tx_status,
.rx_status = dwxgmac2_get_rx_status,
@@ -277,4 +361,10 @@ const struct stmmac_desc_ops dwxgmac210_desc_ops = {
.get_addr = dwxgmac2_get_addr,
.set_addr = dwxgmac2_set_addr,
.clear = dwxgmac2_clear,
+ .get_rx_hash = dwxgmac2_get_rx_hash,
+ .get_rx_header_len = dwxgmac2_get_rx_header_len,
+ .set_sec_addr = dwxgmac2_set_sec_addr,
+ .set_sarc = dwxgmac2_set_sarc,
+ .set_vlan_tag = dwxgmac2_set_vlan_tag,
+ .set_vlan = dwxgmac2_set_vlan,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index a4f236e3593e..64956465c030 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -128,6 +128,14 @@ static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
}
+static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
u32 channel, int fifosz, u8 qmode)
{
@@ -351,18 +359,24 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 0 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
dma_cap->av &= (hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10;
+ dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
switch (dma_cap->addr64) {
@@ -396,6 +410,14 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
dma_cap->number_rx_queues =
((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+
+ /* MAC HW feature 3 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
+ dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
+ dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
+ dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
}
static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 nchan)
@@ -462,6 +484,22 @@ static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
}
+static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_HDSMS;
+ value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ if (en)
+ value |= XGMAC_SPH;
+ else
+ value &= ~XGMAC_SPH;
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.reset = dwxgmac2_dma_reset,
.init = dwxgmac2_dma_init,
@@ -469,7 +507,7 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.init_rx_chan = dwxgmac2_dma_init_rx_chan,
.init_tx_chan = dwxgmac2_dma_init_tx_chan,
.axi = dwxgmac2_dma_axi,
- .dump_regs = NULL,
+ .dump_regs = dwxgmac2_dma_dump_regs,
.dma_rx_mode = dwxgmac2_dma_rx_mode,
.dma_tx_mode = dwxgmac2_dma_tx_mode,
.enable_dma_irq = dwxgmac2_enable_dma_irq,
@@ -488,4 +526,5 @@ const struct stmmac_dma_ops dwxgmac210_dma_ops = {
.enable_tso = dwxgmac2_enable_tso,
.qmode = dwxgmac2_qmode,
.set_bfsize = dwxgmac2_set_bfsize,
+ .enable_sph = dwxgmac2_enable_sph,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
index 6c61b753b55e..3af2e5015245 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.c
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -201,7 +201,7 @@ static const struct stmmac_hwif_entry {
.min_id = DWXGMAC_CORE_2_10,
.regs = {
.ptp_off = PTP_XGMAC_OFFSET,
- .mmc_off = 0,
+ .mmc_off = MMC_XGMAC_OFFSET,
},
.desc = &dwxgmac210_desc_ops,
.dma = &dwxgmac210_dma_ops,
@@ -209,7 +209,7 @@ static const struct stmmac_hwif_entry {
.hwtimestamp = &stmmac_ptp,
.mode = NULL,
.tc = &dwmac510_tc_ops,
- .mmc = NULL,
+ .mmc = &dwxgmac_mmc_ops,
.setup = dwxgmac2_setup,
.quirks = NULL,
},
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 278c0dbec9d9..9435b312495d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -86,6 +86,15 @@ struct stmmac_desc_ops {
void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
/* clear descriptor */
void (*clear)(struct dma_desc *p);
+ /* RSS */
+ int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type);
+ int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
+ void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type);
+ void (*set_vlan)(struct dma_desc *p, u32 type);
};
#define stmmac_init_rx_desc(__priv, __args...) \
@@ -136,6 +145,18 @@ struct stmmac_desc_ops {
stmmac_do_void_callback(__priv, desc, set_addr, __args)
#define stmmac_clear_desc(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, clear, __args)
+#define stmmac_get_rx_hash(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_hash, __args)
+#define stmmac_get_rx_header_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+#define stmmac_set_desc_sec_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
+#define stmmac_set_desc_sarc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sarc, __args)
+#define stmmac_set_desc_vlan_tag(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
+#define stmmac_set_desc_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan, __args)
struct stmmac_dma_cfg;
struct dma_features;
@@ -186,6 +207,7 @@ struct stmmac_dma_ops {
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
+ void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -242,6 +264,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, qmode, __args)
#define stmmac_set_dma_bfsize(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+#define stmmac_enable_sph(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_sph, __args)
struct mac_device_info;
struct net_device;
@@ -249,6 +273,7 @@ struct rgmii_adv;
struct stmmac_safety_stats;
struct stmmac_tc_entry;
struct stmmac_pps_cfg;
+struct stmmac_rss;
/* Helpers to program the MAC core */
struct stmmac_ops {
@@ -327,6 +352,17 @@ struct stmmac_ops {
u32 sub_second_inc, u32 systime_flags);
/* Loopback for selftests */
void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
+ /* RSS */
+ int (*rss_configure)(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ /* TX Timestamp */
+ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
+ /* Source Address Insertion / Replacement */
+ void (*sarc_configure)(void __iomem *ioaddr, int val);
};
#define stmmac_core_init(__priv, __args...) \
@@ -397,6 +433,16 @@ struct stmmac_ops {
stmmac_do_callback(__priv, mac, flex_pps_config, __args)
#define stmmac_set_mac_loopback(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
+#define stmmac_rss_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rss_configure, __args)
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
+#define stmmac_sarc_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
@@ -503,6 +549,7 @@ extern const struct stmmac_ops dwxgmac210_ops;
extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 3587ceb9faf5..a0c05925883e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -24,6 +24,7 @@
#define MMC_GMAC4_OFFSET 0x700
#define MMC_GMAC3_X_OFFSET 0x100
+#define MMC_XGMAC_OFFSET 0x800
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
@@ -116,6 +117,14 @@ struct stmmac_counters {
unsigned int mmc_rx_tcp_err_octets;
unsigned int mmc_rx_icmp_gd_octets;
unsigned int mmc_rx_icmp_err_octets;
+
+ /* FPE */
+ unsigned int mmc_tx_fpe_fragment_cntr;
+ unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_rx_packet_assembly_err_cntr;
+ unsigned int mmc_rx_packet_smd_err_cntr;
+ unsigned int mmc_rx_packet_assembly_ok_cntr;
+ unsigned int mmc_rx_fpe_fragment_cntr;
};
#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a471db6d7b11..a223584f5f9a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -119,6 +119,64 @@
#define MMC_RX_ICMP_GD_OCTETS 0x180
#define MMC_RX_ICMP_ERR_OCTETS 0x184
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_TX_OCTET_GB 0x14
+#define MMC_XGMAC_TX_PKT_GB 0x1c
+#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
+#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
+#define MMC_XGMAC_TX_64OCT_GB 0x34
+#define MMC_XGMAC_TX_65OCT_GB 0x3c
+#define MMC_XGMAC_TX_128OCT_GB 0x44
+#define MMC_XGMAC_TX_256OCT_GB 0x4c
+#define MMC_XGMAC_TX_512OCT_GB 0x54
+#define MMC_XGMAC_TX_1024OCT_GB 0x5c
+#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
+#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
+#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
+#define MMC_XGMAC_TX_UNDER 0x7c
+#define MMC_XGMAC_TX_OCTET_G 0x84
+#define MMC_XGMAC_TX_PKT_G 0x8c
+#define MMC_XGMAC_TX_PAUSE 0x94
+#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
+#define MMC_XGMAC_TX_LPI_USEC 0xa4
+#define MMC_XGMAC_TX_LPI_TRAN 0xa8
+
+#define MMC_XGMAC_RX_PKT_GB 0x100
+#define MMC_XGMAC_RX_OCTET_GB 0x108
+#define MMC_XGMAC_RX_OCTET_G 0x110
+#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
+#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
+#define MMC_XGMAC_RX_CRC_ERR 0x128
+#define MMC_XGMAC_RX_RUNT_ERR 0x130
+#define MMC_XGMAC_RX_JABBER_ERR 0x134
+#define MMC_XGMAC_RX_UNDER 0x138
+#define MMC_XGMAC_RX_OVER 0x13c
+#define MMC_XGMAC_RX_64OCT_GB 0x140
+#define MMC_XGMAC_RX_65OCT_GB 0x148
+#define MMC_XGMAC_RX_128OCT_GB 0x150
+#define MMC_XGMAC_RX_256OCT_GB 0x158
+#define MMC_XGMAC_RX_512OCT_GB 0x160
+#define MMC_XGMAC_RX_1024OCT_GB 0x168
+#define MMC_XGMAC_RX_UNI_PKT_G 0x170
+#define MMC_XGMAC_RX_LENGTH_ERR 0x178
+#define MMC_XGMAC_RX_RANGE 0x180
+#define MMC_XGMAC_RX_PAUSE 0x188
+#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
+#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
+#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
+#define MMC_XGMAC_RX_LPI_USEC 0x1a4
+#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
+#define MMC_XGMAC_TX_FPE_FRAG 0x208
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+
static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
u32 value = readl(mmcaddr + MMC_CNTRL);
@@ -263,3 +321,137 @@ const struct stmmac_mmc_ops dwmac_mmc_ops = {
.intr_all_mask = dwmac_mmc_intr_all_mask,
.read = dwmac_mmc_read,
};
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+}
+
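+/* XGMAC MMC counters are 64-bit wide and exposed as a low/high register
+ * pair. Read both halves, accumulate into the 32-bit stat and saturate
+ * it when the hardware value no longer fits in 32 bits.
+ */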
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+ tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 5cd966c154f3..dcb2e29a5717 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -13,6 +13,7 @@
#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
+#include <linux/if_vlan.h>
#include <linux/stmmac.h>
#include <linux/phylink.h>
#include <linux/pci.h>
@@ -57,7 +58,9 @@ struct stmmac_tx_queue {
struct stmmac_rx_buffer {
struct page *page;
+ struct page *sec_page;
dma_addr_t addr;
+ dma_addr_t sec_addr;
};
struct stmmac_rx_queue {
@@ -73,6 +76,12 @@ struct stmmac_rx_queue {
u32 rx_zeroc_thresh;
dma_addr_t dma_rx_phy;
u32 rx_tail_addr;
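+ /* State preserved across NAPI polls so a packet spanning several
+ * descriptors can be completed on the next poll.
+ */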
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
};
struct stmmac_channel {
@@ -113,6 +122,12 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames;
@@ -123,6 +138,8 @@ struct stmmac_priv {
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
+ int sph;
+ u32 sarc_type;
unsigned int dma_buf_sz;
unsigned int rx_copybreak;
@@ -185,11 +202,10 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
- struct dentry *dbgfs_rings_status;
- struct dentry *dbgfs_dma_cap;
#endif
unsigned long state;
@@ -203,6 +219,9 @@ struct stmmac_priv {
/* Pulse Per Second output */
struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+
+ /* Receive Side Scaling */
+ struct stmmac_rss rss;
};
enum stmmac_state {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 6efb66820d4c..1c450105e5a6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -18,10 +18,12 @@
#include "stmmac.h"
#include "dwmac_dma.h"
+#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
+#define XGMAC_ETHTOOL_NAME "st_xgmac"
#define ETHTOOL_DMA_OFFSET 55
@@ -65,6 +67,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(rx_split_hdr_pkt_n),
/* Tx/Rx IRQ error info */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
@@ -243,6 +246,12 @@ static const struct stmmac_stats stmmac_mmc[] = {
STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
+ STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
};
#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
@@ -253,6 +262,8 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
if (priv->plat->has_gmac || priv->plat->has_gmac4)
strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else if (priv->plat->has_xgmac)
+ strlcpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
else
strlcpy(info->driver, MAC100_ETHTOOL_NAME,
sizeof(info->driver));
@@ -398,23 +409,28 @@ static int stmmac_check_if_running(struct net_device *dev)
static int stmmac_ethtool_get_regs_len(struct net_device *dev)
{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_xgmac)
+ return XGMAC_REGSIZE * 4;
return REG_SPACE_SIZE;
}
static void stmmac_ethtool_gregs(struct net_device *dev,
struct ethtool_regs *regs, void *space)
{
- u32 *reg_space = (u32 *) space;
-
struct stmmac_priv *priv = netdev_priv(dev);
-
- memset(reg_space, 0x0, REG_SPACE_SIZE);
+ u32 *reg_space = (u32 *) space;
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- /* Copy DMA registers to where ethtool expects them */
- memcpy(&reg_space[ETHTOOL_DMA_OFFSET], &reg_space[DMA_BUS_MODE / 4],
- NUM_DWMAC1000_DMA_REGS * 4);
+
+ if (!priv->plat->has_xgmac) {
+ /* Copy DMA registers to where ethtool expects them */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
+ }
}
static int stmmac_nway_reset(struct net_device *dev)
@@ -758,6 +774,76 @@ static int stmmac_set_coalesce(struct net_device *dev,
return 0;
}
+static int stmmac_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->plat->rx_queues_to_use;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return sizeof(priv->rss.key);
+}
+
+static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return ARRAY_SIZE(priv->rss.table);
+}
+
+static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ indir[i] = priv->rss.table[i];
+ }
+
+ if (key)
+ memcpy(key, priv->rss.key, sizeof(priv->rss.key));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = indir[i];
+ }
+
+ if (key)
+ memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+
+ return stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -849,6 +935,11 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_eee = stmmac_ethtool_op_get_eee,
.set_eee = stmmac_ethtool_op_set_eee,
.get_sset_count = stmmac_get_sset_count,
+ .get_rxnfc = stmmac_get_rxnfc,
+ .get_rxfh_key_size = stmmac_get_rxfh_key_size,
+ .get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
+ .get_rxfh = stmmac_get_rxfh,
+ .set_rxfh = stmmac_set_rxfh,
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fd54c7c87485..06ccd216ae90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -105,7 +105,7 @@ MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
-static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_init_fs(struct net_device *dev);
static void stmmac_exit_fs(struct net_device *dev);
#endif
@@ -432,6 +432,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
u64 ns = 0;
if (!priv->hwts_tx_en)
@@ -443,9 +444,13 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* check tx tstamp status */
if (stmmac_get_tx_timestamp_status(priv, p)) {
- /* get the valid tstamp */
stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+ if (found) {
memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
shhwtstamp.hwtstamp = ns_to_ktime(ns);
@@ -453,8 +458,6 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
/* pass tstamp to stack */
skb_tstamp_tx(skb, &shhwtstamp);
}
-
- return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
@@ -1198,6 +1201,17 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
if (!buf->page)
return -ENOMEM;
+ if (priv->sph) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
+ } else {
+ buf->sec_page = NULL;
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
stmmac_set_desc_addr(priv, p, buf->addr);
if (priv->dma_buf_sz == BUF_SIZE_16KiB)
@@ -1220,6 +1234,10 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
if (buf->page)
page_pool_put_page(rx_q->page_pool, buf->page, false);
buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
}
/**
@@ -2417,6 +2435,22 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
}
}
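+/* RSS stays disabled unless both the core capability and the platform flag
+ * are set; when available, it follows the NETIF_F_RXHASH feature flag.
+ */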
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
/**
* stmmac_mtl_configuration - Configure MTL
* @priv: driver private structure
@@ -2461,6 +2495,10 @@ static void stmmac_mtl_configuration(struct stmmac_priv *priv)
/* Set RX routing */
if (rx_queues_count > 1)
stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
}
static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
@@ -2573,6 +2611,16 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
}
+ /* Enable Split Header */
+ if (priv->sph && priv->hw->rx_csum) {
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, 1, chan);
+ }
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2750,6 +2798,33 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
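+/* Ask the hardware to insert the VLAN tag(s) by filling a dedicated TX
+ * descriptor; returns true when a descriptor entry was consumed.
+ */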
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ p = tx_q->dma_tx + tx_q->cur_tx;
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ return true;
+}
+
/**
* stmmac_tso_allocator - close entry point of the driver
* @priv: driver private structure
@@ -2829,12 +2904,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
- unsigned int first_entry;
struct stmmac_tx_queue *tx_q;
+ unsigned int first_entry;
int tmp_pay_len = 0;
u32 pay_len, mss;
u8 proto_hdr_len;
dma_addr_t des;
+ bool has_vlan;
int i;
tx_q = &priv->tx_queue[queue];
@@ -2876,12 +2952,18 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -2960,6 +3042,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
@@ -3038,6 +3123,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int first_entry;
unsigned int enh_desc;
dma_addr_t des;
+ bool has_vlan;
int entry;
tx_q = &priv->tx_queue[queue];
@@ -3063,6 +3149,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -3076,6 +3165,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
enh_desc = priv->plat->enh_desc;
/* To program the descriptors according to the size of the frame */
if (enh_desc)
@@ -3173,6 +3265,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->xstats.tx_set_ic_bit++;
}
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
skb_tx_timestamp(skb);
/* Ready to fill the first descriptor and set the OWN bit w/o any
@@ -3292,6 +3387,17 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
break;
}
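+ /* With Split Header enabled, also refill the secondary page that
+ * receives the packet payload.
+ */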
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+
+ dma_sync_single_for_device(priv->device, buf->sec_addr,
+ len, DMA_FROM_DEVICE);
+ }
+
buf->addr = page_pool_get_dma_addr(buf->page);
/* Sync whole allocation to device. This will invalidate old
@@ -3301,6 +3407,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
DMA_FROM_DEVICE);
stmmac_set_desc_addr(priv, p, buf->addr);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr);
stmmac_refill_desc3(priv, rx_q, p);
rx_q->rx_count_frames++;
@@ -3330,9 +3437,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
unsigned int next_entry = rx_q->cur_rx;
- int coe = priv->hw->rx_csum;
- unsigned int count = 0;
+ struct sk_buff *skb = NULL;
if (netif_msg_rx_status(priv)) {
void *rx_head;
@@ -3346,10 +3454,30 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
+ unsigned int hlen = 0, prev_len = 0;
+ enum pkt_hash_types hash_type;
struct stmmac_rx_buffer *buf;
struct dma_desc *np, *p;
- int entry, status;
+ unsigned int sec_len;
+ int entry;
+ u32 hash;
+
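+ /* Resume assembling a packet that was left incomplete when the
+ * previous NAPI poll ran out of budget.
+ */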
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+ if (count >= limit)
+ break;
+
+read_again:
+ sec_len = 0;
entry = next_entry;
buf = &rx_q->buf_pool[entry];
@@ -3376,6 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
np = rx_q->dma_rx + next_entry;
prefetch(np);
+ prefetch(page_address(buf->page));
if (priv->extend_desc)
stmmac_rx_extended_status(priv, &priv->dev->stats,
@@ -3384,26 +3513,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
page_pool_recycle_direct(rx_q->page_pool, buf->page);
priv->dev->stats.rx_errors++;
buf->page = NULL;
- } else {
- struct sk_buff *skb;
- int frame_len;
- unsigned int des;
+ error = 1;
+ }
- stmmac_get_desc_addr(priv, p, &des);
- frame_len = stmmac_get_rx_frame_len(priv, p, coe);
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
- /* If frame length is greater than skb buffer size
- * (preallocated during init) then the packet is
- * ignored
- */
- if (frame_len > priv->dma_buf_sz) {
- if (net_ratelimit())
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
- priv->dev->stats.rx_length_errors++;
- continue;
- }
+ /* Buffer is good. Go on. */
+
+ if (likely(status & rx_not_ls)) {
+ len += priv->dma_buf_sz;
+ } else {
+ prev_len = len;
+ len = stmmac_get_rx_frame_len(priv, p, coe);
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
* Type frames (LLC/LLC-SNAP)
@@ -3414,53 +3540,97 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
*/
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
unlikely(status != llc_snap))
- frame_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ if (!skb) {
+ int ret = stmmac_get_rx_header_len(priv, p, &hlen);
- if (netif_msg_rx_status(priv)) {
- netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, des);
- netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
- frame_len, status);
+ if (priv->sph && !ret && (hlen > 0)) {
+ sec_len = len;
+ if (!(status & rx_not_ls))
+ sec_len = sec_len - hlen;
+ len = hlen;
+
+ prefetch(page_address(buf->sec_page));
+ priv->xstats.rx_split_hdr_pkt_n++;
}
- skb = netdev_alloc_skb_ip_align(priv->dev, frame_len);
- if (unlikely(!skb)) {
+ skb = napi_alloc_skb(&ch->rx_napi, len);
+ if (!skb) {
priv->dev->stats.rx_dropped++;
continue;
}
- dma_sync_single_for_cpu(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, page_address(buf->page),
- frame_len);
- skb_put(skb, frame_len);
-
- if (netif_msg_pktdata(priv)) {
- netdev_dbg(priv->dev, "frame received (%dbytes)",
- frame_len);
- print_pkt(skb->data, frame_len);
- }
+ len);
+ skb_put(skb, len);
- stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else {
+ unsigned int buf_len = len - prev_len;
- stmmac_rx_vlan(priv->dev, skb);
+ if (likely(status & rx_not_ls))
+ buf_len = priv->dma_buf_sz;
- skb->protocol = eth_type_trans(skb, priv->dev);
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, 0, buf_len,
+ priv->dma_buf_sz);
- if (unlikely(!coe))
- skb_checksum_none_assert(skb);
- else
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
- napi_gro_receive(&ch->rx_napi, skb);
+ if (sec_len > 0) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ sec_len, DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, sec_len,
+ priv->dma_buf_sz);
- /* Data payload copied into SKB, page ready for recycle */
- page_pool_recycle_direct(rx_q->page_pool, buf->page);
- buf->page = NULL;
+ len += sec_len;
- priv->dev->stats.rx_packets++;
- priv->dev->stats.rx_bytes += frame_len;
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
}
+
+ if (likely(status & rx_not_ls))
+ goto read_again;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
}
stmmac_rx_refill(priv, queue);
@@ -3606,6 +3776,8 @@ static int stmmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(netdev);
+ bool sph_en;
+ u32 chan;
/* Keep the COE Type in case of csum is supporting */
if (features & NETIF_F_RXCSUM)
@@ -3617,6 +3789,10 @@ static int stmmac_set_features(struct net_device *netdev,
*/
stmmac_rx_ipc(priv, priv->hw);
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
return 0;
}
@@ -3962,54 +4138,102 @@ static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
-static int stmmac_init_fs(struct net_device *dev)
+static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
- if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
- netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
- return -ENOMEM;
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
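+/* CRC-32 (IEEE 802.3 polynomial, LSB first) over the 12-bit VLAN ID, used
+ * to build the MAC VLAN hash filter.
+ */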
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
}
- /* Entry to report DMA RX/TX rings */
- priv->dbgfs_rings_status =
- debugfs_create_file("descriptors_status", 0444,
- priv->dbgfs_dir, dev,
- &stmmac_rings_status_fops);
+ return crc;
+}
- if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
- netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
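+/* Fold the CRC of every active VLAN ID into a 16-bit hash bitmap and
+ * program it into the MAC VLAN hash filter.
+ */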
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ u16 vid;
- return -ENOMEM;
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
}
- /* Entry to report the DMA HW features */
- priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", 0444,
- priv->dbgfs_dir,
- dev, &stmmac_dma_cap_fops);
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
- if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
- netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
- return -ENOMEM;
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
}
- return 0;
+ return ret;
}
-static void stmmac_exit_fs(struct net_device *dev)
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
- debugfs_remove_recursive(priv->dbgfs_dir);
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+ return stmmac_vlan_update(priv, is_double);
}
-#endif /* CONFIG_DEBUG_FS */
static const struct net_device_ops stmmac_netdev_ops = {
.ndo_open = stmmac_open,
@@ -4027,6 +4251,8 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_poll_controller = stmmac_poll_controller,
#endif
.ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
static void stmmac_reset_subtask(struct stmmac_priv *priv)
@@ -4175,8 +4401,8 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, maxq;
- int ret = 0;
+ u32 queue, rxq, maxq;
+ int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
@@ -4259,6 +4485,12 @@ int stmmac_dvr_probe(struct device *device,
dev_info(priv->device, "TSO feature enabled\n");
}
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph = true;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
if (priv->dma_cap.addr64) {
ret = dma_set_mask_and_coherent(device,
DMA_BIT_MASK(priv->dma_cap.addr64));
@@ -4281,9 +4513,27 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
@@ -4368,10 +4618,7 @@ int stmmac_dvr_probe(struct device *device,
}
#ifdef CONFIG_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- netdev_warn(priv->dev, "%s: failed debugFS registration\n",
- __func__);
+ stmmac_init_fs(ndev);
#endif
return ret;
@@ -4617,16 +4864,8 @@ static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
/* Create debugfs main directory if it doesn't exist yet */
- if (!stmmac_fs_dir) {
+ if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
-
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
- pr_err("ERROR %s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME);
-
- return -ENOMEM;
- }
- }
#endif
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 4304c1abc5d1..40c42637ad75 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -348,7 +348,9 @@ int stmmac_mdio_register(struct net_device *ndev)
max_addr = PHY_MAX_ADDR;
}
- new_bus->reset = &stmmac_mdio_reset;
+ if (mdio_bus_data->needs_reset)
+ new_bus->reset = &stmmac_mdio_reset;
+
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 86f9c07a38cf..20906287b6d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -9,6 +9,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/clk-provider.h>
#include <linux/pci.h>
#include <linux/dmi.h>
@@ -63,6 +64,7 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->has_gmac = 1;
plat->force_sf_dma_mode = 1;
+ plat->mdio_bus_data->needs_reset = true;
plat->mdio_bus_data->phy_mask = 0;
/* Set default value for multicast hash bins */
@@ -107,6 +109,166 @@ static const struct stmmac_pci_info stmmac_pci_info = {
.setup = stmmac_default_data,
};
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+ }
+
+ /* FIFO size is 4096 bytes per Tx/Rx queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->mdio_bus_data->phy_mask = 0;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ "stmmac-clk", NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ return 0;
+}
+
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_pci_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_RGMII;
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_pci_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 200000000;
+ ret = intel_mgbe_common_data(pdev, plat);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tgl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->interface = PHY_INTERFACE_MODE_SGMII;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_pci_info = {
+ .setup = tgl_sgmii_data,
+};
+
static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
{
.func = 6,
@@ -292,10 +454,15 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
int i;
stmmac_dvr_remove(&pdev->dev);
+ if (priv->plat->stmmac_clk)
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+
for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
@@ -348,6 +515,9 @@ static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
#define STMMAC_QUARK_ID 0x0937
#define STMMAC_DEVICE_ID 0x1108
+#define STMMAC_EHL_RGMII1G_ID 0x4b30
+#define STMMAC_EHL_SGMII1G_ID 0x4b31
+#define STMMAC_TGL_SGMII1G_ID 0xa0ac
#define STMMAC_DEVICE(vendor_id, dev_id, info) { \
PCI_VDEVICE(vendor_id, dev_id), \
@@ -358,6 +528,9 @@ static const struct pci_device_id stmmac_id_table[] = {
STMMAC_DEVICE(STMMAC, STMMAC_DEVICE_ID, stmmac_pci_info),
STMMAC_DEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_MAC, stmmac_pci_info),
STMMAC_DEVICE(INTEL, STMMAC_QUARK_ID, quark_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_RGMII1G_ID, ehl_rgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_EHL_SGMII1G_ID, ehl_sgmii1g_pci_info),
+ STMMAC_DEVICE(INTEL, STMMAC_TGL_SGMII1G_ID, tgl_sgmii1g_pci_info),
{}
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 154daf4d1072..eaf8f08f2e91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -342,10 +342,16 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
mdio = true;
}
- if (mdio)
+ if (mdio) {
plat->mdio_bus_data =
devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
return 0;
}
@@ -522,13 +528,15 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
}
/* clock setup */
- plat->stmmac_clk = devm_clk_get(&pdev->dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(plat->stmmac_clk)) {
- dev_warn(&pdev->dev, "Cannot get CSR clock\n");
- plat->stmmac_clk = NULL;
+ if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
}
- clk_prepare_enable(plat->stmmac_clk);
plat->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(plat->pclk)) {
@@ -609,13 +617,8 @@ int stmmac_get_platform_resources(struct platform_device *pdev,
* probe if needed before we went too far with resource allocation.
*/
stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
- if (stmmac_res->irq < 0) {
- if (stmmac_res->irq != -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "MAC IRQ configuration information not found\n");
- }
+ if (stmmac_res->irq < 0)
return stmmac_res->irq;
- }
/* On some platforms e.g. SPEAr the wake up irq differs from the mac irq
* The external wake up irq can be passed through the platform code
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index c48224973a37..173493db038c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -194,6 +194,9 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
priv->pps[i].available = true;
}
+ if (priv->plat->ptp_max_adj)
+ stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
spin_lock_init(&priv->ptp_lock);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index a97b1ea76438..ecc8602c6799 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -11,8 +11,10 @@
#include <linux/ip.h>
#include <linux/phy.h>
#include <linux/udp.h>
+#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
#include "stmmac.h"
struct stmmachdr {
@@ -43,6 +45,7 @@ struct stmmac_packet_attrs {
int size;
int remove_sa;
u8 id;
+ int sarc;
};
static u8 stmmac_test_next_id;
@@ -228,8 +231,11 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
goto out;
}
- if (tpriv->packet->src) {
- if (!ether_addr_equal(ehdr->h_source, orig_ndev->dev_addr))
+ if (tpriv->packet->sarc) {
+ if (!ether_addr_equal(ehdr->h_source, ehdr->h_dest))
+ goto out;
+ } else if (tpriv->packet->src) {
+ if (!ether_addr_equal(ehdr->h_source, tpriv->packet->src))
goto out;
}
@@ -290,7 +296,9 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
tpriv->pt.dev = priv->dev;
tpriv->pt.af_packet_priv = tpriv;
tpriv->packet = attr;
- dev_add_pack(&tpriv->pt);
+
+ if (!attr->dont_wait)
+ dev_add_pack(&tpriv->pt);
skb = stmmac_test_get_udp_skb(priv, attr);
if (!skb) {
@@ -313,7 +321,8 @@ static int __stmmac_test_loopback(struct stmmac_priv *priv,
ret = !tpriv->ok;
cleanup:
- dev_remove_pack(&tpriv->pt);
+ if (!attr->dont_wait)
+ dev_remove_pack(&tpriv->pt);
kfree(tpriv);
return ret;
}
@@ -700,6 +709,465 @@ cleanup:
return ret;
}
+static int stmmac_test_rss(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ if (!priv->dma_cap.rssen || !priv->rss.enable)
+ return -EOPNOTSUPP;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.exp_hash = true;
+ attr.sport = 0x321;
+ attr.dport = 0x123;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_vlan_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct iphdr *ihdr;
+ u16 proto;
+
+ proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+ if (tpriv->vlan_id) {
+ if (skb->vlan_proto != htons(proto))
+ goto out;
+ if (skb->vlan_tci != tpriv->vlan_id)
+ goto out;
+ }
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
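+ /* Only the first iteration uses the VLAN ID that was added to the
+ * filter; frames with the remaining IDs must be dropped by hardware.
+ */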
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 1;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = true;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_8021Q);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 2;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = !tpriv->ok;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -1;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
+ struct tc_cls_u32_offload cls_u32 = { };
+ struct stmmac_packet_attrs attr = { };
+ struct tc_action **actions, *act;
+ struct tc_u32_sel *sel;
+ struct tcf_exts *exts;
+ int ret, i, nk = 1;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.frpsel)
+ return -EOPNOTSUPP;
+
+ sel = kzalloc(sizeof(*sel) + nk * sizeof(struct tc_u32_key), GFP_KERNEL);
+ if (!sel)
+ return -ENOMEM;
+
+ exts = kzalloc(sizeof(*exts), GFP_KERNEL);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto cleanup_sel;
+ }
+
+ actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ if (!actions) {
+ ret = -ENOMEM;
+ goto cleanup_exts;
+ }
+
+ act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ if (!act) {
+ ret = -ENOMEM;
+ goto cleanup_actions;
+ }
+
+ cls_u32.command = TC_CLSU32_NEW_KNODE;
+ cls_u32.common.chain_index = 0;
+ cls_u32.common.protocol = htons(ETH_P_ALL);
+ cls_u32.knode.exts = exts;
+ cls_u32.knode.sel = sel;
+ cls_u32.knode.handle = 0x123;
+
+ exts->nr_actions = nk;
+ exts->actions = actions;
+ for (i = 0; i < nk; i++) {
+ struct tcf_gact *gact = to_gact(&act[i]);
+
+ actions[i] = &act[i];
+ gact->tcf_action = TC_ACT_SHOT;
+ }
+
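+ /* Single U32 key matching 4 bytes at frame offset 6 (the start of
+ * the source MAC address), with a drop (TC_ACT_SHOT) action.
+ */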
+ sel->nkeys = nk;
+ sel->offshift = 0;
+ sel->keys[0].off = 6;
+ sel->keys[0].val = htonl(0xdeadbeef);
+ sel->keys[0].mask = ~0x0;
+
+ ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+ if (ret)
+ goto cleanup_act;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.src = addr;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = !ret; /* Shall NOT receive packet */
+
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+
+cleanup_act:
+ kfree(act);
+cleanup_actions:
+ kfree(actions);
+cleanup_exts:
+ kfree(exts);
+cleanup_sel:
+ kfree(sel);
+ return ret;
+}
+#else
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
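+/* Source Address Insertion controlled per TX descriptor: the frame is sent
+ * with a zeroed source MAC and, on loopback, the received source address
+ * must match the device's own (destination) address.
+ */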
+static int stmmac_test_desc_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x1;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_desc_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x2;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_reg_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_reg_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+ u16 proto;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = svlan;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = priv->dev->dev_addr;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
+ skb->protocol = htons(proto);
+
+ skb_set_queue_mapping(skb, 0);
+ ret = dev_queue_xmit(skb);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanoff(struct stmmac_priv *priv)
+{
+ return stmmac_test_vlanoff_common(priv, false);
+}
+
+static int stmmac_test_svlanoff(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.dvlan)
+ return -EOPNOTSUPP;
+ return stmmac_test_vlanoff_common(priv, true);
+}
+
#define STMMAC_LOOPBACK_NONE 0
#define STMMAC_LOOPBACK_MAC 1
#define STMMAC_LOOPBACK_PHY 2
@@ -745,6 +1213,46 @@ static const struct stmmac_test {
.name = "Flow Control ",
.lb = STMMAC_LOOPBACK_PHY,
.fn = stmmac_test_flowctrl,
+ }, {
+ .name = "RSS ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rss,
+ }, {
+ .name = "VLAN Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt,
+ }, {
+ .name = "Double VLAN Filtering",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt,
+ }, {
+ .name = "Flexible RX Parser ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rxp,
+ }, {
+ .name = "SA Insertion (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sai,
+ }, {
+ .name = "SA Replacement (desc)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sar,
+ }, {
+ .name = "SA Insertion (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sai,
+ }, {
+ .name = "SA Replacement (reg)",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sar,
+ }, {
+ .name = "VLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanoff,
+ }, {
+ .name = "SVLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_svlanoff,
},
};