Diffstat (limited to 'drivers/net/ethernet/mediatek/mtk_eth_soc.c')
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2028 +++++++++++++++++++------
 1 file changed, 1522 insertions(+), 506 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 75d67d1b5f6b..7cd381530aa4 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
@@ -20,9 +21,11 @@
#include <linux/pinctrl/devinfo.h>
#include <linux/phylink.h>
#include <linux/jhash.h>
+#include <linux/bitfield.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
static int mtk_msg_level = -1;
module_param_named(msg_level, mtk_msg_level, int, 0);
@@ -31,6 +34,112 @@ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
#define MTK_ETHTOOL_STAT(x) { #x, \
offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
+
+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+};
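These per-SoC register maps replace the driver's hard-coded MTK_* offsets, so one binary can drive MT7621/MT7623, MT7628 and MT7986 silicon. A minimal sketch of how an accessor resolves an offset through the map at run time (mtk_r32() and eth->soc->reg_map are from this patch; the helper name is illustrative, not part of it):

	/* Illustrative only: read the PDMA IRQ status via the per-SoC map,
	 * avoiding compile-time conditionals on the register layout.
	 */
	static u32 mtk_pdma_irq_status(struct mtk_eth *eth)
	{
		const struct mtk_reg_map *reg_map = eth->soc->reg_map;

		return mtk_r32(eth, reg_map->pdma.irq_status);
	}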
+
/* strings used by ethtool */
static const struct mtk_ethtool_stats {
char str[ETH_GSTRING_LEN];
@@ -48,13 +157,20 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_long_errors),
MTK_ETHTOOL_STAT(rx_checksum_errors),
MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
};
static const char * const mtk_clks_source_name[] = {
"ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll",
"sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb",
"sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb",
- "sgmii_ck", "eth2pll",
+ "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1"
};
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
@@ -91,46 +207,96 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
}
dev_err(eth->dev, "mdio: MDIO timeout\n");
- return -1;
+ return -ETIMEDOUT;
}
-static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
{
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ int ret;
- write_data &= 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE |
- (phy_register << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data,
- MTK_PHY_IAC);
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+ }
- if (mtk_mdio_busy_wait(eth))
- return -1;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
return 0;
}
-static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
{
- u32 d;
-
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ int ret;
- mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ |
- (phy_reg << PHY_IAC_REG_SHIFT) |
- (phy_addr << PHY_IAC_ADDR_SHIFT),
- MTK_PHY_IAC);
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- if (mtk_mdio_busy_wait(eth))
- return 0xffff;
+ if (phy_reg & MII_ADDR_C45) {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ } else {
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+ }
- d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff;
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
- return d;
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
}
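Both C45 paths above issue a separate address phase before the read/write phase, extracting the MMD and register number with the mdiobus_c45_*() helpers. For reference, the MII_ADDR_C45 packing those helpers assume (paraphrased from <linux/mdio.h> of this kernel generation, not part of the patch):

	#define MII_ADDR_C45		(1 << 30)	/* marks a Clause 45 access */
	#define MII_DEVADDR_C45_SHIFT	16

	/* regnum layout: bit 30 = C45 flag, bits 20:16 = MMD (device)
	 * address, bits 15:0 = register address within that MMD.
	 */
	static inline u16 mdiobus_c45_devad(u32 regnum)
	{
		return (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
	}

	static inline u16 mdiobus_c45_regad(u32 regnum)
	{
		return regnum & 0xffff;
	}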
static int mtk_mdio_write(struct mii_bus *bus, int phy_addr,
@@ -210,14 +376,33 @@ static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
mtk_w32(eth, val, TRGMII_TCK_CTRL);
}
+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ unsigned int sid;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ return mtk_sgmii_select_pcs(eth->sgmii, sid);
+ }
+
+ return NULL;
+}
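phylink invokes mac_select_pcs() ahead of mac_config(), so the SGMII PCS is attached before the interface mode changes. A hedged sketch of the mtk_sgmii_select_pcs() counterpart (it lives in mtk_sgmii.c, outside this diff, assuming each SGMII unit embeds a struct phylink_pcs):

	struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id)
	{
		if (!ss->pcs[id].regmap)	/* unit not described in DT */
			return NULL;

		return &ss->pcs[id].pcs;
	}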
+
static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
struct mtk_eth *eth = mac->hw;
- u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
+ u32 i;
/* MT76x8 has no hardware settings for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
@@ -274,6 +459,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
+ /* FIXME: this is incorrect. Not only does it
+ * use state->speed (which is not guaranteed
+ * to be correct) but it also makes use of it
+ * in a code path that will only be reachable
+ * when the PHY interface mode changes, not
+ * when the speed changes. Consequently, RGMII
+ * is probably broken.
+ */
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
@@ -330,38 +523,14 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
SYSCFG0_SGMII_MASK,
~(u32)SYSCFG0_SGMII_MASK);
- /* Decide how GMAC and SGMIISYS be mapped */
- sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
- 0 : mac->id;
-
- /* Setup SGMIISYS with the determined property */
- if (state->interface != PHY_INTERFACE_MODE_SGMII)
- err = mtk_sgmii_setup_mode_force(eth->sgmii, sid,
- state);
- else if (phylink_autoneg_inband(mode))
- err = mtk_sgmii_setup_mode_an(eth->sgmii, sid);
-
- if (err)
- goto init_err;
-
- regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
- SYSCFG0_SGMII_MASK, val);
+ /* Save the syscfg0 value for mac_finish */
+ mac->syscfg0 = val;
} else if (phylink_autoneg_inband(mode)) {
dev_err(eth->dev,
"In-band mode not supported in non SGMII mode!\n");
return;
}
- /* Setup gmac */
- mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr_new = mcr_cur;
- mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
-
- /* Only update control register when needed! */
- if (mcr_new != mcr_cur)
- mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
-
return;
err_phy:
@@ -374,6 +543,33 @@ init_err:
mac->id, phy_modes(state->interface), err);
}
+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ /* Enable SGMII */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface))
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, mac->syscfg0);
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
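For review context: with this split, phylink's sequencing for a major reconfiguration is roughly mac_select_pcs(), then mac_config(), then pcs_config(), then mac_finish(). Deferring the SYSCFG0 mux write to mac_finish() therefore ensures it lands only after the PCS has been reprogrammed, closing the window in which the old code flipped the mux while the SGMII unit was still in its previous mode (sequencing per the phylink_mac_ops documentation, not stated in this patch).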
static void mtk_mac_pcs_get_state(struct phylink_config *config,
struct phylink_link_state *state)
{
@@ -406,14 +602,6 @@ static void mtk_mac_pcs_get_state(struct phylink_config *config,
state->pause |= MLO_PAUSE_TX;
}
-static void mtk_mac_an_restart(struct phylink_config *config)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
-
- mtk_sgmii_restart_an(mac->hw, mac->id);
-}
-
static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
@@ -432,8 +620,9 @@ static void mtk_mac_link_up(struct phylink_config *config,
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
- u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ u32 mcr;
+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
MAC_MCR_FORCE_RX_FC);
@@ -463,97 +652,12 @@ static void mtk_mac_link_up(struct phylink_config *config,
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
-static void mtk_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state)
-{
- struct mtk_mac *mac = container_of(config, struct mtk_mac,
- phylink_config);
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
-
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII) &&
- phy_interface_mode_is_rgmii(state->interface)) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) &&
- !mac->id && state->interface == PHY_INTERFACE_MODE_TRGMII) &&
- !(MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII) &&
- (state->interface == PHY_INTERFACE_MODE_SGMII ||
- phy_interface_mode_is_8023z(state->interface)))) {
- linkmode_zero(supported);
- return;
- }
-
- phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
-
- switch (state->interface) {
- case PHY_INTERFACE_MODE_TRGMII:
- phylink_set(mask, 1000baseT_Full);
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- break;
- case PHY_INTERFACE_MODE_GMII:
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- phylink_set(mask, 1000baseT_Half);
- fallthrough;
- case PHY_INTERFACE_MODE_SGMII:
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- fallthrough;
- case PHY_INTERFACE_MODE_MII:
- case PHY_INTERFACE_MODE_RMII:
- case PHY_INTERFACE_MODE_REVMII:
- case PHY_INTERFACE_MODE_NA:
- default:
- phylink_set(mask, 10baseT_Half);
- phylink_set(mask, 10baseT_Full);
- phylink_set(mask, 100baseT_Half);
- phylink_set(mask, 100baseT_Full);
- break;
- }
-
- if (state->interface == PHY_INTERFACE_MODE_NA) {
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- phylink_set(mask, 2500baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- phylink_set(mask, 1000baseX_Full);
- }
- if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GEPHY)) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseT_Half);
- }
- }
-
- phylink_set(mask, Pause);
- phylink_set(mask, Asym_Pause);
-
- linkmode_and(supported, supported, mask);
- linkmode_and(state->advertising, state->advertising, mask);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
-}
-
static const struct phylink_mac_ops mtk_phylink_ops = {
- .validate = mtk_validate,
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = mtk_mac_select_pcs,
.mac_pcs_get_state = mtk_mac_pcs_get_state,
- .mac_an_restart = mtk_mac_an_restart,
.mac_config = mtk_mac_config,
+ .mac_finish = mtk_mac_finish,
.mac_link_down = mtk_mac_link_down,
.mac_link_up = mtk_mac_link_up,
};
@@ -583,6 +687,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
+ eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45;
eth->mii_bus->priv = eth;
eth->mii_bus->parent = eth->dev;
@@ -608,8 +713,8 @@ static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -619,8 +724,8 @@ static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->tx_irq_lock, flags);
- val = mtk_r32(eth, eth->tx_int_mask_reg);
- mtk_w32(eth, val | mask, eth->tx_int_mask_reg);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
}
@@ -630,8 +735,8 @@ static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -641,8 +746,8 @@ static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
u32 val;
spin_lock_irqsave(&eth->rx_irq_lock, flags);
- val = mtk_r32(eth, MTK_PDMA_INT_MASK);
- mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
}
@@ -693,39 +798,39 @@ void mtk_stats_update_mac(struct mtk_mac *mac)
hw_stats->rx_checksum_errors +=
mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
} else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
unsigned int offs = hw_stats->reg_offset;
u64 stats;
- hw_stats->rx_bytes += mtk_r32(mac->hw,
- MTK_GDM1_RX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_RX_GBCNT_H + offs);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
if (stats)
hw_stats->rx_bytes += (stats << 32);
hw_stats->rx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
hw_stats->rx_overflow +=
- mtk_r32(mac->hw, MTK_GDM1_RX_OERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
hw_stats->rx_fcs_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
hw_stats->rx_short_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_SERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
hw_stats->rx_long_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_LENCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
hw_stats->rx_checksum_errors +=
- mtk_r32(mac->hw, MTK_GDM1_RX_CERCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
hw_stats->rx_flow_control_packets +=
- mtk_r32(mac->hw, MTK_GDM1_RX_FCCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
hw_stats->tx_skip +=
- mtk_r32(mac->hw, MTK_GDM1_TX_SKIPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
hw_stats->tx_collisions +=
- mtk_r32(mac->hw, MTK_GDM1_TX_COLCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
hw_stats->tx_bytes +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_L + offs);
- stats = mtk_r32(mac->hw, MTK_GDM1_TX_GBCNT_H + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
if (stats)
hw_stats->tx_bytes += (stats << 32);
hw_stats->tx_packets +=
- mtk_r32(mac->hw, MTK_GDM1_TX_GPCNT + offs);
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
}
u64_stats_update_end(&hw_stats->syncp);
@@ -799,8 +904,8 @@ static inline int mtk_max_buf_size(int frag_size)
return buf_size;
}
-static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
- struct mtk_rx_dma *dma_rxd)
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
{
rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
if (!(rxd->rxd2 & RX_DMA_DONE))
@@ -809,67 +914,89 @@ static inline bool mtk_rx_get_desc(struct mtk_rx_dma *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
return true;
}
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
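mtk_max_frag_size() can exceed PAGE_SIZE for the LRO receive length, which is why this helper allocates compound pages via __get_free_pages() instead of a page-fragment allocator. For convenience, the frag-size computation it relies on, reproduced from context earlier in this file (unchanged by this patch; see the file for the authoritative version):

	static inline unsigned int mtk_max_frag_size(int mtu)
	{
		/* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
		if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH)
			mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;

		return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}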
+
/* the qdma core needs scratch memory to be set up */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
- cnt * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * soc->txrx.txd_size,
&eth->phy_scratch_ring,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
- GFP_KERNEL);
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
if (unlikely(!eth->scratch_head))
return -ENOMEM;
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
return -ENOMEM;
- phy_ring_tail = eth->phy_scratch_ring +
- (sizeof(struct mtk_tx_dma) * (cnt - 1));
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
for (i = 0; i < cnt; i++) {
- eth->scratch_ring[i].txd1 =
- (dma_addr + (i * MTK_QDMA_PAGE_SIZE));
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
- ((i + 1) * sizeof(struct mtk_tx_dma)));
- eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
+ txd->txd2 = eth->phy_scratch_ring +
+ (i + 1) * soc->txrx.txd_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
- mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
- mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
- mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
- mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
return 0;
}
-static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
{
- void *ret = ring->dma;
-
- return ret + (desc - ring->phys);
+ return ring->dma + (desc - ring->phys);
}
-static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
- struct mtk_tx_dma *txd)
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ void *txd, u32 txd_size)
{
- int idx = txd - ring->dma;
+ int idx = (txd - ring->dma) / txd_size;
return &ring->buf[idx];
}
@@ -877,54 +1004,66 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
struct mtk_tx_dma *dma)
{
- return ring->dma_pdma - ring->dma + dma;
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
}
-static int txd_to_idx(struct mtk_tx_ring *ring, struct mtk_tx_dma *dma)
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
{
- return ((void *)dma - (void *)ring->dma) / sizeof(*dma);
+ return (dma - ring->dma) / txd_size;
}
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
- bool napi)
+ struct xdp_frame_bulk *bq, bool napi)
{
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
} else {
if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
}
if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
dma_unmap_addr(tx_buf, dma_addr1),
dma_unmap_len(tx_buf, dma_len1),
DMA_TO_DEVICE);
}
}
- tx_buf->flags = 0;
- if (tx_buf->skb &&
- (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) {
- if (napi)
- napi_consume_skb(tx_buf->skb, napi);
- else
- dev_kfree_skb_any(tx_buf->skb);
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
}
- tx_buf->skb = NULL;
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
}
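The new bq argument lets TX completion batch XDP frame returns: xdp_return_frame_bulk() accumulates page_pool pages and releases them in one shot. It is non-NULL only from the NAPI completion paths, which wrap their loops in xdp_frame_bulk_init() and xdp_flush_frame_bulk(), as the mtk_poll_tx_qdma()/mtk_poll_tx_pdma() hunks further below show.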
static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
@@ -941,7 +1080,7 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
dma_unmap_len_set(tx_buf, dma_len1, size);
} else {
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
txd->txd1 = mapped_addr;
txd->txd2 = TX_DMA_PLEN0(size);
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -950,18 +1089,108 @@ static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
}
}
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM;
+ /* vlan header offload */
+ if (info->vlan)
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
+ }
+ WRITE_ONCE(desc->txd4, data);
+}
+
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ if (!info->qid && mac->id)
+ info->qid = MTK_QDMA_GMAC2_QID;
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
int tx_num, struct mtk_tx_ring *ring, bool gso)
{
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = skb_headlen(skb),
+ .gso = gso,
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
+ .vlan = skb_vlan_tag_present(skb),
+ .qid = skb->mark & MTK_QDMA_TX_MASK,
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ .last = !skb_is_nonlinear(skb),
+ };
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_dma *itxd_pdma, *txd_pdma;
struct mtk_tx_buf *itx_buf, *tx_buf;
- dma_addr_t mapped_addr;
- unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0, fport;
int k = 0;
itxd = ring->next_free;
@@ -969,52 +1198,35 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- /* set the forward port */
- fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
- txd4 |= fport;
-
- itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
memset(itx_buf, 0, sizeof(*itx_buf));
- if (gso)
- txd4 |= TX_DMA_TSO;
-
- /* TX Checksum offload */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- txd4 |= TX_DMA_CHKSUM;
-
- /* VLAN header offload */
- if (skb_vlan_tag_present(skb))
- txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
-
- mapped_addr = dma_map_single(eth->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
return -ENOMEM;
- WRITE_ONCE(itxd->txd1, mapped_addr);
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+
itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb),
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
k++);
/* TX SG offload */
txd = itxd;
txd_pdma = qdma_to_pdma(ring, txd);
- nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < nr_frags; i++) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
unsigned int offset = 0;
int frag_size = skb_frag_size(frag);
while (frag_size) {
- bool last_frag = false;
- unsigned int frag_map_size;
bool new_desc = true;
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) ||
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
(i & 0x1)) {
txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
txd_pdma = qdma_to_pdma(ring, txd);
@@ -1026,47 +1238,42 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
new_desc = false;
}
-
- frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
- frag_map_size,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
+ offset, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
goto err_dma;
- if (i == nr_frags - 1 &&
- (frag_size - frag_map_size) == 0)
- last_frag = true;
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
- WRITE_ONCE(txd->txd1, mapped_addr);
- WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
- TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, fport);
-
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
- tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
MTK_TX_FLAGS_FPORT1;
- setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr,
- frag_map_size, k++);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
+ txd_info.size, k++);
- frag_size -= frag_map_size;
- offset += frag_map_size;
+ frag_size -= txd_info.size;
+ offset += txd_info.size;
}
}
/* store skb to cleanup */
- itx_buf->skb = skb;
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
- WRITE_ONCE(itxd->txd4, txd4);
- WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
- (!nr_frags * TX_DMA_LS0)));
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (k & 0x1)
txd_pdma->txd2 |= TX_DMA_LS0;
else
@@ -1084,13 +1291,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
!netdev_xmit_more())
- mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
} else {
- int next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd),
- ring->dma_size);
+ int next_idx;
+
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1098,13 +1307,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
/* unmap dma */
- mtk_tx_unmap(eth, tx_buf, false);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -1114,17 +1323,16 @@ err_dma:
return -ENOMEM;
}
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
{
- int i, nfrags;
+ int i, nfrags = 1;
skb_frag_t *frag;
- nfrags = 1;
if (skb_is_gso(skb)) {
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- MTK_TX_DMA_BUF_LEN);
+ eth->soc->txrx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
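Worked example of the descriptor budget: with the NETSYS v2 dma_max_len of 0xffff (64 KiB minus 1; per MTK_TX_DMA_BUF_LEN_V2 in mtk_eth_soc.h), a 128 KiB GSO fragment needs DIV_ROUND_UP(131072, 65535) = 3 descriptors, whereas the old fixed MTK_TX_DMA_BUF_LEN of 0x3fff would have required DIV_ROUND_UP(131072, 16383) = 9. Making the limit a per-SoC txrx parameter is what lets both generations share this path.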
@@ -1176,7 +1384,7 @@ static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto drop;
- tx_num = mtk_cal_txd_req(skb);
+ tx_num = mtk_cal_txd_req(eth, skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
netif_stop_queue(dev);
netif_err(eth, tx_queued, dev,
@@ -1227,9 +1435,12 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
return &eth->rx_ring[0];
for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ struct mtk_rx_dma *rxd;
+
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- if (ring->dma[idx].rxd2 & RX_DMA_DONE) {
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
}
@@ -1257,42 +1468,352 @@ static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
}
}
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2);
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, eth->rx_napi.napi_id,
+ id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
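Teardown must mirror this setup in reverse; a minimal sketch of the counterpart (hypothetical helper name, assuming the standard xdp_rxq_info/page_pool APIs; the patch performs the same steps in its ring-cleanup path):

	static void mtk_rx_ring_destroy_page_pool(struct mtk_rx_ring *ring)
	{
		if (!ring->page_pool)
			return;

		if (xdp_rxq_info_is_reg(&ring->xdp_q))
			xdp_rxq_info_unreg(&ring->xdp_q); /* drops the mem model */

		page_pool_destroy(ring->page_pool);
		ring->page_pool = NULL;
	}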
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->flags |= !mac->id ? MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
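mtk_xdp_xmit() becomes the device's ndo_xdp_xmit so other interfaces can XDP_REDIRECT into it. A hedged sketch of the hookup (the full mtk_netdev_ops table sits later in this file; only the XDP members are shown, and mtk_xdp() as the .ndo_bpf attach handler is assumed from the rest of this series):

	static const struct net_device_ops mtk_netdev_ops = {
		.ndo_bpf	= mtk_xdp,	/* XDP_SETUP_PROG attach/detach */
		.ndo_xdp_xmit	= mtk_xdp_xmit,	/* XDP_REDIRECT transmit target */
		/* remaining callbacks unchanged and omitted here */
	};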
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
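Note the contract with the caller: when mtk_xdp_run() returns XDP_REDIRECT, the actual transmit is deferred until xdp_do_flush_map() runs, so mtk_poll_rx() below latches xdp_flush and flushes once per NAPI poll rather than per packet.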
+
static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_eth *eth)
{
struct dim_sample dim_sample = {};
struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
int idx;
struct sk_buff *skb;
u8 *data, *new_data;
- struct mtk_rx_dma *rxd, trxd;
+ struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- unsigned int pktlen;
dma_addr_t dma_addr;
- u32 hash;
- int mac;
+ u32 hash, reason;
+ int mac = 0;
ring = mtk_get_rx_ring(eth);
if (unlikely(!ring))
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = &ring->dma[idx];
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
data = ring->data[idx];
- if (!mtk_rx_get_desc(&trxd, rxd))
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet comes from. values start at 1 */
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) ||
- (trxd.rxd4 & RX_DMA_SPECIAL_TAG))
- mac = 0;
- else
- mac = ((trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
- RX_DMA_FPORT_MASK) - 1;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1;
+ else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG))
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
!eth->netdev[mac]))
@@ -1303,70 +1824,144 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
/* alloc new buffer */
- new_data = napi_alloc_frag(ring->frag_size);
- if (unlikely(!new_data)) {
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
- dma_addr = dma_map_single(eth->dev,
- new_data + NET_SKB_PAD +
- eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
- skb_free_frag(new_data);
- netdev->stats.rx_dropped++;
- goto release_desc;
- }
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
- dma_unmap_single(eth->dev, trxd.rxd1,
- ring->buf_size, DMA_FROM_DEVICE);
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
- /* receive data */
- skb = build_skb(data, ring->frag_size);
- if (unlikely(!skb)) {
- skb_free_frag(data);
- netdev->stats.rx_dropped++;
- goto skip_rx;
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
}
- skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
- skb_put(skb, pktlen);
- if (trxd.rxd4 & eth->rx_dma_l4_valid)
+ bytes += skb->len;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+ } else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd4;
+ }
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, netdev);
- bytes += pktlen;
- hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
- if (hash != MTK_RXD4_FOE_ENTRY) {
- hash = jhash_1word(hash, 0);
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ if (trxd.rxd3 & RX_DMA_VTAG_V2)
+ __vlan_hwaccel_put_tag(skb,
+ htons(RX_DMA_VPID(trxd.rxd4)),
+ RX_DMA_VID(trxd.rxd4));
+ } else if (trxd.rxd2 & RX_DMA_VTAG) {
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ RX_DMA_VID(trxd.rxd3));
+ }
+
+ /* If the device is attached to a dsa switch, the special
+ * tag inserted in VLAN field by hw switch can be offloaded
+ * by RX HW VLAN offload. Clear vlan info.
+ */
+ if (netdev_uses_dsa(netdev))
+ __vlan_hwaccel_clear_tag(skb);
}
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
- (trxd.rxd2 & RX_DMA_VTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- RX_DMA_VID(trxd.rxd3));
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
skip_rx:
ring->data[idx] = new_data;
rxd->rxd1 = (unsigned int)dma_addr;
-
release_desc:
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
rxd->rxd2 = RX_DMA_LSO;
else
- rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
ring->calc_idx = idx;
-
done++;
}
@@ -1385,22 +1980,27 @@ rx_done:
&dim_sample);
net_dim(&eth->rx_dim, dim_sample);
+ if (xdp_flush)
+ xdp_do_flush_map();
+
return done;
}
static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->last_free_ptr;
- dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
u32 next_cpu = desc->txd2;
@@ -1410,29 +2010,34 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
break;
- tx_buf = mtk_desc_to_tx_buf(ring, desc);
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+ eth->soc->txrx.txd_size);
if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
mac = 1;
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[mac] += skb->len;
- done[mac]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[mac] += skb->len;
+ done[mac]++;
+ }
budget--;
}
- mtk_tx_unmap(eth, tx_buf, true);
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = next_cpu;
}
+ xdp_flush_frame_bulk(&bq);
ring->last_free_ptr = cpu;
- mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
return budget;
}
@@ -1441,34 +2046,38 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
unsigned int *done, unsigned int *bytes)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
- struct mtk_tx_dma *desc;
- struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
u32 cpu, dma;
cpu = ring->cpu_idx;
dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
while ((cpu != dma) && budget) {
tx_buf = &ring->buf[cpu];
- skb = tx_buf->skb;
- if (!skb)
+ if (!tx_buf->data)
break;
- if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
- bytes[0] += skb->len;
- done[0]++;
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ bytes[0] += skb->len;
+ done[0]++;
+ }
budget--;
}
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
- mtk_tx_unmap(eth, tx_buf, true);
-
- desc = &ring->dma[cpu];
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
}
+ xdp_flush_frame_bulk(&bq);
ring->cpu_idx = cpu;
@@ -1525,24 +2134,25 @@ static void mtk_handle_status_irq(struct mtk_eth *eth)
static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int tx_done = 0;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
mtk_handle_status_irq(eth);
- mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
tx_done = mtk_poll_tx(eth, budget);
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done tx %d, intr 0x%08x/0x%x\n", tx_done,
- mtk_r32(eth, eth->tx_int_status_reg),
- mtk_r32(eth, eth->tx_int_mask_reg));
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
}
if (tx_done == budget)
return budget;
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
return budget;
if (napi_complete_done(napi, tx_done))
@@ -1554,6 +2164,7 @@ static int mtk_napi_tx(struct napi_struct *napi, int budget)
static int mtk_napi_rx(struct napi_struct *napi, int budget)
{
struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int rx_done_total = 0;
mtk_handle_status_irq(eth);
@@ -1561,40 +2172,44 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
if (unlikely(netif_msg_intr(eth))) {
dev_info(eth->dev,
"done rx %d, intr 0x%08x/0x%x\n", rx_done,
- mtk_r32(eth, MTK_PDMA_INT_STATUS),
- mtk_r32(eth, MTK_PDMA_INT_MASK));
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
}
if (rx_done_total == budget)
return budget;
- } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT);
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
return rx_done_total;
}
static int mtk_tx_alloc(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = sizeof(*ring->dma);
+ int i, sz = soc->txrx.txd_size;
+ struct mtk_tx_dma_v2 *txd;
ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
GFP_KERNEL);
if (!ring->buf)
goto no_tx_mem;
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
goto no_tx_mem;
@@ -1602,18 +2217,25 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
int next = (i + 1) % MTK_DMA_SIZE;
u32 next_ptr = ring->phys + next * sz;
- ring->dma[i].txd2 = next_ptr;
- ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd = ring->dma + i * sz;
+ txd->txd2 = next_ptr;
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd->txd4 = 0;
+ if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
/* On MT7688 (PDMA only) this driver uses the ring->dma structs
* only as the framework. The real HW descriptors are the PDMA
* descriptors in ring->dma_pdma.
*/
- if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
- &ring->phys_pdma,
- GFP_ATOMIC);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+ &ring->phys_pdma, GFP_KERNEL);
if (!ring->dma_pdma)
goto no_tx_mem;
@@ -1625,8 +2247,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
ring->dma_size = MTK_DMA_SIZE;
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
- ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
ring->thresh = MAX_SKB_FRAGS;
@@ -1635,20 +2257,20 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
*/
wmb();
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR);
- mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR);
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
mtk_w32(eth,
ring->phys + ((MTK_DMA_SIZE - 1) * sz),
- MTK_QTX_CRX_PTR);
- mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR);
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
- MTK_QTX_CFG(0));
+ soc->reg_map->qdma.qtx_cfg);
} else {
mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
- mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
}
return 0;
@@ -1659,45 +2281,43 @@ no_tx_mem:
static void mtk_tx_clean(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
int i;
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth, &ring->buf[i], false);
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
kfree(ring->buf);
ring->buf = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
- ring->dma_pdma,
- ring->phys_pdma);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
}
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size;
int i;
- u32 offset = 0;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
return -EINVAL;
ring = &eth->rx_ring_qdma;
- offset = 0x1000;
} else {
ring = &eth->rx_ring[ring_no];
}
@@ -1717,45 +2337,98 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!ring->data)
return -ENOMEM;
- for (i = 0; i < rx_dma_size; i++) {
- ring->data[i] = netdev_alloc_frag(ring->frag_size);
- if (!ring->data[i])
- return -ENOMEM;
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
}
- ring->dma = dma_alloc_coherent(eth->dev,
- rx_dma_size * sizeof(*ring->dma),
- &ring->phys, GFP_ATOMIC);
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
if (!ring->dma)
return -ENOMEM;
for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
- ring->data[i] + NET_SKB_PAD + eth->ip_align,
- ring->buf_size,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
- return -ENOMEM;
- ring->dma[i].rxd1 = (unsigned int)dma_addr;
+ struct mtk_rx_dma_v2 *rxd;
+ dma_addr_t dma_addr;
+ void *data;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr)))
+ return -ENOMEM;
+ }
+ rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
- ring->dma[i].rxd2 = RX_DMA_LSO;
+ rxd->rxd2 = RX_DMA_LSO;
else
- ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
}
+
ring->dma_size = rx_dma_size;
ring->calc_idx_update = false;
ring->calc_idx = rx_dma_size - 1;
- ring->crx_idx_reg = MTK_PRX_CRX_IDX_CFG(ring_no);
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
/* make sure that all changes to the dma ring are flushed before we
* continue
*/
wmb();
- mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
- mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
- mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
- mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
return 0;
}
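
With page-pool mode enabled, the RX fill path above takes buffers from a per-ring pool and receives the DMA address together with the buffer, skipping the dma_map_single() branch entirely. mtk_create_page_pool() and mtk_page_pool_get_buff() are driver helpers; a hedged sketch of the same fill step using the core page_pool API directly, assuming the pool was created with PP_FLAG_DMA_MAP:

#include <linux/errno.h>
#include <linux/mm.h>
#include <net/page_pool.h>

/* Fill one RX slot from a page_pool; pages arrive pre-mapped when the
 * pool is created with PP_FLAG_DMA_MAP, so no dma_map_single() here.
 */
static int demo_fill_rx_slot(struct page_pool *pp, dma_addr_t *dma_addr,
			     void **buf)
{
	struct page *page = page_pool_dev_alloc_pages(pp);

	if (!page)
		return -ENOMEM;

	*dma_addr = page_pool_get_dma_addr(page);
	*buf = page_address(page);
	return 0;
}
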
@@ -1766,27 +2439,36 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
if (ring->data && ring->dma) {
for (i = 0; i < ring->dma_size; i++) {
+ struct mtk_rx_dma *rxd;
+
if (!ring->data[i])
continue;
- if (!ring->dma[i].rxd1)
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (!rxd->rxd1)
continue;
- dma_unmap_single(eth->dev,
- ring->dma[i].rxd1,
- ring->buf_size,
- DMA_FROM_DEVICE);
- skb_free_frag(ring->data[i]);
+
+ dma_unmap_single(eth->dma_dev, rxd->rxd1,
+ ring->buf_size, DMA_FROM_DEVICE);
+ mtk_rx_put_buff(ring, ring->data[i], false);
}
kfree(ring->data);
ring->data = NULL;
}
if (ring->dma) {
- dma_free_coherent(eth->dev,
- ring->dma_size * sizeof(*ring->dma),
- ring->dma,
- ring->phys);
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma, ring->phys);
ring->dma = NULL;
}
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
}
static int mtk_hwlro_rx_init(struct mtk_eth *eth)
@@ -1984,6 +2666,9 @@ static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
struct ethtool_rx_flow_spec *fsp =
(struct ethtool_rx_flow_spec *)&cmd->fs;
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
/* only tcp dst ipv4 is meaningful, others are meaningless */
fsp->flow_type = TCP_V4_FLOW;
fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
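
fsp->location arrives from user space through ethtool, so the added check rejects it before it is used to index mac->hwlro_ip; without it, a crafted index would read out of bounds. The same guard pattern, reduced to a sketch with hypothetical names:

#include <errno.h>

#define DEMO_SLOTS 4	/* stand-in for ARRAY_SIZE(mac->hwlro_ip) */

static unsigned int demo_table[DEMO_SLOTS];

/* Validate a caller-supplied index before the array access. */
static int demo_lookup(unsigned long loc, unsigned int *out)
{
	if (loc >= DEMO_SLOTS)
		return -EINVAL;

	*out = demo_table[loc];
	return 0;
}
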
@@ -2059,9 +2744,9 @@ static int mtk_dma_busy_wait(struct mtk_eth *eth)
u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- reg = MTK_QDMA_GLO_CFG;
+ reg = eth->soc->reg_map->qdma.glo_cfg;
else
- reg = MTK_PDMA_GLO_CFG;
+ reg = eth->soc->reg_map->pdma.glo_cfg;
ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
@@ -2119,8 +2804,8 @@ static int mtk_dma_init(struct mtk_eth *eth)
* automatically
*/
mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
- FC_THRES_MIN, MTK_QDMA_FC_THRES);
- mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
}
return 0;
@@ -2128,16 +2813,16 @@ static int mtk_dma_init(struct mtk_eth *eth)
static void mtk_dma_free(struct mtk_eth *eth)
{
+ const struct mtk_soc_data *soc = eth->soc;
int i;
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
if (eth->scratch_ring) {
- dma_free_coherent(eth->dev,
- MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
- eth->scratch_ring,
- eth->phy_scratch_ring);
+ dma_free_coherent(eth->dma_dev,
+ MTK_DMA_SIZE * soc->txrx.txd_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
}
@@ -2172,7 +2857,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
__napi_schedule(&eth->rx_napi);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
}
return IRQ_HANDLED;
@@ -2194,13 +2879,16 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
- if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) {
- if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT)
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
- if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) {
- if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT)
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
mtk_handle_irq_tx(irq, _eth);
}
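
On SHARED_INT SoCs one line carries both engines, so mtk_handle_irq dispatches only when a source is both enabled (mask) and pending (status), now via reg_map offsets. A compact sketch of that demux shape; the offsets, bits and handlers are illustrative:

#include <stdint.h>

struct demo_irq_src {
	uint32_t mask_off, status_off, bits;
	void (*handler)(void *ctx);
};

/* Service each source whose interrupt is enabled and pending, as the
 * hunk above does for RX (pdma.irq_*) and TX (tx_irq_*).
 */
static void demo_shared_irq(const volatile uint32_t *base,
			    const struct demo_irq_src *src, int n, void *ctx)
{
	int i;

	for (i = 0; i < n; i++)
		if ((base[src[i].mask_off / 4] & src[i].bits) &&
		    (base[src[i].status_off / 4] & src[i].bits))
			src[i].handler(ctx);
}
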
@@ -2214,16 +2902,17 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
}
#endif
static int mtk_start_dma(struct mtk_eth *eth)
{
- u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int err;
err = mtk_dma_init(eth);
@@ -2233,21 +2922,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
}
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- mtk_w32(eth,
- MTK_TX_WB_DDONE | MTK_TX_DMA_EN |
- MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
- MTK_RX_BT_32DWORDS,
- MTK_QDMA_GLO_CFG);
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
mtk_w32(eth,
MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
} else {
mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
- MTK_PDMA_GLO_CFG);
+ reg_map->pdma.glo_cfg);
}
return 0;
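
The QDMA path above switches from a blind write to read-modify-write: NETSYS v2 needs extra enable bits on top of the common set, and ORing into the current glo_cfg value preserves whatever state is already programmed. The idiom in isolation, with invented bit names:

#include <stdint.h>

#define DEMO_TX_EN	(1u << 0)
#define DEMO_RX_EN	(1u << 2)
#define DEMO_V2_EXTRA	(1u << 28)	/* stand-in for the v2-only flags */

/* Preserve unrelated bits instead of clobbering the register. */
static void demo_start_dma(volatile uint32_t *glo_cfg, int is_v2)
{
	uint32_t val = *glo_cfg;

	val |= DEMO_TX_EN | DEMO_RX_EN;
	if (is_v2)
		val |= DEMO_V2_EXTRA;
	*glo_cfg = val;
}
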
@@ -2296,22 +2991,25 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(&eth->dma_refcnt)) {
- u32 gdm_config = MTK_GDMA_TO_PDMA;
- int err;
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
if (err)
return err;
- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
- gdm_config = MTK_GDMA_TO_PPE;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
}
else
@@ -2349,6 +3047,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -2363,7 +3062,7 @@ static int mtk_stop(struct net_device *dev)
mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, MTK_RX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -2371,17 +3070,59 @@ static int mtk_stop(struct net_device *dev)
cancel_work_sync(&eth->tx_dim.work);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
- mtk_stop_dma(eth, MTK_PDMA_GLO_CFG);
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
+
+ return 0;
+}
+
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
return 0;
}
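
mtk_xdp_setup() only bounces the interface when XDP flips between enabled and disabled, since that changes the RX buffer geometry; replacing one program with another is a bare rcu_replace_pointer() under RTNL. From user space the hook is reached through the standard libbpf attach path; a minimal, hedged example (object path and error handling trimmed, libbpf >= 0.8 assumed for bpf_xdp_attach):

#include <bpf/libbpf.h>
#include <net/if.h>

/* Attach an XDP program; the kernel routes this to .ndo_bpf ->
 * mtk_xdp() -> mtk_xdp_setup() in the driver above.
 */
int demo_attach_xdp(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!obj || !ifindex || bpf_object__load(obj))
		return -1;

	prog = bpf_object__next_program(obj, NULL);
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog), 0, NULL);
}
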
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
{
regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
@@ -2426,6 +3167,7 @@ static void mtk_dim_rx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2433,7 +3175,7 @@ static void mtk_dim_rx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_TX_MASK;
val |= MTK_PDMA_DELAY_RX_EN;
@@ -2443,9 +3185,9 @@ static void mtk_dim_rx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
@@ -2456,6 +3198,7 @@ static void mtk_dim_tx(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
struct dim_cq_moder cur_profile;
u32 val, cur;
@@ -2463,7 +3206,7 @@ static void mtk_dim_tx(struct work_struct *work)
dim->profile_ix);
spin_lock_bh(&eth->dim_lock);
- val = mtk_r32(eth, MTK_PDMA_DELAY_INT);
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
val &= MTK_PDMA_DELAY_RX_MASK;
val |= MTK_PDMA_DELAY_TX_EN;
@@ -2473,9 +3216,9 @@ static void mtk_dim_tx(struct work_struct *work)
cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
- mtk_w32(eth, val, MTK_PDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
- mtk_w32(eth, val, MTK_QDMA_DELAY_INT);
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
spin_unlock_bh(&eth->dim_lock);
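
Both DIM workers program one shared delay-interrupt register per DMA block: the RX worker keeps the TX half by ANDing with MTK_PDMA_DELAY_TX_MASK before setting its own enable and threshold bits, and mtk_dim_tx() mirrors that with the RX mask. The split-field update in isolation, field positions invented for illustration:

#include <stdint.h>

#define DEMO_RX_HALF	0x0000ffffu	/* RX enable + thresholds */
#define DEMO_TX_HALF	0xffff0000u	/* TX enable + thresholds */

/* Rewrite only the RX half of a shared moderation register. */
static uint32_t demo_update_rx_half(uint32_t reg, uint32_t rx_bits)
{
	return (reg & DEMO_TX_HALF) | (rx_bits & DEMO_RX_HALF);
}
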
@@ -2484,6 +3227,9 @@ static void mtk_dim_tx(struct work_struct *work)
static int mtk_hw_init(struct mtk_eth *eth)
{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
int i, val, ret;
if (test_and_set_bit(MTK_HW_INIT, &eth->state))
@@ -2496,6 +3242,10 @@ static int mtk_hw_init(struct mtk_eth *eth)
if (ret)
goto err_disable_pm;
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
ret = device_reset(eth->dev);
if (ret) {
@@ -2514,9 +3264,25 @@ static int mtk_hw_init(struct mtk_eth *eth)
return 0;
}
- /* Non-MT7628 handling... */
- ethsys_reset(eth, RSTCTRL_FE);
- ethsys_reset(eth, RSTCTRL_PPE);
+ val = RSTCTRL_FE | RSTCTRL_PPE;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ val |= RSTCTRL_ETH;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+ }
+
+ ethsys_reset(eth, val);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
if (eth->pctl) {
/* Set GE2 driving and slew rate */
@@ -2554,12 +3320,48 @@ static int mtk_hw_init(struct mtk_eth *eth)
mtk_rx_irq_disable(eth, ~0);
/* FE int grouping */
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
- mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ /* PSE should not drop port8 and port9 packets */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
return 0;
err_disable_pm:
@@ -2616,6 +3418,12 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new;
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
@@ -2768,8 +3576,8 @@ static void mtk_get_drvinfo(struct net_device *dev,
{
struct mtk_mac *mac = netdev_priv(dev);
- strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
}
@@ -2805,11 +3613,18 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
int i;
switch (stringset) {
- case ETH_SS_STATS:
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
break;
}
}
@@ -2817,13 +3632,35 @@ static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int mtk_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
- case ETH_SS_STATS:
- return ARRAY_SIZE(mtk_ethtool_stats);
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
default:
return -EOPNOTSUPP;
}
}
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
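
This works because page_pool_get_stats() accumulates into the caller's struct rather than overwriting it, so one page_pool_stats object can sum every RX ring before page_pool_ethtool_stats_get() serialises it; the slots reserved in mtk_get_sset_count() and the names appended in mtk_get_strings() must line up one-for-one with this output. The accumulation shape, as a plain sketch:

/* Sum per-ring counters into one report, the same shape the helper
 * above relies on; field names here are illustrative.
 */
struct demo_pp_stats { unsigned long long alloc_fast, alloc_slow; };

static void demo_sum_rings(const struct demo_pp_stats *rings, int n,
			   struct demo_pp_stats *total)
{
	int i;

	for (i = 0; i < n; i++) {
		total->alloc_fast += rings[i].alloc_fast;
		total->alloc_slow += rings[i].alloc_slow;
	}
}
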
static void mtk_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -2851,6 +3688,8 @@ static void mtk_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
*data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
} while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
}
@@ -2943,6 +3782,8 @@ static const struct net_device_ops mtk_netdev_ops = {
.ndo_poll_controller = mtk_poll_controller,
#endif
.ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
};
static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
@@ -3004,11 +3845,35 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
/* mac config is not set */
mac->interface = PHY_INTERFACE_MODE_NA;
- mac->mode = MLO_AN_PHY;
mac->speed = SPEED_UNKNOWN;
mac->phylink_config.dev = &eth->netdev[id]->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
+ /* This driver makes use of state->speed in mac_config */
+ mac->phylink_config.legacy_pre_march2020 = true;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
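
Rather than filtering modes in a validate callback, the MAC now declares its abilities up front: a mac_capabilities bitmap plus the supported_interfaces interface-mode bitmap, which phylink intersects with what the attached PHY or SFP can do. A trimmed sketch of populating a phylink_config the same way (only the fields the hunk above touches; the capability set is illustrative):

#include <linux/phylink.h>

/* Hedged sketch: declare MAC abilities once and let phylink derive
 * the rest.
 */
static void demo_fill_phylink(struct phylink_config *cfg, bool has_sgmii)
{
	cfg->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
				MAC_10 | MAC_100 | MAC_1000;

	__set_bit(PHY_INTERFACE_MODE_MII, cfg->supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_GMII, cfg->supported_interfaces);
	if (has_sgmii)
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  cfg->supported_interfaces);
}
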
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
@@ -3049,8 +3914,38 @@ free_netdev:
return err;
}
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
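
mtk_eth_set_dma_device() exists for WED: once the offload block owns the DMA path, every descriptor ring and buffer mapping must be re-created against the new struct device, and the cheapest correct way to do that is to close every running netdev, publish the new dma_dev, and reopen them. The generic quiesce/swap/resume shape, sketched with placeholder demo_* types:

struct device;

struct demo_eth { struct device *dma_dev; };

/* Stop all users of the old DMA device, publish the new one, restart;
 * stop/start stand in for the dev_close_many()/dev_open() cycle above.
 */
static void demo_set_dma_device(struct demo_eth *eth,
				struct device *new_dev,
				void (*stop)(struct demo_eth *),
				void (*start)(struct demo_eth *))
{
	stop(eth);		/* unmaps everything against the old device */
	eth->dma_dev = new_dev;
	start(eth);		/* rebuilds rings against the new device */
}
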
static int mtk_probe(struct platform_device *pdev)
{
+ struct resource *res = NULL;
struct device_node *mac_np;
struct mtk_eth *eth;
int err, i;
@@ -3062,24 +3957,13 @@ static int mtk_probe(struct platform_device *pdev)
eth->soc = of_device_get_match_data(&pdev->dev);
eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
eth->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- eth->tx_int_mask_reg = MTK_QDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_QDMA_INT_STATUS;
- } else {
- eth->tx_int_mask_reg = MTK_PDMA_INT_MASK;
- eth->tx_int_status_reg = MTK_PDMA_INT_STATUS;
- }
-
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
eth->ip_align = NET_IP_ALIGN;
- } else {
- eth->rx_dma_l4_valid = RX_DMA_L4_VALID;
- }
spin_lock_init(&eth->page_lock);
spin_lock_init(&eth->tx_irq_lock);
@@ -3110,6 +3994,16 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
GFP_KERNEL);
@@ -3132,6 +4026,33 @@ static int mtk_probe(struct platform_device *pdev)
}
}
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ }
+
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
+ }
+ }
+
for (i = 0; i < 3; i++) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
eth->irq[i] = eth->irq[0];
@@ -3139,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev)
eth->irq[i] = platform_get_irq(pdev, i);
if (eth->irq[i] < 0) {
dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
- return -ENXIO;
+ err = -ENXIO;
+ goto err_wed_exit;
}
}
for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
eth->clks[i] = devm_clk_get(eth->dev,
mtk_clks_source_name[i]);
if (IS_ERR(eth->clks[i])) {
- if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
if (eth->soc->required_clks & BIT(i)) {
dev_err(&pdev->dev, "clock %s not found\n",
mtk_clks_source_name[i]);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_wed_exit;
}
eth->clks[i] = NULL;
}
@@ -3162,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev)
err = mtk_hw_init(eth);
if (err)
- return err;
+ goto err_wed_exit;
eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
@@ -3207,10 +4132,20 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+ }
err = mtk_eth_offload_init(eth);
if (err)
@@ -3235,10 +4170,8 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
- MTK_NAPI_WEIGHT);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
- MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
@@ -3250,6 +4183,8 @@ err_free_dev:
mtk_free_dev(eth);
err_deinit_hw:
mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
return err;
}
@@ -3269,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev)
phylink_disconnect_phy(mac->phylink);
}
+ mtk_wed_exit();
mtk_hw_deinit(eth);
netif_napi_del(&eth->tx_napi);
@@ -3280,50 +4216,129 @@ static int mtk_remove(struct platform_device *pdev)
}
static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7621_CAPS,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7621_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x2028,
.caps = MT7622_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7622_CLKS_BITMAP,
.required_pctl = false,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
.caps = MT7623_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.offload_version = 2,
+ .hash_offset = 2,
+ .foe_entry_size = sizeof(struct mtk_foe_entry) - 16,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
.ana_rgc3 = 0x128,
.caps = MT7629_CAPS | MTK_HWLRO,
.hw_features = MTK_HW_FEATURES,
.required_clks = MT7629_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .hash_offset = 4,
+ .foe_entry_size = sizeof(struct mtk_foe_entry),
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
};
static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
.caps = MT7628_CAPS,
.hw_features = MTK_HW_FEATURES_MT7628,
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
};
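
The txrx blocks are what let one binary drive both descriptor layouts: v1 SoCs keep 16-byte descriptors with the buffer length at bit 16, while mt7986 uses the larger v2 descriptors with the length field at offset 8 and a wider dma_max_len. A sketch of length packing parameterised the same way (the limits shown are examples, not the exact macro values):

#include <stdint.h>

struct demo_txrx {
	uint32_t dma_max_len;	/* e.g. 0x3fff on v1, wider on v2 */
	uint32_t dma_len_offset;/* 16 on v1 SoCs, 8 on mt7986 */
};

/* Pack/unpack a buffer length for whichever descriptor layout the
 * SoC uses, mirroring how the driver applies txrx.dma_len_offset.
 */
static uint32_t demo_prep_plen(const struct demo_txrx *t, uint32_t len)
{
	return (len & t->dma_max_len) << t->dma_len_offset;
}

static uint32_t demo_get_plen(const struct demo_txrx *t, uint32_t word)
{
	return (word >> t->dma_len_offset) & t->dma_max_len;
}
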
const struct of_device_id of_mtk_match[] = {
@@ -3332,6 +4347,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7622-eth", .data = &mt7622_data},
{ .compatible = "mediatek,mt7623-eth", .data = &mt7623_data},
{ .compatible = "mediatek,mt7629-eth", .data = &mt7629_data},
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data},
{ .compatible = "ralink,rt5350-eth", .data = &rt5350_data},
{},
};
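
Each compatible string binds one of the mtk_soc_data blobs above to the matching device node; at probe time of_device_get_match_data() returns that entry, which is how eth->soc ends up describing the register map, descriptor geometry and capabilities for the rest of the driver. The lookup in isolation, as a hedged sketch:

#include <linux/of_device.h>
#include <linux/platform_device.h>

#include "mtk_eth_soc.h"

/* Retrieve the per-SoC data selected by the compatible string, the
 * same mechanism mtk_probe() uses to set eth->soc.
 */
static int demo_probe(struct platform_device *pdev)
{
	const struct mtk_soc_data *soc = of_device_get_match_data(&pdev->dev);

	if (!soc)
		return -EINVAL;

	/* soc->reg_map, soc->txrx and soc->caps now drive everything */
	return 0;
}
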