Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 10
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 3
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c | 497
-rw-r--r--  drivers/net/dsa/mt7530.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 38
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 26
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 2
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 41
-rw-r--r--  drivers/net/ethernet/apm/xgene-v2/mdio.c | 22
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 1
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.c | 876
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_filters.h | 36
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 29
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 55
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 21
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 162
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 109
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 48
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 135
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 58
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 83
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 65
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmmii.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 105
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 11
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 12
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 13
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 103
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 5
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 18
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 485
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 49
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 668
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 84
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 154
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 614
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 42
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 42
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 25
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 41
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 67
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 136
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 110
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 18
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 65
-rw-r--r--  drivers/net/ethernet/intel/igbvf/mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 36
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 54
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 6
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 150
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 155
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 454
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 42
-rw-r--r--  drivers/net/ethernet/netronome/nfp/Makefile | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/ctrl.c | 105
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.c | 308
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/main.h | 117
-rw-r--r--  drivers/net/ethernet/netronome/nfp/abm/qdisc.c | 663
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 164
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 51
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/lag_conf.c | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.c | 41
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 38
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/metadata.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 243
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 62
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.c | 47
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_app.h | 25
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 30
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 4
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 3
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 1
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 5
-rw-r--r--  drivers/net/ethernet/socionext/netsec.c | 238
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 237
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 32
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 38
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 32
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 29
-rw-r--r--  drivers/net/geneve.c | 111
-rw-r--r--  drivers/net/phy/amd.c | 1
-rw-r--r--  drivers/net/phy/aquantia.c | 15
-rw-r--r--  drivers/net/phy/at803x.c | 3
-rw-r--r--  drivers/net/phy/bcm63xx.c | 6
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/bcm87xx.c | 10
-rw-r--r--  drivers/net/phy/broadcom.c | 16
-rw-r--r--  drivers/net/phy/cicada.c | 2
-rw-r--r--  drivers/net/phy/davicom.c | 4
-rw-r--r--  drivers/net/phy/dp83640.c | 1
-rw-r--r--  drivers/net/phy/dp83822.c | 1
-rw-r--r--  drivers/net/phy/dp83848.c | 1
-rw-r--r--  drivers/net/phy/dp83867.c | 1
-rw-r--r--  drivers/net/phy/dp83tc811.c | 1
-rw-r--r--  drivers/net/phy/fixed_phy.c | 19
-rw-r--r--  drivers/net/phy/icplus.c | 145
-rw-r--r--  drivers/net/phy/intel-xway.c | 10
-rw-r--r--  drivers/net/phy/lxt.c | 6
-rw-r--r--  drivers/net/phy/marvell.c | 93
-rw-r--r--  drivers/net/phy/marvell10g.c | 37
-rw-r--r--  drivers/net/phy/meson-gxl.c | 2
-rw-r--r--  drivers/net/phy/micrel.c | 31
-rw-r--r--  drivers/net/phy/microchip.c | 1
-rw-r--r--  drivers/net/phy/microchip_t1.c | 1
-rw-r--r--  drivers/net/phy/mscc.c | 6
-rw-r--r--  drivers/net/phy/national.c | 1
-rw-r--r--  drivers/net/phy/phy-c45.c | 12
-rw-r--r--  drivers/net/phy/phy-core.c | 213
-rw-r--r--  drivers/net/phy/phy.c | 443
-rw-r--r--  drivers/net/phy/phy_device.c | 192
-rw-r--r--  drivers/net/phy/phy_led_triggers.c | 15
-rw-r--r--  drivers/net/phy/phylink.c | 19
-rw-r--r--  drivers/net/phy/qsemi.c | 1
-rw-r--r--  drivers/net/phy/realtek.c | 45
-rw-r--r--  drivers/net/phy/smsc.c | 7
-rw-r--r--  drivers/net/phy/ste10Xp.c | 2
-rw-r--r--  drivers/net/phy/uPD60620.c | 6
-rw-r--r--  drivers/net/phy/vitesse.c | 9
-rw-r--r--  drivers/net/tun.c | 49
-rw-r--r--  drivers/net/usb/lan78xx.c | 27
-rw-r--r--  drivers/net/usb/smsc95xx.c | 55
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/vrf.c | 19
-rw-r--r--  drivers/net/vxlan.c | 60
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 8
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 137
-rw-r--r--  drivers/net/xen-netfront.c | 2
208 files changed, 8137 insertions, 3173 deletions
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index ed6828821fbd..80af658e530d 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -207,7 +207,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
return PTR_ERR(peer_net);
peer = rtnl_create_link(peer_net, ifname, name_assign_type,
- &vxcan_link_ops, tbp);
+ &vxcan_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(peer_net);
return PTR_ERR(peer);
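For context: rtnl_create_link() now takes a struct netlink_ext_ack *, so a
driver's ->newlink() forwards the extack it receives and validation failures
reach the netlink caller with a message. A minimal sketch of the calling
pattern follows; example_link_ops and the surrounding function are
hypothetical, only the new rtnl_create_link() signature comes from this patch:

	#include <net/rtnetlink.h>

	static struct rtnl_link_ops example_link_ops;

	static int example_newlink(struct net *net, struct net_device *dev,
				   struct nlattr *tb[], struct nlattr *data[],
				   struct netlink_ext_ack *extack)
	{
		struct net_device *peer;

		/* extack is threaded through; on failure it may already
		 * carry a message for the netlink caller.
		 */
		peer = rtnl_create_link(net, "peer%d", NET_NAME_ENUM,
					&example_link_ops, tb, extack);
		if (IS_ERR(peer))
			return PTR_ERR(peer);

		return register_netdevice(peer);
	}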
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2eb68769562c..aa4a1f5206f1 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
return ret;
}
+ ret = bcm_sf2_cfp_resume(ds);
+ if (ret)
+ return ret;
+
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
@@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
mutex_init(&priv->cfp.lock);
+ INIT_LIST_HEAD(&priv->cfp.rules_list);
/* CFP rule #0 cannot be used for specific classifications, flag it as
* permanently used
@@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
return ret;
}
+ bcm_sf2_gphy_enable_set(priv->dev->ds, false);
+
ret = bcm_sf2_cfp_rst(priv);
if (ret) {
pr_err("failed to reset CFP\n");
@@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
priv->wol_ports_mask = 0;
dsa_unregister_switch(priv->dev->ds);
+ bcm_sf2_cfp_exit(priv->dev->ds);
/* Disable all ports and interrupts */
bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index cc31e986e6e3..faaef320ec48 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv {
DECLARE_BITMAP(used, CFP_NUM_RULES);
DECLARE_BITMAP(unique, CFP_NUM_RULES);
unsigned int rules_cnt;
+ struct list_head rules_list;
};
struct bcm_sf2_priv {
@@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc);
int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+void bcm_sf2_cfp_exit(struct dsa_switch *ds);
+int bcm_sf2_cfp_resume(struct dsa_switch *ds);
#endif /* __BCM_SF2_H */
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..e14663ab6dbc 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
core_writel(priv, reg, offset);
}
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+ int port, u32 location)
+{
+ struct cfp_rule *rule = NULL;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ if (rule->port == port && rule->fs.location == location)
+ break;
+ }
+
+ return rule;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
@@ -728,27 +789,14 @@ out_err:
return ret;
}
-static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
- struct ethtool_rx_flow_spec *fs)
+static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->ports[port].cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
- int ret = -EINVAL;
-
- /* Check for unsupported extensions */
- if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
- fs->m_ext.data[1]))
- return -EINVAL;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- test_bit(fs->location, priv->cfp.used))
- return -EBUSY;
-
- if (fs->location != RX_CLS_LOC_ANY &&
- fs->location > bcm_sf2_cfp_rule_size(priv))
- return -EINVAL;
+ int ret;
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
@@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
queue_num, fs);
break;
default:
+ ret = -EINVAL;
break;
}
return ret;
}
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule = NULL;
+ int ret = -EINVAL;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype ||
+ fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = bcm_sf2_cfp_rule_insert(ds, port, fs);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
+ return ret;
+}
+
static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
u32 loc, u32 *next_loc)
{
@@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
return 0;
}
-static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
- u32 loc)
+static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
{
u32 next_loc = 0;
int ret;
- /* Refuse deleting unused rules, and those that are not unique since
- * that could leave IPv6 rules with one of the chained rule in the
- * table.
- */
- if (!test_bit(loc, priv->cfp.unique) || loc == 0)
- return -EINVAL;
-
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
@@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
return ret;
}
-static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(flow->m_u); i++)
- flow->m_u.hdata[i] ^= 0xff;
-
- flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
- flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
- flow->m_ext.data[0] ^= cpu_to_be32(~0);
- flow->m_ext.data[1] ^= cpu_to_be32(~0);
-}
-
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
- bool mask)
-{
- u32 reg, offset, ipv4;
- u16 src_dst_port;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
-
- reg = core_readl(priv, offset);
- /* src port [15:8] */
- src_dst_port = reg << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
-
- reg = core_readl(priv, offset);
- /* src port [7:0] */
- src_dst_port |= (reg >> 24);
-
- v4_spec->pdst = cpu_to_be16(src_dst_port);
- v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
- /* IPv4 dst [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
-
- reg = core_readl(priv, offset);
- /* IPv4 dst [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- /* IPv4 dst [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- v4_spec->ip4dst = cpu_to_be32(ipv4);
-
- /* IPv4 src [15:8] */
- ipv4 = (reg & 0xff) << 8;
-
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
+ struct cfp_rule *rule;
+ int ret;
- /* Once the TCAM is programmed, the mask reflects the slice number
- * being matched, don't bother checking it when reading back the
- * mask spec
+ /* Refuse deleting unused rules, and those that are not unique since
+ * that could leave IPv6 rules with one of the chained rule in the
+ * table.
*/
- if (!mask && !(reg & SLICE_VALID))
+ if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
- /* IPv4 src [7:0] */
- ipv4 |= (reg >> 24) & 0xff;
- /* IPv4 src [31:16] */
- ipv4 |= ((reg >> 8) & 0xffff) << 16;
- v4_spec->ip4src = cpu_to_be32(ipv4);
-
- return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs)
-{
- struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
- u32 reg;
- int ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V4_FLOW;
- v4_spec = &fs->h_u.tcp_ip4_spec;
- v4_m_spec = &fs->m_u.tcp_ip4_spec;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V4_FLOW;
- v4_spec = &fs->h_u.udp_ip4_spec;
- v4_m_spec = &fs->m_u.udp_ip4_spec;
- break;
- default:
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
return -EINVAL;
- }
-
- fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
- v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
- ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
- if (ret)
- return ret;
-
- return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
- __be32 *ip6_addr, __be16 *port,
- bool mask)
-{
- u32 reg, tmp, offset;
-
- /* C-Tag [31:24]
- * UDF_n_B8 [23:8] (port)
- * UDF_n_B7 (upper) [7:0] (addr[15:8])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(4);
- else
- offset = CORE_CFP_DATA_PORT(4);
- reg = core_readl(priv, offset);
- *port = cpu_to_be32(reg) >> 8;
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B7 (lower) [31:24] (addr[7:0])
- * UDF_n_B6 [23:8] (addr[31:16])
- * UDF_n_B5 (upper) [7:0] (addr[47:40])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(3);
- else
- offset = CORE_CFP_DATA_PORT(3);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[3] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
-
- /* UDF_n_B5 (lower) [31:24] (addr[39:32])
- * UDF_n_B4 [23:8] (addr[63:48])
- * UDF_n_B3 (upper) [7:0] (addr[79:72])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(2);
- else
- offset = CORE_CFP_DATA_PORT(2);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[2] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
- /* UDF_n_B3 (lower) [31:24] (addr[71:64])
- * UDF_n_B2 [23:8] (addr[95:80])
- * UDF_n_B1 (upper) [7:0] (addr[111:104])
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(1);
- else
- offset = CORE_CFP_DATA_PORT(1);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[1] = cpu_to_be32(tmp);
- tmp = (u32)(reg & 0xff) << 8;
+ ret = bcm_sf2_cfp_rule_remove(priv, port, loc);
- /* UDF_n_B1 (lower) [31:24] (addr[103:96])
- * UDF_n_B0 [23:8] (addr[127:112])
- * Reserved [7:4]
- * Slice ID [3:2]
- * Slice valid [1:0]
- */
- if (mask)
- offset = CORE_CFP_MASK_PORT(0);
- else
- offset = CORE_CFP_DATA_PORT(0);
- reg = core_readl(priv, offset);
- tmp |= (reg >> 24) & 0xff;
- tmp |= (u32)((reg >> 8) << 16);
- ip6_addr[0] = cpu_to_be32(tmp);
+ list_del(&rule->next);
+ kfree(rule);
- if (!mask && !(reg & SLICE_VALID))
- return -EINVAL;
-
- return 0;
+ return ret;
}
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs,
- u32 next_loc)
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
{
- struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
- u32 reg;
- int ret;
-
- /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
- * assuming tcp_ip6_spec here being an union.
- */
- v6_spec = &fs->h_u.tcp_ip6_spec;
- v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
- /* Read the second half first */
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
- false);
- if (ret)
- return ret;
-
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
- &v6_m_spec->pdst, true);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations. We would not have the port enabled for this rule, so
- * don't bother checking it.
- */
- (void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
- /* The slice number is valid, so read the rule we are chained from now
- * which is our first half.
- */
- bcm_sf2_cfp_rule_addr_set(priv, next_loc);
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
- switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
- case IPPROTO_TCP:
- fs->flow_type = TCP_V6_FLOW;
- break;
- case IPPROTO_UDP:
- fs->flow_type = UDP_V6_FLOW;
- break;
- default:
- return -EINVAL;
- }
+ unsigned int i;
- ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
- false);
- if (ret)
- return ret;
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
- return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
- &v6_m_spec->psrc, true);
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
struct ethtool_rxnfc *nfc)
{
- u32 reg, ipv4_or_chain_id;
- unsigned int queue_num;
- int ret;
-
- bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
- if (ret)
- return ret;
-
- reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
- ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
- if (ret)
- return ret;
-
- /* Extract the destination port */
- nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
- DST_MAP_IB_MASK) - 1;
-
- /* There is no Port 6, so we compensate for that here */
- if (nfc->fs.ring_cookie >= 6)
- nfc->fs.ring_cookie++;
- nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
- /* Extract the destination queue */
- queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
- nfc->fs.ring_cookie += queue_num;
-
- /* Extract the L3_FRAMING or CHAIN_ID */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ struct cfp_rule *rule;
- /* With IPv6 rules this would contain a non-zero chain ID since
- * we reserve entry 0 and it cannot be used. So if we read 0 here
- * this means an IPv4 rule.
- */
- ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
- if (ipv4_or_chain_id == 0)
- ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
- else
- ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
- ipv4_or_chain_id);
- if (ret)
- return ret;
-
- /* Read last to avoid next entry clobbering the results during search
- * operations
- */
- reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
- if (!(reg & 1 << port))
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
return -EINVAL;
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
bcm_sf2_invert_masks(&nfc->fs);
/* Put the TCAM size here */
@@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
return 0;
}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
+
+int bcm_sf2_cfp_resume(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule;
+ int ret = 0;
+ u32 reg;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg &= ~CFP_EN_MAP_MASK;
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = bcm_sf2_cfp_rule_remove(priv, rule->port,
+ rule->fs.location);
+ if (ret) {
+ dev_err(ds->dev, "failed to remove rule\n");
+ return ret;
+ }
+
+ ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs);
+ if (ret) {
+ dev_err(ds->dev, "failed to restore rule\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
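The structural change in this file: every rule accepted into the TCAM is now
mirrored in software as a struct cfp_rule on priv->cfp.rules_list. Ethtool
reads (rule_get) become list lookups instead of TCAM readback, which is why
the unslice/ipv4_rule_get/ipv6_rule_get readback code above could be deleted,
and suspend/resume becomes a remove-and-reinsert replay. The caching pattern
in isolation, as a generic sketch rather than driver code:

	#include <linux/list.h>
	#include <linux/types.h>

	struct sw_rule {
		int port;
		u32 location;		/* hardware slot the rule occupies */
		struct list_head next;
	};

	/* Return the cached rule, or NULL if (port, location) was never
	 * programmed; no hardware access needed.
	 */
	static struct sw_rule *sw_rule_find(struct list_head *rules,
					    int port, u32 location)
	{
		struct sw_rule *rule;

		list_for_each_entry(rule, rules, next) {
			if (rule->port == port && rule->location == location)
				return rule;
		}

		return NULL;
	}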
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index a5de9bffe5be..74547f43b938 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port,
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(
+ phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
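linkmode_adv_to_lcl_adv_t() is the bitmap-aware replacement for
ethtool_adv_to_lcl_adv_t(): it folds the PHY's advertising bitmap into the
LPA_PAUSE_* bits that mii_resolve_flowctrl_fdx() expects. The complete pause
resolution step, as both this driver and xgbe below use it; a sketch built
only from the linux/mii.h helpers:

	#include <linux/mii.h>
	#include <linux/phy.h>

	static void resolve_pause(struct phy_device *phydev,
				  bool *tx_pause, bool *rx_pause)
	{
		u16 lcl_adv, rmt_adv = 0;
		u8 flowctrl;

		/* Link partner's pause capabilities, as LPA_* bits */
		if (phydev->pause)
			rmt_adv |= LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		/* Our side, condensed from the linkmode bitmap */
		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		*tx_pause = flowctrl & FLOW_CTRL_TX;
		*rx_pause = flowctrl & FLOW_CTRL_RX;
	}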
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index e05d4eddc935..b603f8d6ee3e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2524,11 +2524,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
mutex_unlock(&chip->reg_lock);
if (reg == MII_PHYSID2) {
- /* Some internal PHYS don't have a model number. Use
- * the mv88e6390 family model number instead.
- */
- if (!(val & 0x3f0))
- val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
+ /* Some internal PHYs don't have a model number. */
+ if (chip->info->family != MV88E6XXX_FAMILY_6165)
+ /* Then there is the 6165 family. It gets its
+ * PHYs correct. But it can also have two
+ * SERDES interfaces in the PHY address
+ * space. And these don't have a model
+ * number. But they are not PHYs, so we don't
+ * want to give them something a PHY driver
+ * will recognise.
+ *
+ * Use the mv88e6390 family model number
+ * instead, for anything which really could be
+ * a PHY.
+ */
+ if (!(val & 0x3f0))
+ val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4;
}
return err ? err : val;
@@ -3234,6 +3245,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3276,6 +3288,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3291,8 +3304,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.phylink_validate = mv88e6390x_phylink_validate,
};
@@ -3318,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3405,11 +3419,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.port_set_egress_floods = mv88e6352_port_set_egress_floods,
.port_set_ether_type = mv88e6351_port_set_ether_type,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3710,11 +3724,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3757,11 +3771,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6390_port_pause_limit,
- .port_set_cmode = mv88e6390x_port_set_cmode,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6352_port_get_cmode,
+ .port_set_cmode = mv88e6390x_port_set_cmode,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
.stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
@@ -3777,8 +3791,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390x_serdes_power,
- .serdes_irq_setup = mv88e6390_serdes_irq_setup,
- .serdes_irq_free = mv88e6390_serdes_irq_free,
+ .serdes_irq_setup = mv88e6390x_serdes_irq_setup,
+ .serdes_irq_free = mv88e6390x_serdes_irq_free,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index cd7db60a508b..ebd26b6a93e6 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
u16 reg;
int err;
- if (mode == PHY_INTERFACE_MODE_NA)
- return 0;
-
if (port != 9 && port != 10)
return -EOPNOTSUPP;
+ /* Default to a slow mode, so freeing up SERDES interfaces for
+ * other ports which might use them for SFPs.
+ */
+ if (mode == PHY_INTERFACE_MODE_NA)
+ mode = PHY_INTERFACE_MODE_1000BASEX;
+
switch (mode) {
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X;
@@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
}
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode)
+{
+ switch (mode) {
+ case PHY_INTERFACE_MODE_XGMII:
+ case PHY_INTERFACE_MODE_XAUI:
+ case PHY_INTERFACE_MODE_RXAUI:
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ return mv88e6390x_port_set_cmode(chip, port, mode);
+}
+
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode)
{
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index 36904c9bf955..0d81866d0e4a 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in,
u8 out);
+int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
+ phy_interface_t mode);
int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
phy_interface_t mode);
int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode);
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index bb69650ff772..2caa8c8b4b55 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -619,15 +619,11 @@ out:
return ret;
}
-int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
{
int lane;
int err;
- /* Only support ports 9 and 10 at the moment */
- if (port < 9)
- return 0;
-
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane == -ENODEV)
@@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
return mv88e6390_serdes_irq_enable(chip, port, lane);
}
-void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return 0;
+
+ return mv88e6390x_serdes_irq_setup(chip, port);
+}
+
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
{
int lane = mv88e6390x_serdes_get_lane(chip, port);
- if (port < 9)
+ if (lane == -ENODEV)
return;
if (lane < 0)
@@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
chip->ports[port].serdes_irq = 0;
}
+void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port)
+{
+ if (port < 9)
+ return;
+
+ mv88e6390x_serdes_irq_free(chip, port);
+}
+
int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
{
u8 cmode = chip->ports[port].cmode;
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 7870c5a9ef12..573dce8b1eb4 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
+int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port);
+void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
int port, uint8_t *data);
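The renames above follow one convention: mv88e6390x_* operates on the SERDES
lane for ports 9 and 10, while the mv88e6390_* variant is a thin guard that
returns early for ports below 9, which have no lane on non-X parts. The shape
of the split as a self-contained sketch; all names are hypothetical and the
lane lookup is a stand-in for mv88e6390x_serdes_get_lane():

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical lane lookup: only ports 9 and 10 have a lane. */
	static int lookup_lane(int port)
	{
		return (port == 9 || port == 10) ? port - 9 : -ENODEV;
	}

	static int chipx_serdes_op(int port)	/* "X" variant: real work */
	{
		int lane = lookup_lane(port);

		if (lane == -ENODEV)
			return 0;	/* nothing to do in this cmode */

		printf("operate on lane %d\n", lane);
		return 0;
	}

	static int chip_serdes_op(int port)	/* non-X variant: gate only */
	{
		if (port < 9)
			return 0;

		return chipx_serdes_op(port);
	}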
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7c9348a26cbb..91fc64c1145e 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev)
else
phy_set_max_speed(phy, SPEED_100);
- phy->advertising = phy->supported;
+ linkmode_copy(phy->advertising, phy->supported);
greth->link = 0;
greth->speed = 0;
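This one-liner is the recurring mechanical change of the series: in this
kernel, phydev->supported and phydev->advertising become link-mode bitmaps
rather than legacy u32 masks, so plain assignments and bit arithmetic become
linkmode helpers. The helpers used by the conversions in this diff, gathered
in one sketch; the function name and the chosen bits are illustrative:

	#include <linux/linkmode.h>
	#include <linux/phy.h>

	static void example_linkmode_usage(struct phy_device *phydev)
	{
		__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

		/* was: phy->advertising = phy->supported & some_u32_mask; */
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
		linkmode_and(phydev->advertising, phydev->supported, mask);

		/* was: phy->supported &= ~SUPPORTED_FIBRE; */
		linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
				   phydev->supported);

		/* was: phy->advertising = phy->supported; */
		linkmode_copy(phydev->advertising, phydev->supported);
	}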
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 151bdb629e8a..128cd648ba99 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
phy_write(phy_data->phydev, 0x04, 0x0d01);
phy_write(phy_data->phydev, 0x00, 0x9140);
- phy_data->phydev->supported = PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+
+ linkmode_copy(phy_data->phydev->supported, supported);
+
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
unsigned int phy_id = phy_data->phydev->phy_id;
@@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
reg = phy_read(phy_data->phydev, 0x00);
phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
- phy_data->phydev->supported = (PHY_10BT_FEATURES |
- PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ supported);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ supported);
+ linkmode_copy(phy_data->phydev->supported, supported);
phy_support_asym_pause(phy_data->phydev);
netif_dbg(pdata, drv, pdata->netdev,
@@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
struct phy_device *phydev;
- u32 advertising;
int ret;
/* If we already have a PHY, just return */
@@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
xgbe_phy_external_phy_quirks(pdata);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
- phydev->advertising &= advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ lks->link_modes.advertising);
phy_start_aneg(phy_data->phydev);
@@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising);
if (phy_data->phydev->pause) {
XGBE_SET_LP_ADV(lks, Pause);
@@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
{
struct ethtool_link_ksettings *lks = &pdata->phy.lks;
struct xgbe_phy_data *phy_data = pdata->phy_data;
- u32 advertising;
int ret;
ret = xgbe_phy_find_phy_device(pdata);
@@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
if (!phy_data->phydev)
return 0;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- lks->link_modes.advertising);
-
phy_data->phydev->autoneg = pdata->phy.autoneg;
- phy_data->phydev->advertising = phy_data->phydev->supported &
- advertising;
+ linkmode_and(phy_data->phydev->advertising,
+ phy_data->phydev->supported,
+ lks->link_modes.advertising);
if (pdata->phy.autoneg != AUTONEG_ENABLE) {
phy_data->phydev->speed = pdata->phy.speed;
diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c
index f5fe3bb2e59d..53529cd85162 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mdio.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c
@@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev)
int xge_mdio_config(struct net_device *ndev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct xge_pdata *pdata = netdev_priv(ndev);
struct device *dev = &pdata->pdev->dev;
struct mii_bus *mdio_bus;
@@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev)
goto err;
}
- phydev->supported &= ~(SUPPORTED_10baseT_Half |
- SUPPORTED_10baseT_Full |
- SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_1000baseT_Half |
- SUPPORTED_AUI |
- SUPPORTED_MII |
- SUPPORTED_FIBRE |
- SUPPORTED_BNC);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask);
+
+ linkmode_andnot(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
pdata->phy_speed = SPEED_UNKNOWN;
return 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 686f6d8c9e79..4556630ee286 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \
aq_ring.o \
aq_hw_utils.o \
aq_ethtool.o \
+ aq_filters.o \
hw_atl/hw_atl_a0.o \
hw_atl/hw_atl_b0.o \
hw_atl/hw_atl_utils.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
index becb578211ed..6b6d1724676e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -14,7 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/pci.h>
-
+#include <linux/if_vlan.h>
#include "ver.h"
#include "aq_cfg.h"
#include "aq_utils.h"
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 99ef1daaa4d8..a5fd71692c8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -12,6 +12,7 @@
#include "aq_ethtool.h"
#include "aq_nic.h"
#include "aq_vec.h"
+#include "aq_filters.h"
static void aq_ethtool_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
@@ -213,7 +214,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev,
case ETHTOOL_GRXRINGS:
cmd->data = cfg->vecs;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic);
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = aq_get_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ return err;
+}
+
+static int aq_ethtool_set_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = aq_add_rxnfc_rule(aq_nic, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = aq_del_rxnfc_rule(aq_nic, cmd);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -520,6 +550,7 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_rxfh_key_size = aq_ethtool_get_rss_key_size,
.get_rxfh = aq_ethtool_get_rss,
.get_rxnfc = aq_ethtool_get_rxnfc,
+ .set_rxnfc = aq_ethtool_set_rxnfc,
.get_sset_count = aq_ethtool_get_sset_count,
.get_ethtool_stats = aq_ethtool_stats,
.get_link_ksettings = aq_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
new file mode 100644
index 000000000000..18bc035da850
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.c: RX filters related functions. */
+
+#include "aq_filters.h"
+
+static bool __must_check
+aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->flow_type & FLOW_MAC_EXT)
+ return false;
+
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ return true;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ case IPV6_USER_FLOW:
+ switch (fsp->h_u.usr_ip6_spec.l4_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_SCTP:
+ case IPPROTO_IP:
+ return true;
+ default:
+ return false;
+ }
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static bool __must_check
+aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
+ struct ethtool_rx_flow_spec *fsp2)
+{
+ if (fsp1->flow_type != fsp2->flow_type ||
+ memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
+ memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
+ memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
+ memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
+ return false;
+
+ return true;
+}
+
+static bool __must_check
+aq_rule_already_exists(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct aq_rx_filter *rule;
+ struct hlist_node *aq_node2;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == fsp->location)
+ continue;
+ if (aq_match_filter(&rule->aq_fsp, fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: This filter is already set\n");
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
+ fsp->location > AQ_RX_LAST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FL3L4,
+ AQ_RX_LAST_LOC_FL3L4);
+ return -EINVAL;
+ }
+ if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ netdev_err(aq_nic->ndev,
+ "ethtool: mixing ipv4 and ipv6 is not allowed");
+ return -EINVAL;
+ } else if (rx_fltrs->fl3l4.is_ipv6 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
+ fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified location for ipv6 must be %d or %d",
+ AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fl2(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
+ fsp->location > AQ_RX_LAST_LOC_FETHERT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FETHERT,
+ AQ_RX_LAST_LOC_FETHERT);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
+ fsp->m_u.ether_spec.h_proto == 0U) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: proto (ether_type) parameter must be specified");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __must_check
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_hw_rx_fltrs_s *rx_fltrs,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
+ fsp->location > AQ_RX_LAST_LOC_FVLANID) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: location must be in range [%d, %d]",
+ AQ_RX_FIRST_LOC_FVLANID,
+ AQ_RX_LAST_LOC_FVLANID);
+ return -EINVAL;
+ }
+
+ if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ aq_nic->active_vlans))) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown vlan-id specified");
+ return -EINVAL;
+ }
+
+ if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: queue number must be in range [0, %d]",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int __must_check
+aq_check_filter(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ if (fsp->flow_type & FLOW_EXT) {
+ if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
+ err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
+ } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ } else {
+ netdev_err(aq_nic->ndev,
+ "ethtool: invalid vlan mask 0x%x specified",
+ be16_to_cpu(fsp->m_ext.vlan_tci));
+ err = -EINVAL;
+ }
+ } else {
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IPV4_FLOW:
+ case IP_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = false;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_FLOW:
+ case IPV6_USER_FLOW:
+ rx_fltrs->fl3l4.is_ipv6 = true;
+ err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
+ break;
+ default:
+ netdev_err(aq_nic->ndev,
+ "ethtool: unknown flow-type specified");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
+
+static bool __must_check
+aq_rule_is_not_support(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_support = false;
+
+ if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: Please, to enable the RX flow control:\n"
+ "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
+ rule_is_not_support = true;
+ } else if (!aq_rule_is_approve(fsp)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified flow type is not supported\n");
+ rule_is_not_support = true;
+ } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
+ (fsp->h_u.tcp_ip4_spec.tos ||
+ fsp->h_u.tcp_ip6_spec.tclass)) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified tos tclass are not supported\n");
+ rule_is_not_support = true;
+ } else if (fsp->flow_type & FLOW_MAC_EXT) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: MAC_EXT is not supported");
+ rule_is_not_support = true;
+ }
+
+ return rule_is_not_support;
+}
+
+static bool __must_check
+aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ bool rule_is_not_correct = false;
+
+ if (!aq_nic) {
+ rule_is_not_correct = true;
+ } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified number %u rule is invalid\n",
+ fsp->location);
+ rule_is_not_correct = true;
+ } else if (aq_check_filter(aq_nic, fsp)) {
+ rule_is_not_correct = true;
+ } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
+ netdev_err(aq_nic->ndev,
+ "ethtool: The specified action is invalid.\n"
+ "Maximum allowable value action is %u.\n",
+ aq_nic->aq_nic_cfg.num_rss_queues - 1);
+ rule_is_not_correct = true;
+ }
+ }
+
+ return rule_is_not_correct;
+}
+
+static int __must_check
+aq_check_rule(struct aq_nic_s *aq_nic,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ int err = 0;
+
+ if (aq_rule_is_not_correct(aq_nic, fsp))
+ err = -EINVAL;
+ else if (aq_rule_is_not_support(aq_nic, fsp))
+ err = -EOPNOTSUPP;
+ else if (aq_rule_already_exists(aq_nic, fsp))
+ err = -EEXIST;
+
+ return err;
+}
+
+static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l2 *data, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
+
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
+ data->queue = fsp->ring_cookie;
+ else
+ data->queue = -1;
+
+ data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
+ data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
+ == VLAN_PRIO_MASK;
+ data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+}
+
+static int aq_add_del_fether(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ struct aq_rx_filter_l2 data;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
+
+ if (unlikely(!aq_hw_ops->hw_filter_l2_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
+ return -EOPNOTSUPP;
+
+ if (add)
+ return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
+ else
+ return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
+}
+
+static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
+ aq_vlans[i].vlan_id == vlan) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Function rebuilds array of vlan filters so that filters with assigned
+ * queue have a precedence over just vlans on the interface.
+ */
+static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
+ unsigned long *active_vlans,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ bool vlan_busy = false;
+ int vlan = -1;
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].enable &&
+ aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
+ continue;
+ do {
+ vlan = find_next_bit(active_vlans,
+ VLAN_N_VID,
+ vlan + 1);
+ if (vlan == VLAN_N_VID) {
+ aq_vlans[i].enable = 0U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = 0;
+ continue;
+ }
+
+ vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
+ if (!vlan_busy) {
+ aq_vlans[i].enable = 1U;
+ aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
+ aq_vlans[i].vlan_id = vlan;
+ }
+ } while (vlan_busy && vlan != VLAN_N_VID);
+ }
+}
+
+static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_vlan *aq_vlans, bool add)
+{
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+ int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
+ int i;
+
+ memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
+
+ if (!add)
+ return 0;
+
+ /* remove the VLAN if it is already in the table without a queue */
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
+ if (aq_vlans[i].vlan_id ==
+ (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
+ aq_vlans[i].enable = false;
+ }
+ }
+
+ aq_vlans[location].location = location;
+ aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
+ & VLAN_VID_MASK;
+ aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
+ aq_vlans[location].enable = 1U;
+
+ return 0;
+}
+
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
+ break;
+ }
+ if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
+ struct ethtool_rxnfc cmd;
+
+ cmd.fs.location = rule->aq_fsp.location;
+ return aq_del_rxnfc_rule(aq_nic, &cmd);
+ }
+
+ return -ENOENT;
+}
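+
+/* Editor's note: only cmd.fs.location is initialized above; that is the
+ * sole field aq_del_rxnfc_rule() consults, so the rest of the on-stack
+ * struct ethtool_rxnfc may stay uninitialized.
+ */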
+
+static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+
+ aq_set_data_fvlan(aq_nic,
+ aq_rx_fltr,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
+ add);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr,
+ struct aq_rx_filter_l3l4 *data, bool add)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
+
+ memset(data, 0, sizeof(*data));
+
+ data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
+ data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
+
+ if (!add) {
+ if (!data->is_ipv6)
+ rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
+ else
+ rx_fltrs->fl3l4.active_ipv6 &=
+ ~BIT((data->location) / 4);
+
+ return 0;
+ }
+
+ data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_UDP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ data->cmd |= HW_ATL_RX_SCTP;
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
+ break;
+ default:
+ break;
+ }
+
+ if (!data->is_ipv6) {
+ data->ip_src[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
+ data->ip_dst[0] =
+ ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
+ } else {
+ int i;
+
+ rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ data->ip_dst[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
+ data->ip_src[i] =
+ ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
+ }
+ data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
+ }
+ if (fsp->flow_type != IP_USER_FLOW &&
+ fsp->flow_type != IPV6_USER_FLOW) {
+ if (!data->is_ipv6) {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip4_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip4_spec.psrc);
+ } else {
+ data->p_dst =
+ ntohs(fsp->h_u.tcp_ip6_spec.pdst);
+ data->p_src =
+ ntohs(fsp->h_u.tcp_ip6_spec.psrc);
+ }
+ }
+ if (data->ip_src[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
+ if (data->ip_dst[0] && !data->is_ipv6)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
+ if (data->p_dst)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
+ if (data->p_src)
+ data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
+ if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
+ data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
+ data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ } else {
+ data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
+ }
+
+ return 0;
+}
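+
+/* Editor's note (worked example, not driver code): for a TCP/IPv4 rule
+ * that matches only a destination port and steers to queue 3, the logic
+ * above composes
+ *
+ *	cmd = HW_ATL_RX_ENABLE_FLTR_L3L4
+ *	    | HW_ATL_RX_ENABLE_CMP_PROT_L4
+ *	    | HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4
+ *	    | (HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT)
+ *	    | (3 << HW_ATL_RX_QUEUE_FL3L4_SHIFT)
+ *	    | HW_ATL_RX_ENABLE_QUEUE_L3L4;
+ */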
+
+static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
+ const struct aq_hw_ops *aq_hw_ops,
+ struct aq_rx_filter_l3l4 *data)
+{
+ if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
+ return -EOPNOTSUPP;
+
+ return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
+}
+
+static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ struct aq_rx_filter_l3l4 data;
+
+ if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
+ aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
+ aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
+ return -EINVAL;
+
+ return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
+}
+
+static int aq_add_del_rule(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, bool add)
+{
+ int err = -EINVAL;
+
+ if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
+ if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_VID_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_vlan;
+ err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
+ } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
+ == VLAN_PRIO_MASK) {
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ }
+ } else {
+ switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
+ case ETHER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_ethertype;
+ err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case IP_USER_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ case IPV6_USER_FLOW:
+ aq_rx_fltr->type = aq_rx_filter_l3l4;
+ err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int aq_update_table_filters(struct aq_nic_s *aq_nic,
+ struct aq_rx_filter *aq_rx_fltr, u16 index,
+ struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL, *parent = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location >= index)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->aq_fsp.location == index) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+ if (unlikely(!aq_rx_fltr))
+ return err;
+
+ INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
+
+ if (parent)
+ hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
+ else
+ hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
+
+ ++rx_fltrs->active_filters;
+
+ return 0;
+}
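+
+/* Editor's note: the walk above keeps filter_list sorted by ascending
+ * rule location; aq_get_rxnfc_rule() below relies on that ordering to
+ * stop at the first entry whose location is >= the requested one.
+ */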
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+
+ return rx_fltrs->active_filters;
+}
+
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
+{
+ return &aq_nic->aq_hw_rx_fltrs;
+}
+
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *aq_rx_fltr;
+ int err = 0;
+
+ err = aq_check_rule(aq_nic, fsp);
+ if (err)
+ goto err_exit;
+
+ aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
+ if (unlikely(!aq_rx_fltr)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
+
+ err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
+ if (unlikely(err))
+ goto err_free;
+
+ err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
+ if (unlikely(err)) {
+ hlist_del(&aq_rx_fltr->aq_node);
+ --rx_fltrs->active_filters;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ kfree(aq_rx_fltr);
+err_exit:
+ return err;
+}
+
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+ int err = -EINVAL;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (rule->aq_fsp.location == cmd->fs.location)
+ break;
+ }
+
+ if (rule && rule->aq_fsp.location == cmd->fs.location) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+ return err;
+}
+
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct aq_rx_filter *rule = NULL;
+ struct hlist_node *aq_node2;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node)
+ if (fsp->location <= rule->aq_fsp.location)
+ break;
+
+ if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
+ return -EINVAL;
+
+ memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
+
+ return 0;
+}
+
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int count = 0;
+
+ cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ if (unlikely(count == cmd->rule_cnt))
+ return -EMSGSIZE;
+
+ rule_locs[count++] = rule->aq_fsp.location;
+ }
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, false);
+ if (err)
+ goto err_exit;
+ hlist_del(&rule->aq_node);
+ kfree(rule);
+ --rx_fltrs->active_filters;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
+{
+ struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
+ struct hlist_node *aq_node2;
+ struct aq_rx_filter *rule;
+ int err = 0;
+
+ hlist_for_each_entry_safe(rule, aq_node2,
+ &rx_fltrs->filter_list, aq_node) {
+ err = aq_add_del_rule(aq_nic, rule, true);
+ if (err)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int hweight = 0;
+ int err = 0;
+ int i;
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
+ hweight += hweight_long(aq_nic->active_vlans[i]);
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ }
+
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ if (err)
+ return err;
+
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (hweight < AQ_VLAN_MAX_FILTERS)
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true);
+ /* otherwise the filter is left in promiscuous mode */
+ }
+
+ return err;
+}
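+
+/* Editor's note: if more than AQ_VLAN_MAX_FILTERS (16) VLANs are active,
+ * the hweight test above never re-enables the hardware filter, so the
+ * device intentionally stays in VLAN promiscuous mode.
+ */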
+
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
+{
+ const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
+ struct aq_hw_s *aq_hw = aq_nic->aq_hw;
+ int err = 0;
+
+ memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
+
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
+ return -EOPNOTSUPP;
+ if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
+ return -EOPNOTSUPP;
+
+ err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
+ if (err)
+ return err;
+ err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
+ aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
+ );
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
new file mode 100644
index 000000000000..c6a08c6585d5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2014-2017 aQuantia Corporation. */
+
+/* File aq_filters.h: RX filters related functions. */
+
+#ifndef AQ_FILTERS_H
+#define AQ_FILTERS_H
+
+#include "aq_nic.h"
+
+enum aq_rx_filter_type {
+ aq_rx_filter_ethertype,
+ aq_rx_filter_vlan,
+ aq_rx_filter_l3l4
+};
+
+struct aq_rx_filter {
+ struct hlist_node aq_node;
+ enum aq_rx_filter_type type;
+ struct ethtool_rx_flow_spec aq_fsp;
+};
+
+u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic);
+struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic);
+int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd);
+int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id);
+int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic);
+int aq_filters_vlans_update(struct aq_nic_s *aq_nic);
+int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic);
+
+#endif /* AQ_FILTERS_H */
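
[Editor's note — illustrative usage, not part of the patch: with the
NETIF_F_NTUPLE feature bit advertised (see the hw_atl_b0 changes below),
these entry points are reached through the standard ethtool rxnfc path,
e.g. on an assumed interface name:

    ethtool -K eth0 ntuple on
    ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 32
    ethtool -N eth0 flow-type ether proto 0x88F7 action 0 loc 16

Locations follow the ranges defined in aq_hw.h below: 0-15 VLAN, 16-31
ethertype, 32-39 L3L4.]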
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a1e70da358ca..81aab73dc22f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -18,6 +18,17 @@
#include "aq_rss.h"
#include "hw_atl/hw_atl_utils.h"
+#define AQ_RX_FIRST_LOC_FVLANID 0U
+#define AQ_RX_LAST_LOC_FVLANID 15U
+#define AQ_RX_FIRST_LOC_FETHERT 16U
+#define AQ_RX_LAST_LOC_FETHERT 31U
+#define AQ_RX_FIRST_LOC_FL3L4 32U
+#define AQ_RX_LAST_LOC_FL3L4 39U
+#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4
+#define AQ_VLAN_MAX_FILTERS \
+ (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U)
+#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU
+
/* NIC H/W capabilities */
struct aq_hw_caps_s {
u64 hw_features;
@@ -130,6 +141,7 @@ struct aq_hw_s {
struct aq_ring_s;
struct aq_ring_param_s;
struct sk_buff;
+struct aq_rx_filter_l3l4;
struct aq_hw_ops {
@@ -183,6 +195,23 @@ struct aq_hw_ops {
int (*hw_packet_filter_set)(struct aq_hw_s *self,
unsigned int packet_filter);
+ int (*hw_filter_l3l4_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l3l4_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data);
+
+ int (*hw_filter_l2_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_l2_clear)(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data);
+
+ int (*hw_filter_vlan_set)(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans);
+
+ int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable);
+
int (*hw_multicast_list_set)(struct aq_hw_s *self,
u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 7c07eef275eb..2a11c1eefd8f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -13,6 +13,7 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
+#include "aq_filters.h"
#include <linux/netdevice.h>
#include <linux/module.h>
@@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev)
err = aq_nic_init(aq_nic);
if (err < 0)
goto err_exit;
+
+ err = aq_reapply_rxnfc_all_rules(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
err = aq_nic_start(aq_nic);
if (err < 0)
goto err_exit;
@@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev,
bool is_lro = false;
int err = 0;
+ if (!(features & NETIF_F_NTUPLE)) {
+ if (aq_nic->ndev->features & NETIF_F_NTUPLE) {
+ err = aq_clear_rxnfc_all_rules(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+ if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
+ if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ err = aq_filters_vlan_offload_off(aq_nic);
+ if (unlikely(err))
+ goto err_exit;
+ }
+ }
+
aq_cfg->features = features;
if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) {
@@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev,
err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw,
aq_cfg);
+err_exit:
return err;
}
@@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
aq_nic_set_multicast_list(aq_nic, ndev);
}
+static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ set_bit(vid, aq_nic->active_vlans);
+
+ return aq_filters_vlans_update(aq_nic);
+}
+
+static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto,
+ u16 vid)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ if (!aq_nic->aq_hw_ops->hw_filter_vlan_set)
+ return -EOPNOTSUPP;
+
+ clear_bit(vid, aq_nic->active_vlans);
+
+ if (aq_del_fvlan_by_vlan(aq_nic, vid) == -ENOENT)
+ return aq_filters_vlans_update(aq_nic);
+
+ return 0;
+}
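+
+/* Editor's note: aq_del_fvlan_by_vlan() returns -ENOENT when no ntuple
+ * rule referenced this VID; only then does the VLAN table need an
+ * explicit rebuild here, because deleting an rxnfc rule already calls
+ * aq_filters_vlans_update() internally.
+ */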
+
static const struct net_device_ops aq_ndev_ops = {
.ndo_open = aq_ndev_open,
.ndo_stop = aq_ndev_close,
@@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = {
.ndo_set_rx_mode = aq_ndev_set_multicast_settings,
.ndo_change_mtu = aq_ndev_change_mtu,
.ndo_set_mac_address = aq_ndev_set_mac_address,
- .ndo_set_features = aq_ndev_set_features
+ .ndo_set_features = aq_ndev_set_features,
+ .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 7abdc0952425..279ea58f4a9e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -84,8 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->is_lro = AQ_CFG_IS_LRO_DEF;
- cfg->vlan_id = 0U;
-
aq_nic_rss_init(self, cfg->num_rss_queues);
/*descriptors */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 44ec47a3d60a..8e34c1e49bf2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -35,7 +35,6 @@ struct aq_nic_cfg_s {
u32 mtu;
u32 flow_control;
u32 link_speed_msk;
- u32 vlan_id;
u32 wol;
u16 is_mc_list_enabled;
u16 mc_list_count;
@@ -61,6 +60,23 @@ struct aq_nic_cfg_s {
#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+struct aq_hw_rx_fl2 {
+ struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS];
+};
+
+struct aq_hw_rx_fl3l4 {
+ u8 active_ipv4;
+ u8 active_ipv6:2;
+ u8 is_ipv6;
+};
+
+struct aq_hw_rx_fltrs_s {
+ struct hlist_head filter_list;
+ u16 active_filters;
+ struct aq_hw_rx_fl2 fl2;
+ struct aq_hw_rx_fl3l4 fl3l4;
+};
+
struct aq_nic_s {
atomic_t flags;
struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
@@ -81,10 +97,13 @@ struct aq_nic_s {
u32 count;
u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
+ /* Bitmap of VLANs currently assigned from the Linux VLAN core */
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct pci_dev *pdev;
unsigned int msix_entry_mask;
u32 irqvecs;
+ struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs;
};
static inline struct device *aq_nic_get_dev(struct aq_nic_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 1d5d6b8df855..c8b44cdb91c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -19,6 +19,7 @@
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
+#include "aq_filters.h"
static const struct pci_device_id aq_pci_tbl[] = {
{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
@@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev)
struct aq_nic_s *self = pci_get_drvdata(pdev);
if (self->ndev) {
+ aq_clear_rxnfc_all_rules(self);
if (self->ndev->reg_state == NETREG_REGISTERED)
unregister_netdev(self->ndev);
aq_nic_free_vectors(self);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index f02592f43fe3..6af7d7f0cdca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -41,7 +41,9 @@
NETIF_F_RXHASH | \
NETIF_F_SG | \
NETIF_F_TSO | \
- NETIF_F_LRO, \
+ NETIF_F_LRO | \
+ NETIF_F_NTUPLE | \
+ NETIF_F_HW_VLAN_CTAG_FILTER, \
.hw_priv_flags = IFF_UNICAST_FLT, \
.flow_control = true, \
.mtu = HW_ATL_B0_MTU_JUMBO, \
@@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- if (cfg->vlan_id) {
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U);
- hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U);
+ hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
- hw_atl_rpf_vlan_untagged_act_set(self, 1U);
-
- hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U);
- hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U);
- hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U);
- } else {
- hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
- }
+ /* Always accept untagged packets */
+ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U);
+ hw_atl_rpf_vlan_untagged_act_set(self, 1U);
/* Rx Interrupts */
hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
@@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
+static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_cmd_clear(self, location);
+ hw_atl_rpf_l4_spd_set(self, 0U, location);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location);
+ hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location);
+ } else {
+ int i;
+
+ for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
+ hw_atl_rpfl3l4_cmd_clear(self, location + i);
+ hw_atl_rpf_l4_spd_set(self, 0U, location + i);
+ hw_atl_rpf_l4_dpd_set(self, 0U, location + i);
+ }
+ hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location);
+ hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l3l4 *data)
+{
+ u8 location = data->location;
+
+ hw_atl_b0_hw_fl3l4_clear(self, data);
+
+ if (data->cmd) {
+ if (!data->is_ipv6) {
+ hw_atl_rpfl3l4_ipv4_dest_addr_set(self,
+ location,
+ data->ip_dst[0]);
+ hw_atl_rpfl3l4_ipv4_src_addr_set(self,
+ location,
+ data->ip_src[0]);
+ } else {
+ hw_atl_rpfl3l4_ipv6_dest_addr_set(self,
+ location,
+ data->ip_dst);
+ hw_atl_rpfl3l4_ipv6_src_addr_set(self,
+ location,
+ data->ip_src);
+ }
+ }
+ hw_atl_rpf_l4_dpd_set(self, data->p_dst, location);
+ hw_atl_rpf_l4_spd_set(self, data->p_src, location);
+ hw_atl_rpfl3l4_cmd_set(self, location, data->cmd);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self,
+ !!data->user_priority_en,
+ data->location);
+ if (data->user_priority_en)
+ hw_atl_rpf_etht_user_priority_set(self,
+ data->user_priority,
+ data->location);
+
+ if (data->queue < 0) {
+ hw_atl_rpf_etht_flr_act_set(self, 0U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location);
+ } else {
+ hw_atl_rpf_etht_flr_act_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location);
+ hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
+ struct aq_rx_filter_l2 *data)
+{
+ hw_atl_rpf_etht_flr_en_set(self, 0U, data->location);
+ hw_atl_rpf_etht_flr_set(self, 0U, data->location);
+ hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location);
+
+ return aq_hw_err_from_flags(self);
+}
+
+/**
+ * hw_atl_b0_hw_vlan_set() - set the VLAN filter table
+ * @self: hardware context
+ * @aq_vlans: VLAN filter configuration
+ *
+ * Configures the VLAN filter table to accept (and optionally steer to an
+ * RX queue) traffic for the given VLAN IDs.
+ * Note: call this while VLAN promiscuous mode is enabled so that no
+ * traffic is lost while the table is being rewritten.
+ *
+ * Return: 0 on success, <0 on error.
+ */
+static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self,
+ struct aq_rx_filter_vlan *aq_vlans)
+{
+ int i;
+
+ for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) {
+ hw_atl_rpf_vlan_flr_en_set(self, 0U, i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i);
+ if (aq_vlans[i].enable) {
+ hw_atl_rpf_vlan_id_flr_set(self,
+ aq_vlans[i].vlan_id,
+ i);
+ hw_atl_rpf_vlan_flr_act_set(self, 1U, i);
+ hw_atl_rpf_vlan_flr_en_set(self, 1U, i);
+ if (aq_vlans[i].queue != 0xFF) {
+ hw_atl_rpf_vlan_rxq_flr_set(self,
+ aq_vlans[i].queue,
+ i);
+ hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i);
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
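+
+/* Editor's note: the 0xFF sentinel tested above is
+ * AQ_RX_QUEUE_NOT_ASSIGNED; such entries accept the VLAN without
+ * steering it to a particular RX queue.
+ */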
+
+static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable)
+{
+ /* enable promiscuous mode when the VLAN filter is being disabled */
+ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable);
+
+ return aq_hw_err_from_flags(self);
+}
+
const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
.hw_init = hw_atl_b0_hw_init,
@@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
.hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
.hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
.hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_filter_l2_set = hw_atl_b0_hw_fl2_set,
+ .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear,
+ .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set,
+ .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set,
+ .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl,
.hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
.hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
.hw_rss_set = hw_atl_b0_hw_rss_set,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 5502ec5f0f69..939f77e2e117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
vlan_id_flr);
}
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_EN_F_MSK,
+ HW_ATL_RPF_VL_RXQ_EN_F_SHIFT,
+ vlan_rxq_en);
+}
+
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter),
+ HW_ATL_RPF_VL_RXQ_F_MSK,
+ HW_ATL_RPF_VL_RXQ_F_SHIFT,
+ vlan_rxq);
+}
+
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter)
{
@@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
HW_ATL_RPF_ET_VALF_SHIFT, etht_flr);
}
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter),
+ HW_ATL_RPF_L4_SPD_MSK,
+ HW_ATL_RPF_L4_SPD_SHIFT, val);
+}
+
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter),
+ HW_ATL_RPF_L4_DPD_MSK,
+ HW_ATL_RPF_L4_DPD_SHIFT, val);
+}
+
/* RPO: rx packet offload */
void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
u32 ipv4header_crc_offload_en)
@@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr)
HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT,
up_force_intr);
}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ 0U);
+}
+
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location),
+ ipv4_dest);
+}
+
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src)
+{
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location),
+ ipv4_src);
+}
+
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd);
+}
+
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_SRCA_ADR(location + i),
+ ipv6_src[i]);
+}
+
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest)
+{
+ int i;
+
+ for (i = 0; i < 4; ++i)
+ aq_hw_write_reg(aq_hw,
+ HW_ATL_RPF_L3_DSTA_ADR(location + i),
+ ipv6_dest[i]);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index 41f239928c15..03c570d115fe 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr,
u32 filter);
+/* Set VLAN RX queue assignment enable */
+void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en,
+ u32 filter);
+
+/* Set VLAN RX queue */
+void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq,
+ u32 filter);
+
/* set ethertype filter enable */
void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en,
u32 filter);
@@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
/* set ethertype filter */
void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+/* set L4 source port */
+void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
+/* set L4 destination port */
+void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter);
+
/* rpo */
/* set ipv4 header checksum offload enable */
@@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
/* set uP Force Interrupt */
void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr);
+/* clear ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* clear ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location);
+
+/* set ipv4 filter destination address */
+void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_dest);
+
+/* set ipv4 filter source address */
+void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 ipv4_src);
+
+/* set command for filter l3-l4 */
+void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd);
+
+/* set ipv6 filter source address */
+void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_src);
+
+/* set ipv6 filter destination address */
+void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
+ u32 *ipv6_dest);
+
#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index a715fa317b1c..8470d92db812 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1092,24 +1092,43 @@
/* Default value of bitfield vl_id{F}[B:0] */
#define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0
-/* RX et_en{F} Bitfield Definitions
- * Preprocessor definitions for the bitfield "et_en{F}".
+/* RX vl_rxq_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}".
* Parameter: filter {F} | stride size 0x4 | range [0, 15]
- * PORT="pif_rpf_et_en_i[0]"
- */
-
-/* Register address for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4)
-/* Bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000
-/* Inverted bitmask for bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF
-/* Lower bit position of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_SHIFT 31
-/* Width of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_WIDTH 1
-/* Default value of bitfield et_en{F} */
-#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0
+ * PORT="pif_rpf_vl_rxq_en_i"
+ */
+
+/* Register address for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000
+/* Inverted bitmask for bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF
+/* Lower bit position of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28
+/* Width of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1
+/* Default value of bitfield vl_rxq_en{F} */
+#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0
+
+/* RX vl_rxq{F}[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_rxq0_i[4:0]"
+ */
+
+/* Register address for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000
+/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF
+/* Lower bit position of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20
+/* Width of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5
+/* Default value of bitfield vl_rxq{F}[4:0] */
+#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0
/* rx et_en{f} bitfield definitions
* preprocessor definitions for the bitfield "et_en{f}".
@@ -1263,6 +1282,44 @@
/* default value of bitfield et_val{f}[f:0] */
#define HW_ATL_RPF_ET_VALF_DEFAULT 0x0
+/* RX l4_sp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]".
+ * Parameter: srcport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_sp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4)
+/* Bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_SHIFT 0
+/* Width of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_WIDTH 16
+/* Default value of bitfield l4_sp{D}[F:0] */
+#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0
+
+/* RX l4_dp{D}[F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]".
+ * Parameter: destport {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l4_dp0_i[15:0]"
+ */
+
+/* Register address for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4)
+/* Bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu
+/* Inverted bitmask for bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u
+/* Lower bit position of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_SHIFT 0
+/* Width of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_WIDTH 16
+/* Default value of bitfield l4_dp{D}[F:0] */
+#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0
+
/* rx ipv4_chk_en bitfield definitions
* preprocessor definitions for the bitfield "ipv4_chk_en".
* port="pif_rpo_ipv4_chk_en_i"
@@ -2418,4 +2475,48 @@
/* default value of bitfield uP Force Interrupt */
#define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0
+#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380
+#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0
+#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0
+
+#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4)
+
+/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_sa0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */
+#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4)
+/* Bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_SHIFT 0
+/* Width of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_WIDTH 32
+/* Default value of bitfield l3_sa0[1F:0] */
+#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0
+
+/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7]
+ * PORT="pif_rpf_l3_da0_i[31:0]"
+ */
+
+/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
+#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053D0 + (location) * 0x4)
+/* Bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
+/* Inverted bitmask for bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu
+/* Lower bit position of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_SHIFT 0
+/* Width of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_WIDTH 32
+/* Default value of bitfield l3_da0[1F:0] */
+#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0
+
#endif /* HW_ATL_LLH_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 7def1cb8ab9d..1af6606a9166 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -454,8 +454,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
(fw.val =
aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
fw.tid), 1000U, 100U);
- if (err < 0)
- goto err_exit;
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -463,8 +461,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
goto err_exit;
}
} while (sw.tid != fw.tid || 0xFFFFU == fw.len);
- if (err < 0)
- goto err_exit;
if (rpc) {
if (fw.len) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 3613fca64b58..48278e333462 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -240,6 +240,64 @@ struct __packed offload_info {
u8 buf[0];
};
+enum hw_atl_rx_action_with_traffic {
+ HW_ATL_RX_DISCARD,
+ HW_ATL_RX_HOST,
+};
+
+struct aq_rx_filter_vlan {
+ u8 enable;
+ u8 location;
+ u16 vlan_id;
+ u8 queue;
+};
+
+struct aq_rx_filter_l2 {
+ s8 queue;
+ u8 location;
+ u8 user_priority_en;
+ u8 user_priority;
+ u16 ethertype;
+};
+
+struct aq_rx_filter_l3l4 {
+ u32 cmd;
+ u8 location;
+ u32 ip_dst[4];
+ u32 ip_src[4];
+ u16 p_dst;
+ u16 p_src;
+ u8 is_ipv6;
+};
+
+enum hw_atl_rx_protocol_value_l3l4 {
+ HW_ATL_RX_TCP,
+ HW_ATL_RX_UDP,
+ HW_ATL_RX_SCTP,
+ HW_ATL_RX_ICMP
+};
+
+enum hw_atl_rx_ctrl_registers_l3l4 {
+ HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22),
+ HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23),
+ HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24),
+ HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25),
+ HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26),
+ HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27),
+ HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28),
+ HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29),
+ HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30),
+ HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31)
+};
+
+#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U
+#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U
+
+#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U
+
+#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \
+ ((location) - AQ_RX_FIRST_LOC_FL3L4)
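+
+/* Editor's note: an IPv6 address occupies HW_ATL_RX_CNT_REG_ADDR_IPV6 (4)
+ * consecutive 32-bit address registers, so one IPv6 filter consumes four
+ * adjacent L3L4 slots; hence aq_hw_rx_fl3l4.active_ipv6 is a 2-bit field
+ * (8 slots / 4) indexed with BIT(location / 4).
+ */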
+
#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index bd277b0dc615..4406325fdd9f 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev)
phy_dev->autoneg = AUTONEG_ENABLE;
phy_dev->speed = 0;
phy_dev->duplex = 0;
- phy_dev->advertising &= phy_dev->supported;
+ linkmode_and(phy_dev->advertising, phy_dev->advertising,
+ phy_dev->supported);
priv->last_rx_bd = 0;
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e445ab724827..f44808959ff3 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev)
static int b44_register_phy_one(struct b44 *bp)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mii_bus *mii_bus;
struct ssb_device *sdev = bp->sdev;
struct phy_device *phydev;
@@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp)
}
/* mask with MAC supported features */
- phydev->supported &= (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full |
- SUPPORTED_Autoneg |
- SUPPORTED_MII);
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
bp->old_link = 0;
bp->phy_addr = phydev->mdio.addr;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0e2d99c737e3..4574275ef445 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
+ unsigned int index;
u32 reg;
/* Disable RXCHK, active filters and Broadcom tag matching */
@@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
rxchk_writel(priv, reg, RXCHK_CONTROL);
+ /* Make sure we restore correct CID index in case HW lost
+ * its context during deep idle state
+ */
+ for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
+ rxchk_writel(priv, priv->filters_loc[index] <<
+ RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
+ rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ }
+
/* Clear the MagicPacket detection logic */
mpd_enable_set(priv, false);
@@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
+ priv->filters_loc[index] = nfc->fs.location;
set_bit(index, priv->filters);
return 0;
@@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
* be taken care of during suspend time by bcm_sysport_suspend_to_wol
*/
clear_bit(index, priv->filters);
+ priv->filters_loc[index] = 0;
return 0;
}
@@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
struct bcm_sysport_priv *priv;
struct net_device *slave_dev;
unsigned int num_tx_queues;
- unsigned int q, start, port;
+ unsigned int q, qp, port;
struct net_device *dev;
priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
@@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
priv->per_port_num_tx_queues = num_tx_queues;
- start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues);
- for (q = 0; q < num_tx_queues; q++) {
- ring = &priv->tx_rings[q + start];
+ for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
+ q++) {
+ ring = &priv->tx_rings[q];
+
+ if (ring->inspect)
+ continue;
/* Just remember the mapping actual programming done
* during bcm_sysport_init_tx_ring
*/
- ring->switch_queue = q;
+ ring->switch_queue = qp;
ring->switch_port = port;
ring->inspect = true;
priv->ring_map[q + port * num_tx_queues] = ring;
+ qp++;
+ }
+
+ return 0;
+}
+
+static int bcm_sysport_unmap_queues(struct notifier_block *nb,
+ struct dsa_notifier_register_info *info)
+{
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_priv *priv;
+ struct net_device *slave_dev;
+ unsigned int num_tx_queues;
+ struct net_device *dev;
+ unsigned int q, port;
+
+ priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
+ if (priv->netdev != info->master)
+ return 0;
+
+ dev = info->master;
+
+ if (dev->netdev_ops != &bcm_sysport_netdev_ops)
+ return 0;
+
+ port = info->port_number;
+ slave_dev = info->info.dev;
+
+ num_tx_queues = slave_dev->real_num_tx_queues;
+
+ for (q = 0; q < dev->num_tx_queues; q++) {
+ ring = &priv->tx_rings[q];
- /* Set all queues as being used now */
- set_bit(q + start, &priv->queue_bitmap);
+ if (ring->switch_port != port)
+ continue;
+
+ if (!ring->inspect)
+ continue;
+
+ ring->inspect = false;
+ priv->ring_map[q + port * num_tx_queues] = NULL;
}
return 0;
@@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb,
static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct dsa_notifier_register_info *info;
-
- if (event != DSA_PORT_REGISTER)
- return NOTIFY_DONE;
+ int ret = NOTIFY_DONE;
- info = ptr;
+ switch (event) {
+ case DSA_PORT_REGISTER:
+ ret = bcm_sysport_map_queues(nb, ptr);
+ break;
+ case DSA_PORT_UNREGISTER:
+ ret = bcm_sysport_unmap_queues(nb, ptr);
+ break;
+ }
- return notifier_from_errno(bcm_sysport_map_queues(nb, info));
+ return notifier_from_errno(ret);
}
#define REV_FMT "v%2x.%02x"
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index a7a230884a87..0887e6356649 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -786,6 +786,7 @@ struct bcm_sysport_priv {
/* Ethtool */
u32 msg_enable;
DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX);
+ u32 filters_loc[RXCHK_BRCM_TAG_MAX];
struct bcm_sysport_stats64 stats64;
@@ -795,7 +796,6 @@ struct bcm_sysport_priv {
/* map information between switch port queues and local queues */
struct notifier_block dsa_notifier;
unsigned int per_port_num_tx_queues;
- unsigned long queue_bitmap;
struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8];
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d83233ae4a15..510dfc1c236b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
if (realdev) {
dev = cnic_from_netdev(realdev);
if (dev) {
- vid |= VLAN_TAG_PRESENT;
+ vid |= VLAN_CFI_MASK; /* make non-zero */
cnic_rcv_netevent(dev->cnic_priv, event, vid);
cnic_put(dev);
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090bf644..983245c0867c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
break;
}
- return 0;
+ return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
@@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int bcmgenet_suspend(struct device *d)
-{
- struct net_device *dev = dev_get_drvdata(d);
- struct bcmgenet_priv *priv = netdev_priv(dev);
- int ret = 0;
-
- if (!netif_running(dev))
- return 0;
-
- netif_device_detach(dev);
-
- bcmgenet_netif_stop(dev);
-
- if (!device_may_wakeup(d))
- phy_suspend(dev->phydev);
-
- /* Prepare the device for Wake-on-LAN and switch to the slow clock */
- if (device_may_wakeup(d) && priv->wolopts) {
- ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
- clk_prepare_enable(priv->clk_wol);
- } else if (priv->internal_phy) {
- ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
- }
-
- /* Turn off the clocks */
- clk_disable_unprepare(priv->clk);
-
- return ret;
-}
-
static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
@@ -3719,6 +3689,39 @@ out_clk_disable:
clk_disable_unprepare(priv->clk);
return ret;
}
+
+static int bcmgenet_suspend(struct device *d)
+{
+ struct net_device *dev = dev_get_drvdata(d);
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (!netif_running(dev))
+ return 0;
+
+ netif_device_detach(dev);
+
+ bcmgenet_netif_stop(dev);
+
+ if (!device_may_wakeup(d))
+ phy_suspend(dev->phydev);
+
+ /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+ if (device_may_wakeup(d) && priv->wolopts) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+ clk_prepare_enable(priv->clk_wol);
+ } else if (priv->internal_phy) {
+ ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+ }
+
+ /* Turn off the clocks */
+ clk_disable_unprepare(priv->clk);
+
+ if (ret)
+ bcmgenet_resume(d);
+
+ return ret;
+}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 2fbd027f0148..b3596e0ee47b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,9 +186,15 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
}
reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already powered up so skip the rest */
reg &= ~MPD_EN;
bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index a6cbaca37e94..aceb9b7b55bd 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
* capabilities, use that knowledge to also configure the
* Reverse MII interface correctly.
*/
- if (dev->phydev->supported & PHY_1000BT_FEATURES)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ dev->phydev->supported))
port_ctrl = PORT_MODE_EXT_RVMII_50;
else
port_ctrl = PORT_MODE_EXT_RVMII_25;
@@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89295306f161..dc155c692c40 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -66,11 +66,6 @@
#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
-
#define BAR_0 0
#define BAR_2 2
@@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp)
phydev->speed = tp->link_config.speed;
phydev->duplex = tp->link_config.duplex;
phydev->autoneg = tp->link_config.autoneg;
- phydev->advertising = tp->link_config.advertising;
+ ethtool_convert_legacy_u32_to_link_mode(
+ phydev->advertising, tp->link_config.advertising);
}
phy_start(phydev);
@@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
do_low_power = false;
if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, };
struct phy_device *phydev;
- u32 phyid, advertising;
+ u32 phyid;
phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr);
@@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tp->link_config.speed = phydev->speed;
tp->link_config.duplex = phydev->duplex;
tp->link_config.autoneg = phydev->autoneg;
- tp->link_config.advertising = phydev->advertising;
-
- advertising = ADVERTISED_TP |
- ADVERTISED_Pause |
- ADVERTISED_Autoneg |
- ADVERTISED_10baseT_Half;
+ ethtool_convert_link_mode_to_legacy_u32(
+ &tp->link_config.advertising,
+ phydev->advertising);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ advertising);
if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
- if (tg3_flag(tp, WOL_SPEED_100MB))
- advertising |=
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_10baseT_Full;
- else
- advertising |= ADVERTISED_10baseT_Full;
+ if (tg3_flag(tp, WOL_SPEED_100MB)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ advertising);
+ }
}
- phydev->advertising = advertising;
-
+ linkmode_copy(phydev->advertising, advertising);
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
@@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
}
/* tp->lock must be held */
-static u64 tg3_refclk_read(struct tg3 *tp)
+static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts)
{
- u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
- return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+ u64 stamp;
+
+ ptp_read_system_prets(sts);
+ stamp = tr32(TG3_EAV_REF_CLCK_LSB);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
+
+ return stamp;
}
/* tp->lock must be held */
@@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
tg3_full_lock(tp, 0);
- ns = tg3_refclk_read(tp);
+ ns = tg3_refclk_read(tp, sts);
ns += tp->ptp_adjust;
tg3_full_unlock(tp);
@@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
- .gettime64 = tg3_ptp_gettime,
+ .gettimex64 = tg3_ptp_gettimex,
.settime64 = tg3_ptp_settime,
.enable = tg3_ptp_enable,
};
@@ -16959,32 +16971,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-#ifdef CONFIG_SPARC
-static int tg3_get_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
- struct pci_dev *pdev = tp->pdev;
- struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
- int len;
-
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
-{
- struct net_device *dev = tp->dev;
-
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
- return 0;
-}
-#endif
-
static int tg3_get_device_address(struct tg3 *tp)
{
struct net_device *dev = tp->dev;
@@ -16992,10 +16978,8 @@ static int tg3_get_device_address(struct tg3 *tp)
int addr_ok = 0;
int err;
-#ifdef CONFIG_SPARC
- if (!tg3_get_macaddr_sparc(tp))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
return 0;
-#endif
if (tg3_flag(tp, IS_SSB_CORE)) {
err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
@@ -17057,13 +17041,8 @@ static int tg3_get_device_address(struct tg3 *tp)
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
-#ifdef CONFIG_SPARC
- if (!tg3_get_default_macaddr_sparc(tp))
- return 0;
-#endif
+ if (!is_valid_ether_addr(&dev->dev_addr[0]))
return -EINVAL;
- }
return 0;
}
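The tg3 hunks above do three things: drop the SPARC-specific MAC address
lookup in favor of the generic eth_platform_get_mac_address(), convert the
advertising mask handling to link-mode bitmaps, and switch the PTP clock
from .gettime64 to .gettimex64 so the core can bound the system-clock
window around the PHC register read. A minimal gettimex64 sketch under
assumed device helpers (my_priv and my_read_clock_lo/hi are hypothetical):

#include <linux/ptp_clock_kernel.h>

static int my_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	struct my_priv *priv = container_of(ptp, struct my_priv, ptp_info);
	u64 ns;

	/* Snapshot system time immediately around the low-word read,
	 * exactly as tg3_refclk_read() does above.
	 */
	ptp_read_system_prets(sts);
	ns = my_read_clock_lo(priv);
	ptp_read_system_postts(sts);
	ns |= (u64)my_read_clock_hi(priv) << 32;

	*ts = ns_to_timespec64(ns);
	return 0;
}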
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 4b3aecf98f2a..5359c1021f42 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Set the mode of the interface, RGMII/MII. */
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (netdev->phydev->supported &
- (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+ int rgmii_mode =
+ (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ netdev->phydev->supported) |
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ netdev->phydev->supported)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d49db46254cd..649bf7c586c1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2295,6 +2295,8 @@ static int cxgb_up(struct adapter *adap)
static void cxgb_down(struct adapter *adapter)
{
+ struct hash_mac_addr *entry, *tmp;
+
cancel_work_sync(&adapter->tid_release_task);
cancel_work_sync(&adapter->db_full_task);
cancel_work_sync(&adapter->db_drop_task);
@@ -2303,6 +2305,12 @@ static void cxgb_down(struct adapter *adapter)
t4_sge_stop(adapter);
t4_free_sge_resources(adapter);
+
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+
adapter->flags &= ~FULL_INIT_DONE;
}
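cxgb_down() now releases the accumulated MAC hash list when the adapter is
torn down, closing a leak. The idiom requires the _safe list iterator,
since the loop unlinks and frees the entry it is standing on. A
self-contained sketch (struct mac_entry is a hypothetical stand-in for
hash_mac_addr):

#include <linux/list.h>
#include <linux/slab.h>

struct mac_entry {
	struct list_head list;
	u8 addr[6];
};

static void free_mac_list(struct list_head *head)
{
	struct mac_entry *entry, *tmp;

	/* tmp holds the next node, so freeing entry is safe */
	list_for_each_entry_safe(entry, tmp, head, list) {
		list_del(&entry->list);
		kfree(entry);
	}
}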
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index cb523949c812..fc6a08789835 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -7141,21 +7141,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
unsigned int cache_line_size)
{
unsigned int page_shift = fls(page_size) - 1;
- unsigned int sge_hps = page_shift - 10;
unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
unsigned int fl_align_log = fls(fl_align) - 1;
- t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
- HOSTPAGESIZEPF0_V(sge_hps) |
- HOSTPAGESIZEPF1_V(sge_hps) |
- HOSTPAGESIZEPF2_V(sge_hps) |
- HOSTPAGESIZEPF3_V(sge_hps) |
- HOSTPAGESIZEPF4_V(sge_hps) |
- HOSTPAGESIZEPF5_V(sge_hps) |
- HOSTPAGESIZEPF6_V(sge_hps) |
- HOSTPAGESIZEPF7_V(sge_hps));
-
if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_CONTROL_A,
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60df66f4d21c..bf7325f6d553 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
+ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index ff84791a0ff8..8ec503c88c06 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -722,6 +722,10 @@ static int adapter_up(struct adapter *adapter)
if (adapter->flags & USING_MSIX)
name_msix_vecs(adapter);
+
+ /* Initialize hash mac addr list */
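PLACEHOLDER_REMOVED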
+ INIT_LIST_HEAD(&adapter->mac_hlist);
+
adapter->flags |= FULL_INIT_DONE;
}
@@ -747,8 +751,6 @@ static int adapter_up(struct adapter *adapter)
enable_rx(adapter);
t4vf_sge_start(adapter);
- /* Initialize hash mac addr list*/
- INIT_LIST_HEAD(&adapter->mac_hlist);
return 0;
}
@@ -3287,6 +3289,7 @@ err_disable_device:
static void cxgb4vf_pci_remove(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
+ struct hash_mac_addr *entry, *tmp;
/*
* Tear down driver state associated with device.
@@ -3337,6 +3340,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter->mbox_log);
+ list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
kfree(adapter);
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index c5ad7a4f4d83..80b2bd3747ce 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
struct be_wrb_params
*wrb_params)
{
+ bool insert_vlan = false;
u16 vlan_tag = 0;
skb = skb_share_check(skb, GFP_ATOMIC);
if (unlikely(!skb))
return skb;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
+ insert_vlan = true;
+ }
if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
- if (!vlan_tag)
+ if (!insert_vlan) {
vlan_tag = adapter->pvid;
+ insert_vlan = true;
+ }
/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
* skip VLAN insertion
*/
BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
}
- if (vlan_tag) {
+ if (insert_vlan) {
skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
vlan_tag);
if (unlikely(!skb))
return skb;
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
}
/* Insert the outer VLAN, if any */
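The be2net change above fixes handling of priority-tagged frames: VLAN ID
0 is a legal tag, so "vlan_tag != 0" is not a valid test for tag presence.
An explicit insert_vlan flag tracks presence instead, and the new
__vlan_hwaccel_clear_tag() clears both the tag value and the presence bit
rather than zeroing vlan_tci by hand. A condensed sketch of the corrected
flow (the tag lookup is simplified to skb_vlan_tag_get()):

#include <linux/if_vlan.h>

static struct sk_buff *insert_tx_vlan(struct sk_buff *skb)
{
	bool insert_vlan = false;
	u16 vlan_tag = 0;

	if (skb_vlan_tag_present(skb)) {
		vlan_tag = skb_vlan_tag_get(skb);	/* may be 0 */
		insert_vlan = true;
	}

	if (insert_vlan) {
		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
						vlan_tag);
		if (skb)
			__vlan_hwaccel_clear_tag(skb);
	}
	return skb;
}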
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 6e0f47f2c8a3..9510c9d78858 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev)
static int dpaa_phy_init(struct net_device *net_dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct mac_device *mac_dev;
struct phy_device *phy_dev;
struct dpaa_priv *priv;
@@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev)
}
/* Remove any features not supported by the controller */
- phy_dev->supported &= mac_dev->if_support;
+ ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, mask);
+
phy_support_asym_pause(phy_dev);
mac_dev->phy_dev = phy_dev;
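dpaa_eth keeps its interface capabilities in a legacy u32 (if_support), so
the conversion goes through ethtool_convert_legacy_u32_to_link_mode()
before masking. This is the standard pattern for drivers whose capability
word cannot easily be rewritten as a bitmap; a minimal sketch, assuming
if_support holds SUPPORTED_* flags:

#include <linux/linkmode.h>
#include <linux/phy.h>

static void mask_phy_by_legacy_u32(struct phy_device *phydev, u32 if_support)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* expand the u32 into a link-mode bitmap, then intersect */
	ethtool_convert_legacy_u32_to_link_mode(mask, if_support);
	linkmode_and(phydev->supported, phydev->supported, mask);
}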
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 88f7acce38dc..640967a4d50d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -203,8 +203,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id)
+ struct dpaa2_eth_fq *fq)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
u8 fd_format = dpaa2_fd_get_format(fd);
@@ -267,12 +266,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
- skb_record_rx_queue(skb, queue_id);
+ skb_record_rx_queue(skb, fq->flowid);
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(napi, skb);
+ napi_gro_receive(&ch->napi, skb);
return;
@@ -289,7 +288,7 @@ err_frame_format:
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
static int consume_frames(struct dpaa2_eth_channel *ch,
- enum dpaa2_eth_fq_type *type)
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -312,7 +311,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
fd = dpaa2_dq_fd(dq);
fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
- fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
+ fq->consume(priv, ch, fd, fq);
cleaned++;
} while (!is_last);
@@ -323,10 +322,10 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
ch->stats.frames += cleaned;
/* A dequeue operation only pulls frames from a single queue
- * into the store. Return the frame queue type as an out param.
+ * into the store. Return the frame queue as an out param.
*/
- if (type)
- *type = fq->type;
+ if (src)
+ *src = fq;
return cleaned;
}
@@ -571,8 +570,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
struct dpaa2_eth_fq *fq;
+ struct netdev_queue *nq;
u16 queue_mapping;
unsigned int needed_headroom;
+ u32 fd_len;
int err, i;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
@@ -644,8 +645,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Clean up everything, including freeing the skb */
free_tx_fd(priv, &fd);
} else {
+ fd_len = dpaa2_fd_get_len(&fd);
percpu_stats->tx_packets++;
- percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
+ percpu_stats->tx_bytes += fd_len;
+
+ nq = netdev_get_tx_queue(net_dev, queue_mapping);
+ netdev_tx_sent_queue(nq, fd_len);
}
return NETDEV_TX_OK;
@@ -661,11 +666,11 @@ err_alloc_headroom:
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
const struct dpaa2_fd *fd,
- struct napi_struct *napi __always_unused,
- u16 queue_id __always_unused)
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_eth_drv_stats *percpu_extras;
+ u32 fd_len = dpaa2_fd_get_len(fd);
u32 fd_errors;
/* Tracing point */
@@ -673,7 +678,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
- percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
+ percpu_extras->tx_conf_bytes += fd_len;
+
+ fq->dq_frames++;
+ fq->dq_bytes += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -934,8 +942,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_channel *ch;
struct dpaa2_eth_priv *priv;
int rx_cleaned = 0, txconf_cleaned = 0;
- enum dpaa2_eth_fq_type type = 0;
- int store_cleaned;
+ struct dpaa2_eth_fq *fq, *txc_fq = NULL;
+ struct netdev_queue *nq;
+ int store_cleaned, work_done;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
@@ -949,18 +958,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
/* Refill pool if appropriate */
refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &type);
- if (type == DPAA2_RX_FQ)
+ store_cleaned = consume_frames(ch, &fq);
+ if (!store_cleaned)
+ break;
+ if (fq->type == DPAA2_RX_FQ) {
rx_cleaned += store_cleaned;
- else
+ } else {
txconf_cleaned += store_cleaned;
+ /* We have a single Tx conf FQ on this channel */
+ txc_fq = fq;
+ }
/* If we either consumed the whole NAPI budget with Rx frames
* or we reached the Tx confirmations threshold, we're done.
*/
if (rx_cleaned >= budget ||
- txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI)
- return budget;
+ txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
+ work_done = budget;
+ goto out;
+ }
} while (store_cleaned);
/* We didn't consume the entire budget, so finish napi and
@@ -974,7 +990,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
ch->nctx.desired_cpu);
- return max(rx_cleaned, 1);
+ work_done = max(rx_cleaned, 1);
+
+out:
+ if (txc_fq) {
+ nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
+ netdev_tx_completed_queue(nq, txc_fq->dq_frames,
+ txc_fq->dq_bytes);
+ txc_fq->dq_frames = 0;
+ txc_fq->dq_bytes = 0;
+ }
+
+ return work_done;
}
static void enable_ch_napi(struct dpaa2_eth_priv *priv)
@@ -1434,8 +1461,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
FSL_MC_POOL_DPCON, &dpcon);
if (err) {
- dev_info(dev, "Not enough DPCONs, will go on as-is\n");
- return NULL;
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_info(dev, "Not enough DPCONs, will go on as-is\n");
+ return ERR_PTR(err);
}
err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
@@ -1493,8 +1523,10 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return NULL;
channel->dpcon = setup_dpcon(priv);
- if (!channel->dpcon)
+ if (IS_ERR_OR_NULL(channel->dpcon)) {
+ err = PTR_ERR(channel->dpcon);
goto err_setup;
+ }
err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
&attr);
@@ -1513,7 +1545,7 @@ err_get_attr:
free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
- return NULL;
+ return ERR_PTR(err);
}
static void free_channel(struct dpaa2_eth_priv *priv,
@@ -1547,10 +1579,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
for_each_online_cpu(i) {
/* Try to allocate a channel */
channel = alloc_channel(priv);
- if (!channel) {
- dev_info(dev,
- "No affine channel for cpu %d and above\n", i);
- err = -ENODEV;
+ if (IS_ERR_OR_NULL(channel)) {
+ err = PTR_ERR(channel);
+ if (err != -EPROBE_DEFER)
+ dev_info(dev,
+ "No affine channel for cpu %d and above\n", i);
goto err_alloc_ch;
}
@@ -1597,7 +1630,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
/* Stop if we already have enough channels to accommodate all
* RX and TX conf queues
*/
- if (priv->num_channels == dpaa2_eth_queue_count(priv))
+ if (priv->num_channels == priv->dpni_attrs.num_queues)
break;
}
@@ -1608,9 +1641,12 @@ err_set_cdan:
err_service_reg:
free_channel(priv, channel);
err_alloc_ch:
+ if (err == -EPROBE_DEFER)
+ return err;
+
if (cpumask_empty(&priv->dpio_cpumask)) {
dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
- return err;
+ return -ENODEV;
}
dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
@@ -1732,7 +1768,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv)
err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
&dpbp_dev);
if (err) {
- dev_err(dev, "DPBP device allocation failed\n");
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "DPBP device allocation failed\n");
return err;
}
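The dpaa2-eth changes wire up byte queue limits: the transmit path reports
queued bytes with netdev_tx_sent_queue(), and the Tx-confirmation path
accumulates per-FQ frame/byte counters that NAPI flushes via
netdev_tx_completed_queue(). Every byte reported as sent must eventually
be reported as completed on the same netdev queue, or the stack throttles
that queue indefinitely. A sketch of the pairing:

#include <linux/netdevice.h>

static void report_tx_sent(struct net_device *dev, u16 queue, u32 bytes)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	netdev_tx_sent_queue(nq, bytes);
}

static void report_tx_done(struct net_device *dev, u16 queue,
			   unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *nq = netdev_get_tx_queue(dev, queue);

	netdev_tx_completed_queue(nq, pkts, bytes);
}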
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 452a8e9c4f0e..16545e9386cd 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -271,14 +271,15 @@ struct dpaa2_eth_fq {
u32 tx_qdbin;
u16 flowid;
int target_cpu;
+ u32 dq_frames;
+ u32 dq_bytes;
struct dpaa2_eth_channel *channel;
enum dpaa2_eth_fq_type type;
void (*consume)(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
- struct napi_struct *napi,
- u16 queue_id);
+ struct dpaa2_eth_fq *fq);
struct dpaa2_eth_fq_stats stats;
};
@@ -434,9 +435,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
DPAA2_ETH_RX_HWA_SIZE;
}
+/* We have exactly one {Rx, Tx conf} queue per channel */
static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv)
{
- return priv->dpni_attrs.num_queues;
+ return priv->num_channels;
}
int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 84b942b1eccc..9b150db3b510 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
if (err) {
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
goto err_exit;
}
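Like the dpaa2-eth hunks above, the PTP probe now maps -ENXIO from the
fsl_mc allocator to -EPROBE_DEFER, so probing retries once the providing
object appears instead of failing outright. The translation can be
factored into a small helper (maybe_defer() is hypothetical):

#include <linux/device.h>
#include <linux/errno.h>

static int maybe_defer(struct device *dev, int err, const char *what)
{
	if (err == -ENXIO)
		return -EPROBE_DEFER;	/* resource not bound yet: retry */
	if (err)
		dev_err(dev, "%s failed: %d\n", what, err);
	return err;
}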
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index d79e4e009d63..71f4205f14e7 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
*/
/* get local capabilities */
- lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);
/* get link partner capabilities */
rmt_adv = 0;
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 82722d05fedb..88a396fd242f 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
- if (strcmp(tbi->type, "tbi-phy") == 0) {
+ if (of_node_is_type(tbi, "tbi-phy")) {
dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n",
tbi);
break;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3c8da1a18ba0..0e102c764b13 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1784,14 +1784,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev)
*/
static int init_phy(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct gfar_private *priv = netdev_priv(dev);
- uint gigabit_support =
- priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
struct phy_device *phydev;
struct ethtool_eee edata;
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
+
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
@@ -1809,8 +1815,8 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- phydev->advertising = phydev->supported;
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Add support for flow control */
phy_support_asym_pause(phydev);
@@ -3656,7 +3662,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
val |= MACCFG1_TX_FLOW;
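gianfar now builds its capability mask from phy_10_100_features_array, a
bit-number table exported by phylib, instead of the old GFAR_SUPPORTED
u32; linkmode_set_bit_array() sets every listed bit in one call. A sketch
of init_phy()'s mask construction:

#include <linux/kernel.h>
#include <linux/linkmode.h>
#include <linux/phy.h>

static void build_10_100_mask(unsigned long *mask, bool gigabit)
{
	linkmode_zero(mask);
	linkmode_set_bit_array(phy_10_100_features_array,
			       ARRAY_SIZE(phy_10_100_features_array), mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
	linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask);
	if (gigabit)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);
}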
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 0d76e15cd6dd..241325c35cb4 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
prio = vlan_tci_prio(rule);
prio_mask = vlan_tci_priom(rule);
- if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) {
- vlan |= RQFPR_CFI;
- vlan_mask |= RQFPR_CFI;
- } else if (cfi != VLAN_TAG_PRESENT &&
- cfi_mask == VLAN_TAG_PRESENT) {
+ if (cfi_mask) {
+ if (cfi)
+ vlan |= RQFPR_CFI;
vlan_mask |= RQFPR_CFI;
}
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 32e02700feaa..2e978cb8b28c 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1742,12 +1742,7 @@ static int init_phy(struct net_device *dev)
if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII)
uec_configure_serdes(dev);
- phy_set_max_speed(phydev, SPEED_100);
-
- if (priv->max_speed == SPEED_1000)
- phydev->supported |= ADVERTISED_1000baseT_Full;
-
- phydev->advertising = phydev->supported;
+ phy_set_max_speed(phydev, priv->max_speed);
priv->phydev = phydev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 28e907831b0e..c62378c07e70 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev)
*/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
struct phy_device *phy_dev = h->phy_dev;
int ret;
@@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
if (unlikely(ret))
return -ENODEV;
- phy_dev->supported &= h->if_support;
- phy_dev->advertising = phy_dev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
phy_dev->autoneg = false;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 038326cfda93..4d9cf39da48c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -36,6 +36,9 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
};
/* below are per-VF mac-vlan subcodes */
@@ -85,6 +88,12 @@ struct hclge_mbx_pf_to_vf_cmd {
u16 msg[8];
};
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
/* used by VF to store the received Async responses from PF */
struct hclgevf_mbx_arq_ring {
#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 055b40606dbc..4a39feaba8b2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
+#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+#define hnae3_dev_gro_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -124,7 +128,10 @@ enum hnae3_reset_notify_type {
enum hnae3_reset_type {
HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
HNAE3_FUNC_RESET,
HNAE3_CORE_RESET,
HNAE3_GLOBAL_RESET,
@@ -132,6 +139,11 @@ enum hnae3_reset_type {
HNAE3_NONE_RESET,
};
+enum hnae3_flr_state {
+ HNAE3_FLR_DOWN,
+ HNAE3_FLR_DONE,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -162,6 +174,7 @@ struct hnae3_client_ops {
int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
int (*reset_notify)(struct hnae3_handle *handle,
enum hnae3_reset_notify_type type);
+ enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle);
};
#define HNAE3_CLIENT_NAME_LENGTH 16
@@ -197,6 +210,10 @@ struct hnae3_ae_dev {
* Enable the hardware
* stop()
* Disable the hardware
+ * client_start()
+ * Inform the hclge that the client has been started
+ * client_stop()
+ * Inform the hclge that the client has been stopped
* get_status()
* Get the carrier state of the back channel of the handle, 1 for ok, 0 for
* non-ok
@@ -292,17 +309,22 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
+ * set_gro_en()
+ * Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
-
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
int (*init_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
void (*uninit_client_instance)(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev);
int (*start)(struct hnae3_handle *handle);
void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed, u8 *duplex);
@@ -403,6 +425,8 @@ struct hnae3_ae_ops {
u16 vlan, u8 qos, __be16 proto);
int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
void (*get_channels)(struct hnae3_handle *handle,
struct ethtool_channels *ch);
void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
@@ -430,6 +454,10 @@ struct hnae3_ae_ops {
int (*restore_fd_rules)(struct hnae3_handle *handle);
void (*enable_fd)(struct hnae3_handle *handle, bool enable);
pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, int enable);
};
struct hnae3_dcb_ops {
@@ -488,6 +516,14 @@ struct hnae3_roce_private_info {
void __iomem *roce_io_base;
int base_vector;
int num_vectors;
+
+ /* The attributes below are defined for the RoCE client; hnae3 gives
+ * them initial values, and the RoCE client can modify and use
+ * them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
};
struct hnae3_unic_private_info {
@@ -520,9 +556,6 @@ struct hnae3_handle {
struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
u64 flags; /* Indicate the capabilities for this handle*/
- unsigned long last_reset_time;
- enum hnae3_reset_type reset_level;
-
union {
struct net_device *netdev; /* first member */
struct hnae3_knic_private_info kinfo;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index ea5f8a84070d..b6fabbbdfd5b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getets)
return h->kinfo.dcb_ops->ieee_getets(h, ets);
@@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setets)
return h->kinfo.dcb_ops->ieee_setets(h, ets);
@@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_getpfc)
return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
@@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (h->kinfo.dcb_ops->ieee_setpfc)
return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
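Each dcbnl entry point now refuses reconfiguration with -EBUSY while a
reset is in flight; the same guard appears in the ethtool ops later in
this series. The check itself is a single test_bit on the per-netdev state
(hns3_nic_resetting() is introduced in hns3_enet.h below). Usage sketch:

static int my_guarded_op(struct net_device *ndev)
{
	/* bail out early: a reset owns the hardware right now */
	if (hns3_nic_resetting(ndev))
		return -EBUSY;

	return do_real_op(ndev);	/* hypothetical real operation */
}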
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 20fcf0d1c2ce..7f81db3df041 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -312,6 +313,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
return min_t(u16, rss_size, max_rss_size);
}
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
static int hns3_nic_net_up(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -334,6 +353,10 @@ static int hns3_nic_net_up(struct net_device *netdev)
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
/* start the ae_dev */
ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
if (ret)
@@ -344,6 +367,9 @@ static int hns3_nic_net_up(struct net_device *netdev)
return 0;
out_start_err:
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
for (j = i - 1; j >= 0; j--)
hns3_vector_disable(&priv->tqp_vector[j]);
@@ -354,11 +380,13 @@ out_start_err:
static int hns3_nic_net_open(struct net_device *netdev)
{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = hns3_get_handle(netdev);
struct hnae3_knic_private_info *kinfo;
int i, ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -378,23 +406,24 @@ static int hns3_nic_net_open(struct net_device *netdev)
kinfo->prio_tc[i]);
}
- priv->ae_handle->last_reset_time = jiffies;
return 0;
}
static void hns3_nic_net_down(struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
const struct hnae3_ae_ops *ops;
int i;
- if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
- return;
-
/* disable vectors */
for (i = 0; i < priv->vector_num; i++)
hns3_vector_disable(&priv->tqp_vector[i]);
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
/* stop ae_dev */
ops = priv->ae_handle->ae_algo->ops;
if (ops->stop)
@@ -408,6 +437,11 @@ static void hns3_nic_net_down(struct net_device *netdev)
static int hns3_nic_net_stop(struct net_device *netdev)
{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev);
@@ -1312,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ if (features & NETIF_F_GRO_HW)
+ ret = h->ae_algo->ops->set_gro_en(h, true);
+ else
+ ret = h->ae_algo->ops->set_gro_en(h, false);
+ if (ret)
+ return ret;
+ }
+
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1530,18 +1573,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
- bool if_running = netif_running(netdev);
int ret;
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
- /* if this was called with netdev up then bring netdevice down */
- if (if_running) {
- (void)hns3_nic_net_stop(netdev);
- msleep(100);
- }
-
ret = h->ae_algo->ops->set_mtu(h, new_mtu);
if (ret)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
@@ -1549,10 +1585,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
else
netdev->mtu = new_mtu;
- /* if the netdev was running earlier, bring it up again */
- if (if_running && hns3_nic_net_open(netdev))
- ret = -EINVAL;
-
return ret;
}
@@ -1615,10 +1647,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev)
priv->tx_timeout_count++;
- if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
- return;
-
- /* request the reset */
+ /* request the reset, and let the hclge determine
+ * which reset level should be done
+ */
if (h->ae_algo->ops->reset_event)
h->ae_algo->ops->reset_event(h->pdev, h);
}
@@ -1682,8 +1713,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
- if (pdev->revision >= 0x21)
+ if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
+ }
}
/* hns3_probe - Device initialization routine
@@ -1819,9 +1852,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "hns3 flr done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
static const struct pci_error_handlers hns3_err_handler = {
.error_detected = hns3_error_detected,
.slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
};
static struct pci_driver hns3_driver = {
@@ -1875,7 +1928,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
- netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2253,6 +2308,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
+ /* We MUST enable hardware checksum before enabling hardware GRO */
+ if (skb_shinfo(skb)->gso_size) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
+ }
+
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2296,6 +2357,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2329,6 +2393,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
+ unsigned char *va)
+{
+#define HNS3_NEED_ADD_FRAG 1
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ netdev_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+ return HNS3_NEED_ADD_FRAG;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
+ struct sk_buff **out_skb, bool pending)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *head_skb = *out_skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *pre_desc;
+ u32 bd_base_info;
+ int pre_bd;
+
+ /* if there is a pending BD, next_to_clean has already moved past
+ * it, so read the FE bit from the previous descriptor
+ */
+ if (pending) {
+ pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
+ ring->desc_num;
+ pre_desc = &ring->desc[pre_bd];
+ bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
+ } else {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ }
+
+ while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
+ return -ENXIO;
+
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
+ HNS3_RX_HEAD_SIZE);
+ if (unlikely(!new_skb)) {
+ netdev_err(ring->tqp->handle->kinfo.netdev,
+ "alloc rx skb frag fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hnae3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ ring->pending_buf++;
+ }
+
+ return 0;
+}
+
+static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info)
+{
+ u16 gro_count;
+ u32 l3_type;
+
+ gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!gro_count)
+ return;
+
+ /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
+ * to skb_shinfo(skb)->gso_segs
+ */
+ NAPI_GRO_CB(skb)->count = gro_count;
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ if (skb_shinfo(skb)->gso_size)
+ tcp_gro_complete(skb);
+}
+
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
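hns3_set_gro_param() above hands the stack the coalescing metadata the
hardware left in the buffer descriptor: the segment count seeds
NAPI_GRO_CB(skb)->count, the MSS seeds gso_size, and tcp_gro_complete()
then fixes up the TCP header so the skb looks like a software-GRO result,
which is what makes forwarding of these frames work. A condensed sketch
with the HNS3_RXD_GRO_* descriptor fields abstracted into seg_cnt/mss:

#include <net/tcp.h>

static void seed_hw_gro(struct sk_buff *skb, u16 seg_cnt, u16 mss, bool ipv4)
{
	if (!seg_cnt)
		return;		/* not a hardware-coalesced frame */

	NAPI_GRO_CB(skb)->count = seg_cnt;
	skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
	skb_shinfo(skb)->gso_size = mss;
	if (mss)
		tcp_gro_complete(skb);
}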
@@ -2345,18 +2556,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
- struct sk_buff **out_skb, int *out_bnum)
+ struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+ struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
- struct sk_buff *skb;
- unsigned char *va;
u32 bd_base_info;
- int pull_len;
u32 l234info;
int length;
- int bnum;
+ int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2368,9 +2577,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
- return -EFAULT;
+ return -ENXIO;
- va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+ if (!skb)
+ ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2379,62 +2589,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
- prefetch(va);
+ prefetch(ring->va);
#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
+ prefetch(ring->va + L1_CACHE_BYTES);
#endif
- skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
- HNS3_RX_HEAD_SIZE);
- if (unlikely(!skb)) {
- netdev_err(netdev, "alloc rx skb fail\n");
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.sw_err_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- return -ENOMEM;
- }
-
- prefetchw(skb->data);
+ if (!skb) {
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ *out_skb = skb = ring->skb;
- bnum = 1;
- if (length <= HNS3_RX_HEAD_SIZE) {
- memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
-
- /* We can reuse buffer as-is, just make sure it is local */
- if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
- desc_cb->reuse_flag = 1;
- else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (ret > 0) { /* need add frag */
+ ret = hns3_add_frag(ring, desc, &skb, false);
+ if (ret)
+ return ret;
- ring_ptr_move_fw(ring, next_to_clean);
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in only after the rest of the data has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+ }
} else {
- u64_stats_update_begin(&ring->syncp);
- ring->stats.seg_pkt_cnt++;
- u64_stats_update_end(&ring->syncp);
-
- pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
-
- memcpy(__skb_put(skb, pull_len), va,
- ALIGN(pull_len, sizeof(long)));
-
- hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ ret = hns3_add_frag(ring, desc, &skb, true);
+ if (ret)
+ return ret;
- while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
- desc = &ring->desc[ring->next_to_clean];
- desc_cb = &ring->desc_cb[ring->next_to_clean];
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
- bnum++;
- }
+ /* As the head data may be changed when GRO is enabled, copy
+ * the head data in only after the rest of the data has been received
+ */
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
}
- *out_bnum = bnum;
-
l234info = le32_to_cpu(desc->rx.l234_info);
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2484,7 +2674,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
+ /* This is needed in order to enable forwarding support */
+ hns3_set_gro_param(skb, l234info, bd_base_info);
+
hns3_rx_checksum(ring, skb, desc);
+ *out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2497,9 +2691,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
- int unused_count = hns3_desc_unused(ring);
- struct sk_buff *skb = NULL;
- int num, bnum = 0;
+ int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
+ struct sk_buff *skb = ring->skb;
+ int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2513,24 +2707,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
- unused_count = hns3_desc_unused(ring);
+ unused_count = hns3_desc_unused(ring) -
+ ring->pending_buf;
}
/* Poll one pkt */
- err = hns3_handle_rx_bd(ring, &skb, &bnum);
+ err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
- recv_bds += bnum;
- clean_count += bnum;
- if (unlikely(err)) { /* Do jump the err */
- recv_pkts++;
+ if (err == -ENXIO) { /* Do not get FE for the packet */
+ goto out;
+ } else if (unlikely(err)) { /* skip this erroneous packet */
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
+ recv_bds += ring->pending_buf;
+ clean_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
recv_pkts++;
}
@@ -2669,6 +2871,7 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
struct hns3_enet_ring *ring;
int rx_pkt_total = 0;
@@ -2677,6 +2880,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
bool clean_complete = true;
int rx_budget;
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
@@ -2701,9 +2909,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
- napi_complete(napi);
- hns3_update_new_int_gl(tqp_vector);
- hns3_mask_vector_irq(tqp_vector, 1);
+ if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
+ napi_complete(napi)) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
return rx_pkt_total;
}
@@ -3319,6 +3529,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
@@ -3337,7 +3563,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->dev = &pdev->dev;
priv->netdev = netdev;
priv->ae_handle = handle;
- priv->ae_handle->last_reset_time = jiffies;
priv->tx_timeout_count = 0;
handle->kinfo.netdev = netdev;
@@ -3357,11 +3582,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (handle->flags & HNAE3_SUPPORT_VF)
- handle->reset_level = HNAE3_VF_RESET;
- else
- handle->reset_level = HNAE3_FUNC_RESET;
-
ret = hns3_get_ring_config(priv);
if (ret) {
ret = -ENOMEM;
@@ -3392,10 +3612,18 @@ static int hns3_client_init(struct hnae3_handle *handle)
goto out_reg_netdev_fail;
}
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_reg_netdev_fail;
+ }
+
hns3_dcbnl_setup(handle);
- /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
- netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
@@ -3418,11 +3646,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ hns3_client_stop(handle);
+
hns3_remove_hw_addr(netdev);
if (netdev->reg_state != NETREG_UNINITIALIZED)
unregister_netdev(netdev);
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
hns3_del_all_fd_rules(netdev, true);
hns3_force_clear_all_rx_ring(handle);
@@ -3443,6 +3678,7 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
priv->ring_data = NULL;
+out_netdev_free:
free_netdev(netdev);
}
@@ -3708,8 +3944,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv)
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ /* it is cumbersome for hardware to pick-and-choose entries for deletion
+ * from table space. Hence, for a function reset, software intervention is
+ * required to delete the entries
+ */
+ if (hns3_dev_ongoing_func_reset(ae_dev)) {
+ hns3_remove_hw_addr(ndev);
+ hns3_del_all_fd_rules(ndev, false);
+ }
if (!netif_running(ndev))
return 0;
@@ -3720,6 +3970,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
int ret = 0;
if (netif_running(kinfo->netdev)) {
@@ -3729,9 +3980,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
"hns net up fail, ret=%d!\n", ret);
return ret;
}
- handle->last_reset_time = jiffies;
}
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
return ret;
}
@@ -3771,28 +4023,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
/* Carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ return ret;
+
hns3_restore_coal(priv);
ret = hns3_nic_init_vector_data(priv);
if (ret)
- return ret;
+ goto err_dealloc_vector;
ret = hns3_init_all_ring(priv);
- if (ret) {
- hns3_nic_uninit_vector_data(priv);
- priv->ring_data = NULL;
- }
+ if (ret)
+ goto err_uninit_vector;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+ priv->ring_data = NULL;
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
return ret;
}
static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
- struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
hns3_force_clear_all_rx_ring(handle);
ret = hns3_nic_uninit_vector_data(priv);
@@ -3803,18 +4071,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_store_coal(priv);
+ ret = hns3_nic_dealloc_vector_data(priv);
+ if (ret)
+ netdev_err(netdev, "dealloc vector error\n");
+
ret = hns3_uninit_all_ring(priv);
if (ret)
netdev_err(netdev, "uninit ring error\n");
- /* it is cumbersome for hardware to pick-and-choose entries for deletion
- * from table space. Hence, for function reset software intervention is
- * required to delete the entries
- */
- if (hns3_dev_ongoing_func_reset(ae_dev)) {
- hns3_remove_hw_addr(netdev);
- hns3_del_all_fd_rules(netdev, false);
- }
+ clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
return ret;
}
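Note the poll-path change above: interrupts are re-armed only when
napi_complete() reports that NAPI really completed (it returns false if
NAPI was rescheduled in the meantime), and never once the DOWN bit is set.
The general idiom, with hypothetical helpers:

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
	int work = my_do_rx(napi, budget);	/* hypothetical rx clean */

	if (work < budget && napi_complete_done(napi, work))
		my_rearm_irq(napi);		/* hypothetical re-arm */

	return work;
}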
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index d3636d088aa3..bbf227ba30f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -15,7 +15,7 @@ extern const char hns3_driver_version[];
enum hns3_nic_state {
HNS3_NIC_STATE_TESTING,
HNS3_NIC_STATE_RESETTING,
- HNS3_NIC_STATE_REINITING,
+ HNS3_NIC_STATE_INITED,
HNS3_NIC_STATE_DOWN,
HNS3_NIC_STATE_DISABLED,
HNS3_NIC_STATE_REMOVING,
@@ -47,7 +47,7 @@ enum hns3_nic_state {
#define HNS3_RING_PREFETCH_EN_REG 0x0007C
#define HNS3_RING_CFG_VF_NUM_REG 0x00080
#define HNS3_RING_ASID_REG 0x0008C
-#define HNS3_RING_RX_VM_REG 0x00090
+#define HNS3_RING_EN_REG 0x00090
#define HNS3_RING_T0_BE_RST 0x00094
#define HNS3_RING_COULD_BE_RST 0x00098
#define HNS3_RING_WRR_WEIGHT_REG 0x0009c
@@ -76,7 +76,10 @@ enum hns3_nic_state {
#define HNS3_RING_MAX_PENDING 32768
#define HNS3_RING_MIN_PENDING 8
#define HNS3_RING_BD_MULTIPLE 8
-#define HNS3_MAX_MTU 9728
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
#define HNS3_BD_SIZE_512_TYPE 0
#define HNS3_BD_SIZE_1024_TYPE 1
@@ -109,6 +112,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +142,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
-#define HNS3_RXD_HDL_S 16
-#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
-#define HNS3_RXD_HSIND_B 31
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -194,6 +200,8 @@ enum hns3_nic_state {
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_RING_EN_B 0
+
enum hns3_pkt_l3t_type {
HNS3_L3T_NONE,
HNS3_L3T_IPV6,
@@ -399,11 +407,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
+ int pull_len; /* head length for current packet */
+ u32 frag_num;
+ unsigned char *va; /* first buffer address for current packet */
+
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
};
struct hns_queue;
@@ -577,6 +593,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring)
return ring->next_to_use == ring->next_to_clean;
}
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = READ_ONCE(base);
@@ -586,7 +607,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev)
{
- return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET));
+ return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_FLR_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FUNC_RESET ||
+ ae_dev->reset_type == HNAE3_VF_FULL_RESET ||
+ ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET));
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
}
#define hns3_write_dev(a, reg, value) \
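For reference, the new HNS3_MAX_MTU derivation above works out as follows. A standalone sketch using the standard header constants (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4); the SKETCH_ names are editorial, not part of the patch:

/* Editorial sketch: the MTU limit derived from the MAC frame limit,
 * reserving room for the Ethernet header, FCS and two VLAN tags.
 */
#define SKETCH_MAC_MAX_FRAME	9728
#define SKETCH_ETH_HLEN		14	/* ETH_HLEN */
#define SKETCH_ETH_FCS_LEN	4	/* ETH_FCS_LEN */
#define SKETCH_VLAN_HLEN	4	/* VLAN_HLEN */

/* 9728 - (14 + 4 + 2 * 4) = 9702 bytes of MTU */
#define SKETCH_MAX_MTU \
	(SKETCH_MAC_MAX_FRAME - \
	 (SKETCH_ETH_HLEN + SKETCH_ETH_FCS_LEN + 2 * SKETCH_VLAN_HLEN))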
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index a4762c2b8ba1..4563638367ac 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev,
int test_index = 0;
u32 i;
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!\n");
+ return;
+ }
+
/* Only do offline selftest, or pass by default */
if (eth_test->flags != ETH_TEST_FL_OFFLINE)
return;
@@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev,
struct hnae3_handle *h = priv->ae_handle;
int queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!\n");
+ return;
+ }
+
param->tx_max_pending = HNS3_RING_MAX_PENDING;
param->rx_max_pending = HNS3_RING_MAX_PENDING;
@@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev,
u32 old_desc_num, new_desc_num;
int ret;
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
if (param->rx_mini_pending || param->rx_jumbo_pending)
return -EINVAL;
@@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
struct hnae3_handle *h = priv->ae_handle;
u16 queue_num = h->kinfo.num_tqps;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (queue >= queue_num) {
netdev_err(netdev,
"Invalid queue value %d! Queue max id=%d\n",
@@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev,
int ret;
int i;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
ret = hns3_check_coalesce_para(netdev, cmd);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 690f62ed87dc..8af0cef5609b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev)
hdev->hw.cmq.crq.next_to_use = 0;
hclge_cmd_init_regs(&hdev->hw);
- clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
spin_unlock_bh(&hdev->hw.cmq.crq.lock);
spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher-level
+ * reset may happen while a lower-level reset is being processed.
+ */
+ if (hclge_is_reset_pending(hdev)) {
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
+
ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 872cd4bdd70d..aef044d08b11 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -152,6 +152,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -758,6 +759,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 123c37e653f3..6da9e22d82d0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -818,7 +818,6 @@ static void hclge_process_igu_egu_error(struct hclge_dev *hdev,
static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
enum hclge_err_int_type int_type)
{
- enum hnae3_reset_type reset_level = HNAE3_NONE_RESET;
struct device *dev = &hdev->pdev->dev;
const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3;
struct hclge_desc desc[2];
@@ -848,23 +847,17 @@ static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd,
}
err_sts = le32_to_cpu(desc[0].data[2]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst1, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
err_sts = le32_to_cpu(desc[0].data[3]);
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst2, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3;
- if (err_sts) {
+ if (err_sts)
hclge_log_error(dev, hw_err_lst3, err_sts);
- reset_level = HNAE3_FUNC_RESET;
- }
}
/* clear PPP INT */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ffdd96020860..f78b8e188443 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -26,7 +26,7 @@
#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
-static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu);
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -1144,6 +1166,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev)
for (i = 0; i < num_vport; i++) {
vport->back = hdev;
vport->vport_id = i;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
if (i == 0)
ret = hclge_vport_setup(vport, tqp_main_vport);
@@ -1947,10 +1970,7 @@ static int hclge_get_autoneg(struct hnae3_handle *handle)
static int hclge_mac_init(struct hclge_dev *hdev)
{
- struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct net_device *netdev = handle->kinfo.netdev;
struct hclge_mac *mac = &hdev->hw.mac;
- int mtu;
int ret;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
@@ -1964,15 +1984,16 @@ static int hclge_mac_init(struct hclge_dev *hdev)
mac->link = 0;
- if (netdev)
- mtu = netdev->mtu;
- else
- mtu = ETH_DATA_LEN;
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
- ret = hclge_set_mtu(handle, mtu);
+ ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
- "set mtu failed ret=%d\n", ret);
+ "allocate buffer fail, ret=%d\n", ret);
return ret;
}
@@ -2144,7 +2165,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
*/
/* check for vector0 reset event sources */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
@@ -2152,18 +2182,13 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
}
if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) {
+ dev_info(&hdev->pdev->dev, "core reset interrupt\n");
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
set_bit(HNAE3_CORE_RESET, &hdev->reset_pending);
*clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B);
return HCLGE_VECTOR0_EVENT_RST;
}
- if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) {
- set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
- *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
- return HCLGE_VECTOR0_EVENT_RST;
- }
-
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
@@ -2308,21 +2333,56 @@ static int hclge_notify_client(struct hclge_dev *hdev,
int ret;
ret = client->ops->reset_notify(handle, type);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
return ret;
+ }
}
return 0;
}
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret = 0;
+ u16 i;
+
+ if (!client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
static int hclge_reset_wait(struct hclge_dev *hdev)
{
#define HCLGE_RESET_WATI_MS 100
-#define HCLGE_RESET_WAIT_CNT 5
+#define HCLGE_RESET_WAIT_CNT 200
u32 val, reg, reg_bit;
u32 cnt = 0;
switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
case HNAE3_GLOBAL_RESET:
reg = HCLGE_GLOBAL_RESET_REG;
reg_bit = HCLGE_GLOBAL_RESET_BIT;
@@ -2335,6 +2395,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
reg = HCLGE_FUN_RST_ING;
reg_bit = HCLGE_FUN_RST_ING_B;
break;
+ case HNAE3_FLR_RESET:
+ break;
default:
dev_err(&hdev->pdev->dev,
"Wait for unsupported reset type: %d\n",
@@ -2342,6 +2404,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return -EINVAL;
}
+ if (hdev->reset_type == HNAE3_FLR_RESET) {
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < HCLGE_RESET_WAIT_CNT)
+ msleep(HCLGE_RESET_WATI_MS);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout: %d\n", cnt);
+ return -EBUSY;
+ }
+
+ return 0;
+ }
+
val = hclge_read_dev(&hdev->hw, reg);
while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
msleep(HCLGE_RESET_WATI_MS);
@@ -2358,6 +2434,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev)
return 0;
}
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%d) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset)
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%d) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
{
struct hclge_desc desc;
@@ -2396,11 +2521,16 @@ static void hclge_do_reset(struct hclge_dev *hdev)
break;
case HNAE3_FUNC_RESET:
dev_info(&pdev->dev, "PF Reset requested\n");
- hclge_func_reset_cmd(hdev, 0);
/* schedule again to check later */
set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
hclge_reset_task_schedule(hdev);
break;
+ case HNAE3_FLR_RESET:
+ dev_info(&pdev->dev, "FLR requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FLR_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
default:
dev_warn(&pdev->dev,
"Unsupported reset type: %d\n", hdev->reset_type);
@@ -2414,20 +2544,28 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev,
enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
/* return the highest priority reset level amongst all */
- if (test_bit(HNAE3_GLOBAL_RESET, addr))
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
rst_level = HNAE3_GLOBAL_RESET;
- else if (test_bit(HNAE3_CORE_RESET, addr))
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_CORE_RESET, addr)) {
rst_level = HNAE3_CORE_RESET;
- else if (test_bit(HNAE3_IMP_RESET, addr))
- rst_level = HNAE3_IMP_RESET;
- else if (test_bit(HNAE3_FUNC_RESET, addr))
+ clear_bit(HNAE3_CORE_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
rst_level = HNAE3_FUNC_RESET;
-
- /* now, clear all other resets */
- clear_bit(HNAE3_GLOBAL_RESET, addr);
- clear_bit(HNAE3_CORE_RESET, addr);
- clear_bit(HNAE3_IMP_RESET, addr);
- clear_bit(HNAE3_FUNC_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
return rst_level;
}
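The rewritten helper encodes a strict priority order (IMP > GLOBAL > CORE > FUNC > FLR) and clears every request at or below the level it returns. A minimal sketch of the resulting behaviour, using only names from the patch:

unsigned long pending = 0;
enum hnae3_reset_type level;

set_bit(HNAE3_FUNC_RESET, &pending);
set_bit(HNAE3_IMP_RESET, &pending);

level = hclge_get_reset_level(hdev, &pending);
/* level == HNAE3_IMP_RESET and pending is now empty: the FUNC
 * request is absorbed by the higher-priority IMP reset instead
 * of being served separately afterwards.
 */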
@@ -2457,39 +2595,206 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
hclge_enable_vector(&hdev->misc_vector, true);
}
+static int hclge_reset_prepare_down(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* There is no mechanism for PF to know if VF has stopped IO
+ * for now, just wait 100 ms for VF to stop IO
+ */
+ msleep(100);
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+ /* After performing a PF reset, it is not necessary to do the
+ * mailbox handling or send any command to firmware, because
+ * any mailbox handling or command to firmware is only valid
+ * after hclge_cmd_init is called.
+ */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ break;
+ case HNAE3_FLR_RESET:
+ /* There is no mechanism for PF to know if VF has stopped IO
+ * for now, just wait 100 ms for VF to stop IO
+ */
+ msleep(100);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ case HNAE3_IMP_RESET:
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout)
+{
+#define MAX_RESET_FAIL_CNT 5
+#define RESET_UPGRADE_DELAY_SEC 10
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if ((hdev->reset_type != HNAE3_IMP_RESET) &&
+ (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) &
+ BIT(HCLGE_IMP_RESET_BIT))) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because IMP Reset is pending\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->reset_fail_cnt++;
+ if (is_timeout) {
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule to wait for hw reset done\n");
+ return true;
+ }
+
+ dev_info(&hdev->pdev->dev, "Upgrade reset level\n");
+ hclge_clear_reset_cause(hdev);
+ mod_timer(&hdev->reset_timer,
+ jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
+
+ return false;
+ }
+
+ hclge_clear_reset_cause(hdev);
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+ return false;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ /* fall through */
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
static void hclge_reset(struct hclge_dev *hdev)
{
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
- struct hnae3_handle *handle;
+ bool is_timeout = false;
+ int ret;
/* Initialize ae_dev reset status as well, in case enet layer wants to
* know if device is undergoing reset
*/
ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
+ hdev->last_reset_time = jiffies;
/* perform reset of the stack & ae device for a client */
- handle = &hdev->vport[0].nic;
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_reset_prepare_down(hdev);
+ if (ret)
+ goto err_reset;
+
rtnl_lock();
- hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
rtnl_unlock();
- if (!hclge_reset_wait(hdev)) {
- rtnl_lock();
- hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
- hclge_reset_ae_dev(hdev->ae_dev);
- hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclge_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
- hclge_clear_reset_cause(hdev);
- } else {
- rtnl_lock();
- /* schedule again to check pending resets later */
- set_bit(hdev->reset_type, &hdev->reset_pending);
- hclge_reset_task_schedule(hdev);
+ if (hclge_reset_wait(hdev)) {
+ is_timeout = true;
+ goto err_reset;
}
- hclge_notify_client(hdev, HNAE3_UP_CLIENT);
- handle->last_reset_time = jiffies;
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ goto err_reset_lock;
+
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
+
+ rtnl_unlock();
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset;
+
+ return;
+
+err_reset_lock:
rtnl_unlock();
- ae_dev->reset_type = HNAE3_NONE_RESET;
+err_reset:
+ if (hclge_reset_err_handle(hdev, is_timeout))
+ hclge_reset_task_schedule(hdev);
}
static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
@@ -2515,20 +2820,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
if (!handle)
handle = &hdev->vport[0].nic;
- if (time_before(jiffies, (handle->last_reset_time + 3 * HZ)))
+ if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ)))
return;
- else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ)))
- handle->reset_level = HNAE3_FUNC_RESET;
+ else if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclge_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ)))
+ hdev->reset_level = HNAE3_FUNC_RESET;
dev_info(&hdev->pdev->dev, "received reset event , reset type is %d",
- handle->reset_level);
+ hdev->reset_level);
/* request reset & schedule reset task */
- set_bit(handle->reset_level, &hdev->reset_request);
+ set_bit(hdev->reset_level, &hdev->reset_request);
hclge_reset_task_schedule(hdev);
- if (handle->reset_level < HNAE3_GLOBAL_RESET)
- handle->reset_level++;
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ dev_info(&hdev->pdev->dev,
+ "triggering global reset in reset timer\n");
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
}
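Together with hclge_reset_err_handle() above, the new timer gives failed resets a bounded escalation path. A sketch of the chain, using only names from the patch:

/* hclge_reset() fails (not a hardware-wait timeout)
 *   -> hclge_reset_err_handle() arms the timer:
 *        mod_timer(&hdev->reset_timer,
 *                  jiffies + RESET_UPGRADE_DELAY_SEC * HZ);
 *   -> 10 seconds later hclge_reset_timer() runs:
 *        set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request);
 *        hclge_reset_event(hdev->pdev, NULL);
 *   -> hclge_reset_event() resolves default_reset_request through
 *      hclge_get_reset_level() and schedules the reset task, so a
 *      repeatedly failing lower-level reset is retried as a global
 *      reset.
 */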
static void hclge_reset_subtask(struct hclge_dev *hdev)
@@ -2542,6 +2869,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev)
* b. else, we can come back later to check this status so re-sched
* now.
*/
+ hdev->last_reset_time = jiffies;
hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending);
if (hdev->reset_type != HNAE3_NONE_RESET)
hclge_reset(hdev);
@@ -2584,6 +2912,23 @@ static void hclge_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
+ /* start from vport 1; vport 0 is the PF and is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+ /* If vf is not alive, set to default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
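This pairs with the new HCLGE_MBX_KEEP_ALIVE handling later in the patch; a sketch of the aging protocol as implemented here (the VF-side sender is not shown in this excerpt):

/* VF, periodically:
 *   mailbox HCLGE_MBX_KEEP_ALIVE
 *     -> hclge_vf_keep_alive():
 *          vport->last_active_jiffies = jiffies;
 *
 * PF, once per service-task cycle:
 *   hclge_update_vport_alive():
 *     if (time_after(jiffies, last_active_jiffies + 8 * HZ))
 *         clear HCLGE_VPORT_STATE_ALIVE;
 *     if not alive:
 *         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
 */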
static void hclge_service_task(struct work_struct *work)
{
struct hclge_dev *hdev =
@@ -2596,6 +2941,7 @@ static void hclge_service_task(struct work_struct *work)
hclge_update_speed_duplex(hdev);
hclge_update_link_status(hdev);
+ hclge_update_vport_alive(hdev);
hclge_service_complete(hdev);
}
@@ -4336,8 +4682,12 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
struct hlist_node *node;
int ret;
+ /* Return 0 here, because the reset error handling checks this
+ * return value; returning an error here would make the whole
+ * reset process fail.
+ */
if (!hnae3_dev_fd_supported(hdev))
- return -EOPNOTSUPP;
+ return 0;
hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
@@ -4592,6 +4942,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
return 0;
}
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->reset_count;
+}
+
static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
@@ -4805,10 +5180,6 @@ static int hclge_ae_start(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
-
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, true);
/* mac enable */
hclge_cfg_mac_mode(hdev, true);
@@ -4828,7 +5199,6 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
@@ -4836,14 +5206,15 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
cancel_work_sync(&hdev->service_task);
clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state);
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ /* If it is not a PF reset, the firmware will disable the MAC,
+ * so it only needs to stop the PHY here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
+ hdev->reset_type != HNAE3_FUNC_RESET) {
hclge_mac_stop_phy(hdev);
return;
}
- for (i = 0; i < vport->alloc_tqps; i++)
- hclge_tqp_enable(hdev, i, 0, false);
-
/* Mac disable */
hclge_cfg_mac_mode(hdev, false);
@@ -4856,6 +5227,32 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
hclge_update_link_status(hdev);
}
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
u16 cmdq_resp, u8 resp_code,
enum hclge_mac_vlan_tbl_opcode op)
@@ -6003,54 +6400,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
return hclge_set_vlan_rx_offload_cfg(vport);
}
-static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu)
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
{
struct hclge_config_max_frm_size_cmd *req;
struct hclge_desc desc;
- int max_frm_size;
- int ret;
-
- max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-
- if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
- max_frm_size > HCLGE_MAC_MAX_FRAME)
- return -EINVAL;
-
- max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
req = (struct hclge_config_max_frm_size_cmd *)desc.data;
- req->max_frm_size = cpu_to_le16(max_frm_size);
+ req->max_frm_size = cpu_to_le16(new_mps);
req->min_frm_size = HCLGE_MAC_MIN_FRAME;
- ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
- dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret);
- else
- hdev->mps = max_frm_size;
-
- return ret;
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
struct hclge_dev *hdev = vport->back;
- int ret;
+ int i, max_frm_size, ret = 0;
+
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must be no smaller than any VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
- ret = hclge_set_mac_mtu(hdev, new_mtu);
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
if (ret) {
dev_err(&hdev->pdev->dev,
"Change mtu fail, ret =%d\n", ret);
- return ret;
+ goto out;
}
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
ret = hclge_buffer_alloc(hdev);
if (ret)
dev_err(&hdev->pdev->dev,
"Allocate buffer fail, ret =%d\n", ret);
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
return ret;
}
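For a VF the same entry point is reached over the mailbox; a sketch of the full path added by this patch (PF side above, mailbox side in hclge_mbx.c below):

/* VF: hclgevf_set_mtu() sends HCLGE_MBX_SET_MTU carrying the u32 mtu
 * PF: hclge_set_vf_mtu() copies the mtu out of msg[2] and calls
 *     hclge_set_vport_mtu(vport, mtu)
 * PF: since vport_id != 0, only vport->mps is updated, and only if
 *     the resulting frame size fits within the PF's hdev->mps;
 *     otherwise -EINVAL is carried back to the VF in the mailbox
 *     response via hclge_gen_resp_to_vf().
 */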
@@ -6250,7 +6669,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
if (!phydev->link || !phydev->autoneg)
return 0;
- local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
remote_advertising = LPA_PAUSE_CAP;
@@ -6612,6 +7031,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
if (hdev->service_timer.function)
del_timer_sync(&hdev->service_timer);
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
if (hdev->service_task.func)
cancel_work_sync(&hdev->service_task);
if (hdev->rst_service_task.func)
@@ -6620,6 +7041,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev)
cancel_work_sync(&hdev->mbx_service_task);
}
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_WAIT_MS 100
+#define HCLGE_FLR_WAIT_CNT 50
+ struct hclge_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclge_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGE_FLR_WAIT_CNT)
+ msleep(HCLGE_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
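A sketch of the FLR handshake these two hooks implement, combined with hclge_reset_prepare_wait() and hclge_reset_wait() above:

/* PCI core, before FLR  -> hclge_flr_prepare():
 *     request HNAE3_FLR_RESET, then poll flr_state until the reset
 *     task sets HNAE3_FLR_DOWN (device stopped, up to 50 * 100 ms)
 * reset task             -> hclge_reset_prepare_wait():
 *     set HNAE3_FLR_DOWN; hclge_reset_wait() then polls flr_state
 *     for HNAE3_FLR_DONE
 * PCI core, after FLR    -> hclge_flr_done():
 *     set HNAE3_FLR_DONE, releasing the reset task to rebuild the
 *     device through hclge_reset_ae_dev()
 */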
static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
@@ -6635,7 +7084,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hdev->pdev = pdev;
hdev->ae_dev = ae_dev;
hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
ae_dev->priv = hdev;
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
ret = hclge_pci_init(hdev);
if (ret) {
@@ -6727,6 +7180,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6769,6 +7226,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_dcb_ops_set(hdev);
timer_setup(&hdev->service_timer, hclge_service_timer, 0);
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
INIT_WORK(&hdev->service_task, hclge_service_task);
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task);
INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task);
@@ -6779,6 +7237,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_enable_vector(&hdev->misc_vector, true);
hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME);
return 0;
@@ -6806,6 +7265,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev)
memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats));
}
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_start(vport);
+ vport++;
+ }
+}
+
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct hclge_dev *hdev = ae_dev->priv;
@@ -6856,6 +7326,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -6887,6 +7361,8 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
if (hclge_enable_tm_hw_error(hdev, true))
dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n");
+ hclge_reset_vport_state(hdev);
+
dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
HCLGE_DRIVER_NAME);
@@ -6913,6 +7389,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
hclge_destroy_cmd_queue(&hdev->hw);
hclge_misc_irq_uninit(hdev);
hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
ae_dev->priv = NULL;
}
@@ -7272,9 +7749,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
+static int hclge_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
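The new .set_gro_en hook is wired into the ops table at the bottom of this file; its enet-layer caller is not part of this excerpt. A hedged sketch of what such a caller might look like (hns3_nic_set_features() and the use of NETIF_F_GRO_HW are assumptions here):

/* Hypothetical caller in the enet layer, not part of this patch: */
netdev_features_t changed = netdev->features ^ features;
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;

if ((changed & NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
	ret = h->ae_algo->ops->set_gro_en(h,
					  !!(features & NETIF_F_GRO_HW));
	if (ret)
		return ret;
}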
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
.init_client_instance = hclge_init_client_instance,
.uninit_client_instance = hclge_uninit_client_instance,
.map_ring_to_vector = hclge_map_ring_to_vector,
@@ -7285,6 +7772,8 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_loopback = hclge_set_loopback,
.start = hclge_ae_start,
.stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
.get_status = hclge_get_status,
.get_ksettings_an_result = hclge_get_ksettings_an_result,
.update_speed_duplex_h = hclge_update_speed_duplex_h,
@@ -7321,6 +7810,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.set_vf_vlan_filter = hclge_set_vf_vlan_filter,
.enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
.reset_event = hclge_reset_event,
+ .set_default_reset_request = hclge_set_def_reset_request,
.get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
.set_channels = hclge_set_channels,
.get_channels = hclge_get_channels,
@@ -7337,6 +7827,10 @@ static const struct hnae3_ae_ops hclge_ops = {
.restore_fd_rules = hclge_restore_fd_entries,
.enable_fd = hclge_enable_fd,
.process_hw_error = hclge_process_ras_hw_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 0d9215404269..5f24dd41d7eb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -97,11 +97,13 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
#define HCLGE_MISC_RESET_STS_REG 0x20700
#define HCLGE_MISC_VECTOR_INT_STS 0x20800
#define HCLGE_GLOBAL_RESET_REG 0x20A00
#define HCLGE_GLOBAL_RESET_BIT 0
#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
#define HCLGE_FUN_RST_ING 0x20C00
#define HCLGE_FUN_RST_ING_B 0
@@ -115,8 +117,10 @@ enum HLCGE_PORT_TYPE {
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+
#define HCLGE_MAC_DEFAULT_FRAME \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
#define HCLGE_MAC_MIN_FRAME 64
#define HCLGE_MAC_MAX_FRAME 9728
@@ -593,10 +597,16 @@ struct hclge_dev {
struct hclge_misc_vector misc_vector;
struct hclge_hw_stats hw_stats;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
unsigned long reset_request; /* reset has been requested */
unsigned long reset_pending; /* client rst is pending to be served */
+ unsigned long reset_count; /* the number of reset has been done */
+ u32 reset_fail_cnt;
u32 fw_version;
u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
u16 num_tqps; /* Num task queue pairs of this PF */
@@ -644,6 +654,7 @@ struct hclge_dev {
unsigned long service_timer_period;
unsigned long service_timer_previous;
struct timer_list service_timer;
+ struct timer_list reset_timer;
struct work_struct service_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -667,6 +678,8 @@ struct hclge_dev {
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
u32 mps; /* Max packet size */
+ /* vport_lock protect resource shared by vports */
+ struct mutex vport_lock;
struct hclge_vlan_type_cfg vlan_type_cfg;
@@ -717,6 +730,11 @@ struct hclge_rss_tuple_cfg {
u8 ipv6_fragment_en;
};
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAX
+};
+
struct hclge_vport {
u16 alloc_tqps; /* Allocated Tx/Rx queues */
@@ -742,6 +760,10 @@ struct hclge_vport {
struct hclge_dev *back; /* Back reference to associated dev */
struct hnae3_handle nic;
struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
};
void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
@@ -768,6 +790,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
return tqp->index;
}
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
@@ -777,9 +805,13 @@ int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index f890022938d9..e16a730a5f54 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
return status;
}
-static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
{
+ struct hclge_dev *hdev = vport->back;
+ enum hnae3_reset_type reset_type;
u8 msg_data[2];
u8 dest_vfid;
dest_vfid = (u8)vport->vport_id;
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ return -EINVAL;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
/* send this requested info to VF */
- return hclge_send_mbx_msg(vport, msg_data, sizeof(u8),
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
HCLGE_MBX_ASSERTING_RESET, dest_vfid);
}
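The reset type is squeezed into two bytes of msg_data, which relies on the enum's little-endian in-memory layout (fine on the arm64 targets this driver supports). The VF side presumably reassembles it the same way; a hedged sketch (the VF handler and its message offsets are assumptions, not shown in this excerpt):

/* Hypothetical VF-side decode in the hclgevf mailbox handler: */
enum hnae3_reset_type reset_type;
u16 rst_type;

memcpy(&rst_type, &req->msg[1], sizeof(rst_type));
reset_type = (enum hnae3_reset_type)rst_type;
set_bit(reset_type, &hdev->reset_pending);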
@@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
return status;
}
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ bool gen_resp)
+{
+ bool alive = !!mbx_req->msg[2];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
static int hclge_get_vf_tcinfo(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
bool gen_resp)
@@ -363,24 +389,28 @@ static void hclge_reset_vf(struct hclge_vport *vport,
int ret;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!",
- mbx_req->mbx_src_vfid);
-
- /* Acknowledge VF that PF is now about to assert the reset for the VF.
- * On receiving this message VF will get into pending state and will
- * start polling for the hardware reset completion status.
- */
- ret = hclge_inform_reset_assert_to_vf(vport);
- if (ret) {
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to inform VF(%d)of reset, reset failed!\n",
- ret, vport->vport_id);
- return;
- }
+ vport->vport_id);
- dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n",
- mbx_req->mbx_src_vfid);
- /* reset this virtual function */
- hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid);
+ ret = hclge_func_reset_cmd(hdev, vport->vport_id);
+ hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ int ret;
+ u32 mtu;
+
+ memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu));
+ ret = hclge_set_vport_mtu(vport, mtu);
+
+ return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0);
}
static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
@@ -460,6 +490,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
"PF failed(%d) to config VF's VLAN\n",
ret);
break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
case HCLGE_MBX_GET_QINFO:
ret = hclge_get_vf_queue_info(vport, req, true);
if (ret)
@@ -487,6 +524,15 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
case HCLGE_MBX_RESET:
hclge_reset_vf(vport, req);
break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport, req);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03018638f701..741cb3b9519d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -195,12 +195,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
{
struct net_device *netdev = hdev->vport[0].nic.netdev;
struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
int ret;
if (!phydev)
return 0;
- phydev->supported &= ~SUPPORTED_FIBRE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
ret = phy_connect_direct(netdev, phydev,
hclge_mac_adjust_link,
@@ -210,7 +211,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev)
return ret;
}
- phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask);
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ mask);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ mask);
+ linkmode_and(phydev->supported, phydev->supported, mask);
phy_support_asym_pause(phydev);
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 0d3b445f6799..d5765c8cf3a3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
return false;
}
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = (u32)ring->desc_dma_addr;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
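One detail worth noting in the register setup above is the two-step shift used for the high word of the descriptor DMA address; a short editorial note, not part of the patch:

/* desc_dma_addr is a dma_addr_t, which may be only 32 bits wide on
 * some configurations; a plain ">> 32" would then be undefined
 * behaviour. Splitting the shift keeps it defined for both widths
 * and still extracts the upper 32 bits when dma_addr_t is 64-bit:
 */
u32 lo = (u32)ring->desc_dma_addr;
u32 hi = (u32)((ring->desc_dma_addr >> 31) >> 1);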
static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
int size = ring->desc_num * sizeof(struct hclgevf_desc);
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
}
}
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
- struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
struct hclgevf_hw *hw = &hdev->hw;
- int ring_type = ring->flag;
- u32 reg_val;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
int ret;
- ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- spin_lock_init(&ring->lock);
- ring->next_to_clean = 0;
- ring->next_to_use = 0;
ring->dev = hdev;
+ ring->flag = ring_type;
/* allocate CSQ/CRQ descriptor */
ret = hclgevf_alloc_cmd_desc(ring);
- if (ret) {
+ if (ret)
dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
- return ret;
- }
- /* initialize the hardware registers with csq/crq dma-address,
- * descriptor number, head & tail pointers
- */
- switch (ring_type) {
- case HCLGEVF_TYPE_CSQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
- return 0;
- case HCLGEVF_TYPE_CRQ:
- reg_val = (u32)ring->desc_dma_addr;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
- reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
- reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
- reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
- hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
- return 0;
- default:
- return -EINVAL;
- }
+ return ret;
}
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
spin_lock_bh(&hw->cmq.csq.lock);
- if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ if (num > hclgevf_ring_space(&hw->cmq.csq) ||
+ test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
spin_unlock_bh(&hw->cmq.csq.lock);
return -EBUSY;
}
@@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
return status;
}
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
- /* setup Tx write back timeout */
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
- /* setup queue CSQ/CRQ rings */
- hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CSQ ring\n", ret);
+ "CSQ ring setup error %d\n", ret);
return ret;
}
- hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
- ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to initialize CRQ ring\n", ret);
+ "CRQ ring setup error %d\n", ret);
goto err_csq;
}
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ u32 version;
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
/* initialize the pointers of async rx queue of mailbox */
hdev->arq.hdev = hdev;
hdev->arq.head = 0;
hdev->arq.tail = 0;
hdev->arq.count = 0;
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher-level
+ * reset may happen while a lower-level reset is being processed.
+ */
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EBUSY;
+ }
/* get firmware version */
ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
if (ret) {
dev_err(&hdev->pdev->dev,
"failed(%d) to query firmware version\n", ret);
- goto err_crq;
+ return ret;
}
hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
return 0;
-err_crq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
- hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
- return ret;
}
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index bc294b0c8b62..47030b42341f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ __le16 gro_en;
+ u8 rsv[22];
+};
+
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
int hclgevf_cmd_init(struct hclgevf_dev *hdev);
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 085edb945389..efec1b7a6a64 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2,6 +2,7 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
#include <net/rtnetlink.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
@@ -10,8 +11,7 @@
#define HCLGEVF_NAME "hclgevf"
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev);
-static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev);
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
static struct hnae3_ae_algo ae_algovf;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
@@ -209,12 +209,6 @@ static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
struct hclgevf_tqp *tqp;
int i;
- /* if this is on going reset then we need to re-allocate the TPQs
- * since we cannot assume we would get same number of TPQs back from PF
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, hdev->htqp);
-
hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
sizeof(struct hclgevf_tqp), GFP_KERNEL);
if (!hdev->htqp)
@@ -258,12 +252,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
new_tqps = kinfo->rss_size * kinfo->num_tc;
kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
- /* if this is on going reset then we need to re-allocate the hnae queues
- * as well since number of TPQs from PF might have changed.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- devm_kfree(&hdev->pdev->dev, kinfo->tqp);
-
kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
sizeof(struct hnae3_queue *), GFP_KERNEL);
if (!kinfo->tqp)
@@ -868,6 +856,9 @@ static int hclgevf_unmap_ring_from_vector(
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
int ret, vector_id;
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
vector_id = hclgevf_get_vector_index(hdev, vector);
if (vector_id < 0) {
dev_err(&handle->pdev->dev,
@@ -956,13 +947,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id,
return status;
}
-static int hclgevf_get_queue_id(struct hnae3_queue *queue)
-{
- struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q);
-
- return tqp->index;
-}
-
static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
@@ -1097,38 +1081,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
2, true, NULL, 0);
}
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu,
+ sizeof(new_mtu), true, NULL, 0);
+}
+
static int hclgevf_notify_client(struct hclgevf_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->nic_client;
struct hnae3_handle *handle = &hdev->nic;
+ int ret;
if (!client->ops->reset_notify)
return -EOPNOTSUPP;
- return client->ops->reset_notify(handle, type);
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+}
+
+static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev,
+ unsigned long delay_us,
+ unsigned long wait_cnt)
+{
+ unsigned long cnt = 0;
+
+ while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) &&
+ cnt++ < wait_cnt)
+ usleep_range(delay_us, delay_us * 2);
+
+ if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) {
+ dev_err(&hdev->pdev->dev,
+ "flr wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
}
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
-#define HCLGEVF_RESET_WAIT_MS 500
-#define HCLGEVF_RESET_WAIT_CNT 20
- u32 val, cnt = 0;
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
/* wait to check the hardware reset completion status */
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) &&
- (cnt < HCLGEVF_RESET_WAIT_CNT)) {
- msleep(HCLGEVF_RESET_WAIT_MS);
- val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING);
- cnt++;
- }
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val);
+
+ if (hdev->reset_type == HNAE3_FLR_RESET)
+ return hclgevf_flr_poll_timeout(hdev,
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_CNT);
+
+ ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
/* hardware completion status should be available by this time */
- if (cnt >= HCLGEVF_RESET_WAIT_CNT) {
- dev_warn(&hdev->pdev->dev,
- "could'nt get reset done status from h/w, timeout!\n");
- return -EBUSY;
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could'nt get reset done status from h/w, timeout!\n");
+ return ret;
}
/* we will wait a bit more to let the reset of the stack complete. This
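A note on the helper this hunk switches to: readl_poll_timeout() from <linux/iopoll.h> repeatedly reads an MMIO register until a condition holds or a deadline passes. A rough sketch of what the call above boils down to for the HCLGEVF_RST_ING poll (simplified; the real macro tracks the deadline with ktime and re-checks the condition once more after timing out):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Simplified stand-in for readl_poll_timeout(); the HCLGEVF_* macros
 * are the ones defined in the hunks above and in hclgevf_main.h below.
 */
static int rst_ing_poll_sketch(void __iomem *io_base)
{
	unsigned long left_us = HCLGEVF_RESET_WAIT_TIMEOUT_US;
	u32 val;

	for (;;) {
		val = readl(io_base + HCLGEVF_RST_ING);
		if (!(val & HCLGEVF_RST_ING_BITS))
			return 0;	/* every reset bit cleared */
		if (left_us < HCLGEVF_RESET_WAIT_US)
			return -ETIMEDOUT;
		usleep_range(HCLGEVF_RESET_WAIT_US,
			     HCLGEVF_RESET_WAIT_US * 2);
		left_us -= HCLGEVF_RESET_WAIT_US;
	}
}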
@@ -1145,10 +1178,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
int ret;
/* uninitialize the nic client */
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
/* re-initialize the hclge device */
- ret = hclgevf_init_hdev(hdev);
+ ret = hclgevf_reset_hdev(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
"hclge device re-init failed, VF is disabled!\n");
@@ -1156,22 +1191,60 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
}
/* bring up the nic client again */
- hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
return 0;
}
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_VF_FUNC_RESET:
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
+ 0, true, NULL, sizeof(u8));
+ break;
+ case HNAE3_FLR_RESET:
+ set_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ break;
+ default:
+ break;
+ }
+
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
+ hdev->reset_type, ret);
+
+ return ret;
+}
+
static int hclgevf_reset(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
int ret;
+ /* Initialize ae_dev reset status as well, in case enet layer wants to
+ * know if device is undergoing reset
+ */
+ ae_dev->reset_type = hdev->reset_type;
+ hdev->reset_count++;
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
+ ret = hclgevf_reset_prepare_wait(hdev);
+ if (ret)
+ goto err_reset;
+
/* check if VF could successfully fetch the hardware reset completion
* status from the hardware
*/
@@ -1181,58 +1254,118 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
dev_err(&hdev->pdev->dev,
"VF failed(=%d) to fetch H/W reset completion status\n",
ret);
-
- dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n");
- rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
-
- rtnl_unlock();
- return ret;
+ goto err_reset;
}
rtnl_lock();
/* now, re-initialize the nic client and ae device */
ret = hclgevf_reset_stack(hdev);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ goto err_reset_lock;
+ }
/* bring up the nic to enable TX/RX again */
- hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ goto err_reset_lock;
rtnl_unlock();
return ret;
-}
+err_reset_lock:
+ rtnl_unlock();
+err_reset:
+ /* When a VF reset fails, only a higher-level reset asserted by the PF
+ * can recover it, so re-initialize the command queue so that the
+ * higher-level reset event can still be received.
+ */
+ hclgevf_cmd_init(hdev);
+ dev_err(&hdev->pdev->dev, "failed to reset VF\n");
-static int hclgevf_do_reset(struct hclgevf_dev *hdev)
-{
- int status;
- u8 respmsg;
+ return ret;
+}
- status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL,
- 0, false, &respmsg, sizeof(u8));
- if (status)
- dev_err(&hdev->pdev->dev,
- "VF reset request to PF failed(=%d)\n", status);
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
- return status;
+ return rst_level;
}
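hclgevf_get_reset_level() does two things at once: it returns the strongest pending reset and clears every weaker request that reset subsumes, so a completed stronger reset is not followed by a redundant weaker one. An illustrative (not in-tree) caller-side view:

/* Illustration only: with two requests pending, one call picks the
 * stronger reset and clears both bits (hdev assumed in scope).
 */
unsigned long pending = 0;
enum hnae3_reset_type level;

set_bit(HNAE3_VF_FUNC_RESET, &pending);
set_bit(HNAE3_VF_FULL_RESET, &pending);

level = hclgevf_get_reset_level(hdev, &pending);
/* level == HNAE3_VF_FULL_RESET and pending == 0: the FUNC request was
 * dropped because the full reset covers it.
 */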
static void hclgevf_reset_event(struct pci_dev *pdev,
struct hnae3_handle *handle)
{
- struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
- handle->reset_level = HNAE3_VF_RESET;
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
/* reset of this VF requested */
set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
- handle->last_reset_time = jiffies;
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_WAIT_MS 100
+#define HCLGEVF_FLR_WAIT_CNT 50
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int cnt = 0;
+
+ clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state);
+ clear_bit(HNAE3_FLR_DONE, &hdev->flr_state);
+ set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request);
+ hclgevf_reset_event(hdev->pdev, NULL);
+
+ while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) &&
+ cnt++ < HCLGEVF_FLR_WAIT_CNT)
+ msleep(HCLGEVF_FLR_WAIT_MS);
+
+ if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state))
+ dev_err(&hdev->pdev->dev,
+ "flr wait down timeout: %d\n", cnt);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -1321,9 +1454,15 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
hdev->reset_attempts = 0;
- ret = hclgevf_reset(hdev);
- if (ret)
- dev_err(&hdev->pdev->dev, "VF stack reset failed.\n");
+ hdev->last_reset_time = jiffies;
+ while ((hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending))
+ != HNAE3_NONE_RESET) {
+ ret = hclgevf_reset(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF stack reset failed %d.\n", ret);
+ }
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
/* we could be here when either of below happens:
@@ -1352,19 +1491,17 @@ static void hclgevf_reset_service_task(struct work_struct *work)
*/
if (hdev->reset_attempts > 3) {
/* prepare for full reset of stack + pcie interface */
- hdev->nic.reset_level = HNAE3_VF_FULL_RESET;
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
/* "defer" schedule the reset task again */
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
} else {
hdev->reset_attempts++;
- /* request PF for resetting this VF via mailbox */
- ret = hclgevf_do_reset(hdev);
- if (ret)
- dev_warn(&hdev->pdev->dev,
- "VF rst fail, stack will call\n");
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
}
+ hclgevf_reset_task_schedule(hdev);
}
clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
@@ -1386,6 +1523,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work)
clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
}
+static void hclgevf_keep_alive_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer);
+
+ schedule_work(&hdev->keep_alive_task);
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+}
+
+static void hclgevf_keep_alive_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev;
+ u8 respmsg;
+ int ret;
+
+ hdev = container_of(work, struct hclgevf_dev, keep_alive_task);
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL,
+ 0, false, &respmsg, sizeof(u8));
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
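The keep-alive pair above follows a common kernel pattern: the timer callback runs in atomic context, so it only re-arms itself and kicks a work item, and the mailbox send (which may sleep) happens from the workqueue. A minimal self-contained sketch of the pattern, with hypothetical my_* names:

#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_dev {
	struct timer_list ping_timer;
	struct work_struct ping_work;
};

static void my_ping_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, ping_work);

	/* process context: sleeping calls such as mailbox I/O are fine */
	(void)d;
}

static void my_ping_timer(struct timer_list *t)
{
	struct my_dev *d = from_timer(d, t, ping_timer);

	schedule_work(&d->ping_work);			/* no sleeping here */
	mod_timer(&d->ping_timer, jiffies + 2 * HZ);	/* re-arm */
}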
+
static void hclgevf_service_task(struct work_struct *work)
{
struct hclgevf_dev *hdev;
@@ -1407,24 +1566,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
}
-static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval)
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
{
- u32 cmdq_src_reg;
+ u32 cmdq_src_reg, rst_ing_reg;
/* fetch the events from their corresponding regs */
cmdq_src_reg = hclgevf_read_dev(&hdev->hw,
HCLGEVF_VECTOR0_CMDQ_SRC_REG);
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
/* check for vector0 mailbox(=CMDQ RX) event source */
if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
*clearval = cmdq_src_reg;
- return true;
+ return HCLGEVF_VECTOR0_EVENT_MBX;
}
dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n");
- return false;
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
@@ -1434,19 +1606,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
+ enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
hclgevf_enable_vector(&hdev->misc_vector, false);
- if (!hclgevf_check_event_cause(hdev, &clearval))
- goto skip_sched;
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
- hclgevf_mbx_handler(hdev);
-
- hclgevf_clear_event_cause(hdev, clearval);
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ hclgevf_reset_task_schedule(hdev);
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
-skip_sched:
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) {
+ hclgevf_clear_event_cause(hdev, clearval);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+ }
return IRQ_HANDLED;
}
@@ -1504,6 +1685,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = cpu_to_le16(en ? 1 : 0);
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -1566,21 +1770,7 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
static int hclgevf_ae_start(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
-
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* ring enable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, true);
- }
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
@@ -1595,24 +1785,10 @@ static int hclgevf_ae_start(struct hnae3_handle *handle)
static void hclgevf_ae_stop(struct hnae3_handle *handle)
{
- struct hnae3_knic_private_info *kinfo = &handle->kinfo;
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- int i, queue_id;
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- for (i = 0; i < kinfo->num_tqps; i++) {
- /* Ring disable */
- queue_id = hclgevf_get_queue_id(kinfo->tqp[i]);
- if (queue_id < 0) {
- dev_warn(&hdev->pdev->dev,
- "Get invalid queue id, ignore it\n");
- continue;
- }
-
- hclgevf_tqp_enable(hdev, queue_id, 0, false);
- }
-
/* reset tqp stats */
hclgevf_reset_tqp_stats(handle);
del_timer_sync(&hdev->service_timer);
@@ -1621,12 +1797,40 @@ static void hclgevf_ae_stop(struct hnae3_handle *handle)
hclgevf_update_link_status(hdev, 0);
}
-static void hclgevf_state_init(struct hclgevf_dev *hdev)
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
{
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 msg_data;
+
+ msg_data = alive ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE,
+ 0, &msg_data, 1, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ);
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+
+ del_timer_sync(&hdev->keep_alive_timer);
+ cancel_work_sync(&hdev->keep_alive_task);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
/* setup tasks for the MBX */
INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task);
clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
@@ -1668,10 +1872,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
int vectors;
int i;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
vectors = pci_alloc_irq_vectors(pdev,
hdev->roce_base_msix_offset + 1,
@@ -1710,6 +1910,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
sizeof(int), GFP_KERNEL);
if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
pci_free_irq_vectors(pdev);
return -ENOMEM;
}
@@ -1721,6 +1922,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
pci_free_irq_vectors(pdev);
}
@@ -1728,10 +1931,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
{
int ret = 0;
- /* if this is on going reset then skip this initialization */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
hclgevf_get_misc_vector(hdev);
ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
@@ -1861,14 +2060,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev)
struct hclgevf_hw *hw;
int ret;
- /* check if we need to skip initialization of pci. This will happen if
- * device is undergoing VF reset. Otherwise, we would need to
- * re-initialize pci interface again i.e. when device is not going
- * through *any* reset or actually undergoing full reset.
- */
- if (hclgevf_dev_ongoing_reset(hdev))
- return 0;
-
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "failed to enable PCI device\n");
@@ -1957,23 +2148,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
-static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
int ret;
- /* check if device is on-going full reset(i.e. pcie as well) */
- if (hclgevf_dev_ongoing_full_reset(hdev)) {
- dev_warn(&pdev->dev, "device is going full reset\n");
- hclgevf_uninit_hdev(hdev);
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
}
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
ret = hclgevf_pci_init(hdev);
if (ret) {
dev_err(&pdev->dev, "PCI initialization failed\n");
return ret;
}
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+ goto err_cmd_queue_init;
+ }
+
ret = hclgevf_cmd_init(hdev);
if (ret)
goto err_cmd_init;
@@ -1983,16 +2249,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret) {
dev_err(&hdev->pdev->dev,
"Query vf status error, ret = %d.\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
- goto err_query_vf;
+ goto err_cmd_init;
}
hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
ret = hclgevf_misc_irq_init(hdev);
if (ret) {
@@ -2001,6 +2268,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_misc_irq_init;
}
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
ret = hclgevf_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
@@ -2019,6 +2288,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2034,6 +2307,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
+ hdev->last_reset_time = jiffies;
pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME);
return 0;
@@ -2043,25 +2317,31 @@ err_config:
err_misc_irq_init:
hclgevf_state_uninit(hdev);
hclgevf_uninit_msi(hdev);
-err_query_vf:
- hclgevf_cmd_uninit(hdev);
err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
}
static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
{
hclgevf_state_uninit(hdev);
- hclgevf_misc_irq_uninit(hdev);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ hclgevf_pci_uninit(hdev);
+ }
+
hclgevf_cmd_uninit(hdev);
- hclgevf_uninit_msi(hdev);
- hclgevf_pci_uninit(hdev);
}
static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
{
struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
int ret;
ret = hclgevf_alloc_hdev(ae_dev);
@@ -2071,10 +2351,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
}
ret = hclgevf_init_hdev(ae_dev->priv);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
- return ret;
+ hdev = ae_dev->priv;
+ timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0);
+ INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task);
+
+ return 0;
}
static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
@@ -2151,6 +2437,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
+static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2159,13 +2452,38 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle,
*media_type = hdev->hw.mac.media_type;
}
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->reset_count;
+}
+
static const struct hnae3_ae_ops hclgevf_ops = {
.init_ae_dev = hclgevf_init_ae_dev,
.uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
.init_client_instance = hclgevf_init_client_instance,
.uninit_client_instance = hclgevf_uninit_client_instance,
.start = hclgevf_ae_start,
.stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
.map_ring_to_vector = hclgevf_map_ring_to_vector,
.unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
.get_vector = hclgevf_get_vector,
@@ -2193,11 +2511,17 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.set_vlan_filter = hclgevf_set_vlan_filter,
.enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
.reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
.get_channels = hclgevf_get_channels,
.get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
.get_status = hclgevf_get_status,
.get_ksettings_an_result = hclgevf_get_ksettings_an_result,
.get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index aed241e8ffab..4517b7ea5817 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -31,11 +31,19 @@
#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
/* CMDQ register bits for RX event(=MBX event) */
#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
#define HCLGEVF_TQP_RESET_TRY_TIMES 10
/* Reset related Registers */
-#define HCLGEVF_FUN_RST_ING 0x20C00
-#define HCLGEVF_FUN_RST_ING_B 0
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
#define HCLGEVF_RSS_IND_TBL_SIZE 512
#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
@@ -54,17 +62,25 @@
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
/* states of hclgevf device & tasks */
enum hclgevf_states {
/* device states */
HCLGEVF_STATE_DOWN,
HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
/* task states */
HCLGEVF_STATE_SERVICE_SCHED,
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
HCLGEVF_STATE_MBX_SERVICE_SCHED,
HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
};
#define HCLGEVF_MPF_ENBALE 1
@@ -145,10 +161,17 @@ struct hclgevf_dev {
struct hclgevf_misc_vector misc_vector;
struct hclgevf_rss_cfg rss_cfg;
unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1
unsigned long reset_state; /* requested, pending */
unsigned long reset_count; /* the number of resets completed */
u32 reset_attempts;
u32 fw_version;
@@ -178,7 +201,9 @@ struct hclgevf_dev {
struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
struct timer_list service_timer;
+ struct timer_list keep_alive_timer;
struct work_struct service_task;
+ struct work_struct keep_alive_task;
struct work_struct rst_service_task;
struct work_struct mbx_service_task;
@@ -192,18 +217,9 @@ struct hclgevf_dev {
u32 flag;
};
-static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev)
-{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_RESET));
-}
-
-static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev)
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
{
- return (hdev &&
- (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) &&
- (hdev->nic.reset_level == HNAE3_VF_FULL_RESET));
+ return !!hdev->reset_pending;
}
int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index e9d5a4f96304..ef9c8e6eca28 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -40,6 +40,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
}
while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
udelay(HCLGEVF_SLEEP_USCOEND);
i++;
}
@@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
crq = &hdev->hw.cmq.crq;
while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
desc = &crq->desc[crq->next_to_use];
req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
@@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
{
+ enum hnae3_reset_type reset_type;
u16 link_status;
u16 *msg_q;
u8 duplex;
@@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
/* process all the async queue messages */
while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
msg_q = hdev->arq.msg_q[hdev->arq.head];
switch (msg_q[0]) {
@@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
* has been completely reset. After this stack should
* eventually be re-initialized.
*/
- hdev->nic.reset_level = HNAE3_VF_RESET;
+ reset_type = le16_to_cpu(msg_q[1]);
+ set_bit(reset_type, &hdev->reset_pending);
set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
hclgevf_reset_task_schedule(hdev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 760b2ad8e295..209255495bc9 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev)
dev->phy.duplex = phy->duplex;
dev->phy.pause = phy->pause;
dev->phy.asym_pause = phy->asym_pause;
- dev->phy.advertising = phy->advertising;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising,
+ phy->advertising);
}
static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
@@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy,
phy_dev->autoneg = phy->autoneg;
phy_dev->speed = phy->speed;
phy_dev->duplex = phy->duplex;
- phy_dev->advertising = phy->advertising;
+ ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising,
+ phy->advertising);
return phy_start_aneg(phy_dev);
}
@@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev,
dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask;
dev->phy.def->name = dev->phy_dev->drv->name;
dev->phy.def->ops = &emac_dt_mdio_phy_ops;
- dev->phy.features = dev->phy_dev->supported;
+ ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features,
+ dev->phy_dev->supported);
dev->phy.address = dev->phy_dev->mdio.addr;
dev->phy.mode = dev->phy_dev->interface;
return 0;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c760dc72c520..be13227f1697 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
void e1000e_ptp_remove(struct e1000_adapter *adapter);
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts);
+
static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
{
return hw->phy.ops.reset(hw);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 16a73bd9f4cb..59bd587d809d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4319,13 +4319,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
/**
* e1000e_sanitize_systim - sanitize raw cycle counter reads
* @hw: pointer to the HW structure
- * @systim: time value read, sanitized and returned
+ * @systim: PHC time value read, sanitized and returned
+ * @sts: structure to hold system time before and after reading SYSTIML,
+ * may be NULL
*
* Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
* check to see that the time is incrementing at a reasonable
* rate and is a multiple of incvalue.
**/
-static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
+static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim,
+ struct ptp_system_timestamp *sts)
{
u64 time_delta, rem, temp;
u64 systim_next;
@@ -4335,7 +4338,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
/* latch SYSTIMH on read of SYSTIML */
+ ptp_read_system_prets(sts);
systim_next = (u64)er32(SYSTIML);
+ ptp_read_system_postts(sts);
systim_next |= (u64)er32(SYSTIMH) << 32;
time_delta = systim_next - systim;
@@ -4353,15 +4358,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim)
}
/**
- * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
- * @cc: cyclecounter structure
+ * e1000e_read_systim - read SYSTIM register
+ * @adapter: board private structure
+ * @sts: structure which will contain system time before and after reading
+ * SYSTIML, may be NULL
**/
-static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+u64 e1000e_read_systim(struct e1000_adapter *adapter,
+ struct ptp_system_timestamp *sts)
{
- struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
- cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel, systimeh;
+ u32 systimel, systimel_2, systimeh;
u64 systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4369,11 +4375,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non-linear increment in the systime value.
* To fix that we test for overflow and, if so, re-read systime.
*/
+ ptp_read_system_prets(sts);
systimel = er32(SYSTIML);
+ ptp_read_system_postts(sts);
systimeh = er32(SYSTIMH);
/* Is systimel so large that overflow is possible? */
if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
- u32 systimel_2 = er32(SYSTIML);
+ ptp_read_system_prets(sts);
+ systimel_2 = er32(SYSTIML);
+ ptp_read_system_postts(sts);
if (systimel > systimel_2) {
/* There was an overflow, read again SYSTIMH, and use
* systimel_2
@@ -4386,12 +4396,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim |= (u64)systimeh << 32;
if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
- systim = e1000e_sanitize_systim(hw, systim);
+ systim = e1000e_sanitize_systim(hw, systim, sts);
return systim;
}
/**
+ * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
+ * @cc: cyclecounter structure
+ **/
+static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc)
+{
+ struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
+ cc);
+
+ return e1000e_read_systim(adapter, NULL);
+}
+
+/**
* e1000_sw_init - Initialize general software structures (struct e1000_adapter)
* @adapter: board private structure to initialize
*
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 37c76945ad9b..1a4c65d9feb4 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp,
#endif/*CONFIG_E1000E_HWTS*/
/**
- * e1000e_phc_gettime - Reads the current time from the hardware clock
+ * e1000e_phc_gettimex - Reads the current time from the hardware clock and
+ * system clock
* @ptp: ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec structure to hold the current PHC time
+ * @sts: structure to hold the current system time
*
* Read the timecounter and return the correct value in ns after converting
* it into a struct timespec.
**/
-static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int e1000e_phc_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
ptp_clock_info);
unsigned long flags;
- u64 ns;
+ u64 cycles, ns;
spin_lock_irqsave(&adapter->systim_lock, flags);
- ns = timecounter_read(&adapter->tc);
+
+ /* NOTE: Non-monotonic SYSTIM readings may be returned */
+ cycles = e1000e_read_systim(adapter, sts);
+ ns = timecounter_cyc2time(&adapter->tc, cycles);
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work)
systim_overflow_work.work);
struct e1000_hw *hw = &adapter->hw;
struct timespec64 ts;
+ u64 ns;
- adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&adapter->tc);
+ ts = ns_to_timespec64(ns);
e_dbg("SYSTIM overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
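The split between the two paths above is deliberate: e1000e_phc_gettimex() converts a freshly read cycle value with timecounter_cyc2time(), which leaves the timecounter's state untouched, while the overflow worker calls timecounter_read(), which advances cycle_last/nsec so the cycle counter can wrap safely between updates. Roughly (a simplified sketch of kernel/time/timecounter.c, ignoring the fractional-nanosecond bookkeeping):

#include <linux/timecounter.h>

static u64 timecounter_read_sketch(struct timecounter *tc)
{
	u64 now = tc->cc->read(tc->cc);
	u64 delta = (now - tc->cycle_last) & tc->cc->mask;

	/* advances internal state: tolerates counter wrap between calls */
	tc->nsec += (delta * tc->cc->mult) >> tc->cc->shift;
	tc->cycle_last = now;
	return tc->nsec;
}

static u64 timecounter_cyc2time_sketch(struct timecounter *tc, u64 cycles)
{
	u64 delta = (cycles - tc->cycle_last) & tc->cc->mask;

	/* pure conversion, no state update: hence the NOTE above about
	 * possibly non-monotonic readings
	 */
	return tc->nsec + ((delta * tc->cc->mult) >> tc->cc->shift);
}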
@@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = {
.pps = 0,
.adjfreq = e1000e_phc_adjfreq,
.adjtime = e1000e_phc_adjtime,
- .gettime64 = e1000e_phc_gettime,
+ .gettimex64 = e1000e_phc_gettimex,
.settime64 = e1000e_phc_settime,
.enable = e1000e_phc_enable,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 876cac317e79..8de9085bba9e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
__I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
@@ -146,6 +147,7 @@ enum i40e_state_t {
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
__I40E_CLIENT_RESET,
+ __I40E_VIRTCHNL_OP_PENDING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
};
@@ -494,7 +496,6 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
-#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u32 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT(0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 501ee718177f..7ab61f6ebb5f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ }
+ if (hw->mac.type == I40E_MAC_X722 &&
+ hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+ hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
}
/* Newer versions of firmware require lock when reading the NVM */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 80e3eec6134e..11506102471c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -11,7 +11,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X722 0x0006
#define I40E_FW_API_VERSION_MINOR_X710 0x0007
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
@@ -20,6 +20,8 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 85f75b5978fc..97a9b1fb4763 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
i40e_status status;
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_dcb_parameters);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 9f8464f80783..a6bc7847346b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ks->base.speed = SPEED_100;
break;
default:
+ ks->base.speed = SPEED_UNKNOWN;
break;
}
ks->base.duplex = DUPLEX_FULL;
@@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
i40e_status status;
u8 aq_failures;
int err = 0;
+ u32 is_an;
/* Changing the port's flow control is not supported if this isn't the
* port's controlling PF
@@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev,
if (vsi != pf->vsi[pf->lan_vsi])
return -EOPNOTSUPP;
- if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
- AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+ is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
+ if (pause->autoneg != is_an) {
netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
return -EOPNOTSUPP;
}
/* If we have link and don't have autoneg */
- if (!test_bit(__I40E_DOWN, pf->state) &&
- !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && !is_an) {
/* Send message that it might not necessarily work */
netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
}
@@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
err = -EAGAIN;
}
- if (!test_bit(__I40E_DOWN, pf->state)) {
+ if (!test_bit(__I40E_DOWN, pf->state) && is_an) {
/* Give it a little more time to try to come back */
msleep(75);
if (!test_bit(__I40E_DOWN, pf->state))
@@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC))
+ if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) &&
+ (wol->wolopts != WAKE_FILTER))
return -EOPNOTSUPP;
/* is this a new value? */
@@ -4659,14 +4661,15 @@ flags_complete:
return -EOPNOTSUPP;
/* If the driver detected FW LLDP was disabled on init, this flag could
- * be set, however we do not support _changing_ the flag if NPAR is
- * enabled or FW API version < 1.7. There are situations where older
- * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
- * not allow the user to enable/disable LLDP with this flag on
- * unsupported FW versions.
+ * be set, however we do not support _changing_ the flag:
+ * - on XL710 if NPAR is enabled or FW API version < 1.7
+ * - on X722 with FW API version < 1.6
+ * There are situations where older FW versions/NPAR enabled PFs could
+ * disable LLDP, however we _must_ not allow the user to enable/disable
+ * LLDP with this flag on unsupported FW versions.
*/
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
+ if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) {
dev_warn(&pf->pdev->dev,
"Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 21c2688d6308..47f0fdadbac9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_MINOR 7
+#define DRV_VERSION_BUILD 6
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
@@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
bool found = false;
int bkt;
- WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
- "Missing mac_filter_hash_lock\n");
+ lockdep_assert_held(&vsi->mac_filter_hash_lock);
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
if (ether_addr_equal(macaddr, f->macaddr)) {
__i40e_del_filter(vsi, f);
@@ -9632,6 +9635,7 @@ end_core_reset:
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
/**
@@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
- /* Stopping the FW LLDP engine is only supported on the
- * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
- * engine is not supported if NPAR is functioning on this
- * part
+ /* Stopping FW LLDP engine is supported on XL710 and X722
+ * starting from FW versions determined in i40e_init_adminq.
+ * Stopping the FW LLDP engine is not supported on XL710
+ * if NPAR is functioning, so unset this hw flag in that case.
*/
if (pf->hw.mac.type == I40E_MAC_XL710 &&
- !pf->hw.func_caps.npar_enable &&
- (pf->hw.aq.api_maj_ver > 1 ||
- (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
- pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
+ pf->hw.func_caps.npar_enable &&
+ (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
+ pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
@@ -14302,23 +14305,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strncpy(width, "8", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strncpy(width, "4", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strncpy(width, "2", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strncpy(width, "1", PCI_WIDTH_SIZE); break;
+ strlcpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
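The strncpy()-to-strlcpy() conversion in this hunk is a standard hardening step: when the source is as long as the buffer, strncpy() leaves the destination without a NUL terminator, whereas strlcpy() always terminates (and, unlike strncpy(), does not zero-pad). A small illustration with a hypothetical 4-byte buffer and source string:

#include <linux/string.h>

static void copy_demo(void)
{
	char buf[4];

	strncpy(buf, "8.0G", sizeof(buf));	/* {'8','.','0','G'}: no NUL */
	strlcpy(buf, "8.0G", sizeof(buf));	/* "8.0": truncated + NUL */
}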
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 1199f0502d6d..e6fc0aff8c99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -694,7 +694,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strncpy(pf->ptp_caps.name, i40e_driver_name,
+ strlcpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index aef3c89ee79c..c4d44096cdaf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3473,6 +3473,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -3526,6 +3528,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
u16 i = xdp_ring->next_to_use;
struct i40e_tx_buffer *tx_bi;
struct i40e_tx_desc *tx_desc;
+ void *data = xdpf->data;
u32 size = xdpf->len;
dma_addr_t dma;
@@ -3533,8 +3536,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
xdp_ring->tx_stats.tx_busy++;
return I40E_XDP_CONSUMED;
}
-
- dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
+ dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE);
if (dma_mapping_error(xdp_ring->dev, dma))
return I40E_XDP_CONSUMED;
@@ -3652,8 +3654,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 7df969c59855..2781ab91ca82 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -615,6 +615,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
u64 flags;
/* Used in set switch config AQ command */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index ac5698ed0b11..2ac23ebfbf31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi)
return I40E_ERR_PARAM;
- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
+ (allmulti || alluni)) {
dev_err(&pf->pdev->dev,
"Unprivileged VF %d is attempting to configure promiscuous mode\n",
vf->vf_id);
@@ -1675,13 +1676,20 @@ err_out:
int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ int ret = 0;
+
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
}
- return i40e_pci_sriov_enable(pdev, num_vfs);
+ ret = i40e_pci_sriov_enable(pdev, num_vfs);
+ goto sriov_configure_out;
}
if (!pci_vfs_assigned(pf->pdev)) {
@@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto sriov_configure_out;
}
- return 0;
+sriov_configure_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
+ return ret;
}
/***********************virtual channel routines******************/
@@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
goto error_param;
}
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
if (is_multicast_ether_addr(mac)) {
dev_err(&pf->pdev->dev,
"Invalid Ethernet address %pM for VF %d\n", mac, vf_id);
@@ -3941,6 +3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n");
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
ret = 0;
error_pvid:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
vf->tx_rate = max_tx_rate;
error:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
ret = i40e_validate_vf(pf, vf_id);
if (ret)
@@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ret = 0;
error_param:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
int abs_vf_id;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
0, (u8 *)&pfe, sizeof(pfe), NULL);
error_out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
@@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
ret = -EIO;
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
@@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
struct i40e_vf *vf;
int ret = 0;
+ if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {
+ dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n");
+ return -EAGAIN;
+ }
+
/* validate the request */
if (vf_id >= pf->num_alloc_vfs) {
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (pf->flags & I40E_FLAG_MFP_ENABLED) {
dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vf = &pf->vf[vf_id];
@@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
}
out:
+ clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state);
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index bf67d62e2b5f..f9621026beef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -13,9 +13,9 @@
#define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3
#define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10
-#define I40E_VLAN_PRIORITY_SHIFT 12
+#define I40E_VLAN_PRIORITY_SHIFT 13
#define I40E_VLAN_MASK 0xFFF
-#define I40E_PRIORITY_MASK 0x7000
+#define I40E_PRIORITY_MASK 0xE000
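The corrected values follow the 802.1Q tag control information (TCI) layout: the 3-bit PCP priority lives in bits 15:13, DEI in bit 12, and the 12-bit VLAN ID in bits 11:0, so the old shift of 12 and mask of 0x7000 wrongly straddled the DEI bit. For example:

/* 802.1Q TCI: | PCP (bits 15:13) | DEI (bit 12) | VLAN ID (bits 11:0) | */
u16 tci = (5 << I40E_VLAN_PRIORITY_SHIFT) | (100 & I40E_VLAN_MASK);

/* tci == 0xA064: priority 5, DEI 0, VLAN 100 */
u8 prio = (tci & I40E_PRIORITY_MASK) >> I40E_VLAN_PRIORITY_SHIFT;	/* 5 */
u16 vid = tci & I40E_VLAN_MASK;						/* 100 */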
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index fb9bfad96daf..3b1dc77ae368 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2343,6 +2343,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset, size, td_tag);
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch.
*
@@ -2461,8 +2463,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb,
if (tso < 0)
goto out_drop;
- skb_tx_timestamp(skb);
-
/* always enable CRC insertion offload */
td_cmd |= IAVF_TX_DESC_CMD_ICRC;
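The iavf hunks relocate skb_tx_timestamp() from early in the xmit path into the map routine, so the software timestamp is taken only after the descriptors are fully built and immediately before the tail/doorbell write; frames dropped on intervening error paths are never stamped. The same relocation appears later in this diff for igb, igc, ixgbe and ixgbevf. A compilable sketch of the ordering with stub types (none of these names are driver code):

#include <stdio.h>

struct ring { unsigned int tail_reg; unsigned int next_to_use; };

static void sw_timestamp(void)
{
	puts("sw timestamp");		/* stands in for skb_tx_timestamp(skb) */
}

static void submit(struct ring *r)
{
	/* descriptors are fully written at this point */
	sw_timestamp();			/* new call site: just before the doorbell */
	__sync_synchronize();		/* models the driver's wmb() */
	r->tail_reg = r->next_to_use;	/* doorbell write; hardware may fetch now */
	printf("tail <- %u\n", r->next_to_use);
}

int main(void)
{
	struct ring r = { 0, 1 };

	submit(&r);
	return 0;
}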
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b8548370f1c7..7d8575d11786 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -52,7 +52,6 @@ extern const char ice_drv_ver[];
#define ICE_MBXQ_LEN 64
#define ICE_MIN_MSIX 2
#define ICE_NO_VSI 0xffff
-#define ICE_MAX_VSI_ALLOC 130
#define ICE_MAX_TXQS 2048
#define ICE_MAX_RXQS 2048
#define ICE_VSI_MAP_CONTIG 0
@@ -113,7 +112,9 @@ extern const char ice_drv_ver[];
struct ice_tc_info {
u16 qoffset;
- u16 qcount;
+ u16 qcount_tx;
+ u16 qcount_rx;
+ u8 netdev_tc;
};
struct ice_tc_cfg {
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 6653555f55dd..4078070881ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -87,6 +87,7 @@ struct ice_aqc_list_caps {
/* Device/Function buffer entry, repeated per reported capability */
struct ice_aqc_list_caps_elem {
__le16 cap;
+#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005
#define ICE_AQC_CAPS_SRIOV 0x0012
#define ICE_AQC_CAPS_VF 0x0013
#define ICE_AQC_CAPS_VSI 0x0017
@@ -1065,10 +1066,10 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LAST_CMD BIT(0)
#define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */
#define ICE_AQC_NVM_PRESERVATION_S 1
-#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S)
-#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_PRESERVE_ALL BIT(1)
-#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S)
+#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S)
#define ICE_AQC_NVM_FLASH_ONLY BIT(7)
__le16 module_typeid;
__le16 length;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 554fd707a6d6..9de5a3aac77d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1387,6 +1387,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
+ * ice_get_guar_num_vsi - determine the number of guaranteed VSIs per PF
+ * @hw: pointer to the hw structure
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of VSIs per PF.
+ */
+static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+{
+ u8 funcs;
+
+#define ICE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
+ ICE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return ICE_MAX_VSI / funcs;
+}
+
+/**
* ice_parse_caps - parse function/device capabilities
* @hw: pointer to the hw struct
* @buf: pointer to a buffer containing function/device capability records
@@ -1428,6 +1449,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
u16 cap = le16_to_cpu(cap_resp->cap);
switch (cap) {
+ case ICE_AQC_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ ice_debug(hw, ICE_DBG_INIT,
+ "HW caps: Valid Functions = %d\n",
+ caps->valid_functions);
+ break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
ice_debug(hw, ICE_DBG_INIT,
@@ -1457,10 +1484,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guaranteed_num_vsi = number;
+ func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
- func_p->guaranteed_num_vsi);
+ number);
}
break;
case ICE_AQC_CAPS_RSS:
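ice_get_guar_num_vsi() above splits the device's VSI budget evenly among the PFs the firmware reports as valid, counting set bits in the low byte of the valid-functions bitmap via hweight8(). A standalone model of the calculation (__builtin_popcount stands in for hweight8; the ICE_MAX_VSI value of 768 is assumed here for illustration, see ice_type.h for the real one):

#include <stdint.h>
#include <stdio.h>

#define ICE_MAX_VSI 768u		/* illustrative */
#define ICE_CAPS_VALID_FUNCS_M 0xFF

static uint32_t guar_num_vsi(uint32_t valid_functions)
{
	/* hweight8() in the kernel; popcount of the low byte here */
	unsigned int funcs =
		__builtin_popcount(valid_functions & ICE_CAPS_VALID_FUNCS_M);

	return funcs ? ICE_MAX_VSI / funcs : 0;
}

int main(void)
{
	printf("%u\n", guar_num_vsi(0x0F));	/* 4 PFs -> 192 VSIs each */
	printf("%u\n", guar_num_vsi(0x00));	/* no valid PFs -> 0 */
	return 0;
}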
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 596b9fb1c510..5507928c8fbe 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -7,6 +7,9 @@
#define _ICE_HW_AUTOGEN_H_
#define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4))
+#define QTX_COMM_HEAD_HEAD_S 0
+#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0)
#define PF_FW_ARQBAH 0x00080180
#define PF_FW_ARQBAL 0x00080080
#define PF_FW_ARQH 0x00080380
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1041fa2a7767..a5961a8fe73c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
{
int i;
- for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) {
+ for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
break;
- usleep_range(10, 20);
+ usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_RETRY_LIMIT)
+ if (i >= ICE_Q_WAIT_MAX_RETRY)
return -ETIMEDOUT;
return 0;
@@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
*/
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
- u16 offset = 0, qmap = 0, numq_tc;
- u16 pow = 0, max_rss = 0, qcount;
+ u16 offset = 0, qmap = 0, tx_count = 0;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
+ u16 tx_numq_tc, rx_numq_tc;
+ u16 pow = 0, max_rss = 0;
bool ena_tc0 = false;
+ u8 netdev_tc = 0;
int i;
/* at least TC0 should be enabled by default */
@@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->tc_cfg.ena_tc |= 1;
}
- numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
+ if (!rx_numq_tc)
+ rx_numq_tc = 1;
+ tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
+ if (!tx_numq_tc)
+ tx_numq_tc = 1;
/* TC mapping is a function of the number of Rx queues assigned to the
* VSI for each traffic class and the offset of these queues.
@@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
* Setup number and offset of Rx queues for all TCs for the VSI
*/
- qcount = numq_tc;
+ qcount_rx = rx_numq_tc;
+
/* qcount will change if RSS is enabled */
if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
@@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
max_rss = ICE_MAX_LG_RSS_QS;
else
max_rss = ICE_MAX_SMALL_RSS_QS;
- qcount = min_t(int, numq_tc, max_rss);
- qcount = min_t(int, qcount, vsi->rss_size);
+ qcount_rx = min_t(int, rx_numq_tc, max_rss);
+ qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
}
}
/* find the (rounded up) power-of-2 of qcount */
- pow = order_base_2(qcount);
+ pow = order_base_2(qcount_rx);
for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
- vsi->tc_cfg.tc_info[i].qcount = 1;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
ctxt->info.tc_mapping[i] = 0;
continue;
}
/* TC is enabled */
vsi->tc_cfg.tc_info[i].qoffset = offset;
- vsi->tc_cfg.tc_info[i].qcount = qcount;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
ICE_AQ_VSI_TC_Q_OFFSET_M) |
((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
ICE_AQ_VSI_TC_Q_NUM_M);
- offset += qcount;
+ offset += qcount_rx;
+ tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
-
- vsi->num_txq = qcount_tx;
vsi->num_rxq = offset;
+ vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
@@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg_buf;
struct ice_aqc_add_txqs_perq *txq;
struct ice_pf *pf = vsi->back;
+ u8 num_q_grps, q_idx = 0;
enum ice_status status;
u16 buf_len, i, pf_q;
int err = 0, tc = 0;
- u8 num_q_grps;
buf_len = sizeof(struct ice_aqc_add_tx_qgrp);
qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
if (!qg_buf)
return -ENOMEM;
- if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) {
- err = -EINVAL;
- goto err_cfg_txqs;
- }
qg_buf->num_txqs = 1;
num_q_grps = 1;
- /* set up and configure the Tx queues */
- ice_for_each_txq(vsi, i) {
- struct ice_tlan_ctx tlan_ctx = { 0 };
+ /* set up and configure the Tx queues for each enabled TC */
+ for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
+ break;
- pf_q = vsi->txq_map[i];
- ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q);
- /* copy context contents into the qg_buf */
- qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
- ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
- ice_tlan_ctx_info);
+ for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
+ struct ice_tlan_ctx tlan_ctx = { 0 };
+
+ pf_q = vsi->txq_map[q_idx];
+ ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx,
+ pf_q);
+ /* copy context contents into the qg_buf */
+ qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+ ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+ ice_tlan_ctx_info);
+
+ /* init queue specific tail reg. It is referred to as the
+ * transmit comm scheduler queue doorbell.
+ */
+ vsi->tx_rings[q_idx]->tail =
+ pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ num_q_grps, qg_buf, buf_len,
+ NULL);
+ if (status) {
+ dev_err(&vsi->back->pdev->dev,
+ "Failed to set LAN Tx queue context, error: %d\n",
+ status);
+ err = -ENODEV;
+ goto err_cfg_txqs;
+ }
- /* init queue specific tail reg. It is referred as transmit
- * comm scheduler queue doorbell.
- */
- vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
- num_q_grps, qg_buf, buf_len, NULL);
- if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Tx queue context, error: %d\n",
- status);
- err = -ENODEV;
- goto err_cfg_txqs;
- }
+ /* Add Tx Queue TEID into the VSI Tx ring from the
+ * response. This will complete configuring and
+ * enabling the queue.
+ */
+ txq = &qg_buf->txqs[0];
+ if (pf_q == le16_to_cpu(txq->txq_id))
+ vsi->tx_rings[q_idx]->txq_teid =
+ le32_to_cpu(txq->q_teid);
- /* Add Tx Queue TEID into the VSI Tx ring from the response
- * This will complete configuring and enabling the queue.
- */
- txq = &qg_buf->txqs[0];
- if (pf_q == le16_to_cpu(txq->txq_id))
- vsi->tx_rings[i]->txq_teid =
- le32_to_cpu(txq->q_teid);
+ q_idx++;
+ }
}
err_cfg_txqs:
devm_kfree(&pf->pdev->dev, qg_buf);
@@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
ice_for_each_txq(vsi, i) {
u16 v_idx;
- if (!vsi->tx_rings || !vsi->tx_rings[i]) {
+ if (!vsi->tx_rings || !vsi->tx_rings[i] ||
+ !vsi->tx_rings[i]->q_vector) {
err = -EINVAL;
goto err_out;
}
@@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
/* set RSS capabilities */
ice_vsi_set_rss_params(vsi);
+ /* set tc configuration */
+ ice_vsi_set_tc_cfg(vsi);
+
/* create the VSI */
ret = ice_vsi_init(vsi);
if (ret)
@@ -2119,11 +2142,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
goto unroll_vsi_init;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
@@ -2491,6 +2512,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
ice_vsi_clear_rings(vsi);
@@ -2518,11 +2540,14 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_pf *pf;
int ret, i;
if (!vsi)
return -EINVAL;
+ pf = vsi->back;
+ ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
@@ -2532,6 +2557,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
ice_vsi_set_num_qs(vsi);
+ ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
ret = ice_vsi_init(vsi);
@@ -2578,11 +2604,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
break;
}
- ice_vsi_set_tc_cfg(vsi);
-
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->num_txq;
+ max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
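After this change ice_vsi_setup_q_map() tracks Tx and Rx queue counts per TC independently: Rx queues are laid out contiguously and encoded into the per-TC qmap word as an offset plus a power-of-two count (order_base_2), while Tx queues are simply accumulated per enabled TC. A small model of the qmap encoding for one TC (the shift/mask values below are illustrative stand-ins patterned after the ICE_AQ_VSI_TC_Q_* macros; consult ice_adminq_cmd.h for the authoritative layout):

#include <stdint.h>
#include <stdio.h>

#define TC_Q_OFFSET_S 0			/* illustrative field layout */
#define TC_Q_OFFSET_M (0x7FF << TC_Q_OFFSET_S)
#define TC_Q_NUM_S    11
#define TC_Q_NUM_M    (0xF << TC_Q_NUM_S)

static uint16_t make_qmap(uint16_t offset, uint16_t pow)
{
	/* offset of the TC's first Rx queue, count encoded as 2^pow */
	return ((offset << TC_Q_OFFSET_S) & TC_Q_OFFSET_M) |
	       ((pow << TC_Q_NUM_S) & TC_Q_NUM_M);
}

int main(void)
{
	/* a TC starting at queue 8 with 2^2 = 4 Rx queues */
	printf("qmap=0x%04x\n", (unsigned)make_qmap(8, 2));
	return 0;
}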
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 333312a1d595..292d19e65af0 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf)
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf);
+ if (hw->port_info)
+ ice_sched_clear_port(hw->port_info);
+
ice_shutdown_all_ctrlq(hw);
set_bit(__ICE_PREPARED_FOR_RESET, pf->state);
@@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev,
ice_determine_q_usage(pf);
- pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC,
- hw->func_caps.guaranteed_num_vsi);
+ pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
@@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
if (err)
return err;
}
-
err = ice_vsi_cfg_txqs(vsi);
if (!err)
err = ice_vsi_cfg_rxqs(vsi);
@@ -3138,8 +3139,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
+ * @locked: is the rtnl_lock already held
*/
-static void ice_dis_vsi(struct ice_vsi *vsi)
+static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
@@ -3148,9 +3150,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
- rtnl_lock();
- vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
- rtnl_unlock();
+ if (!locked) {
+ rtnl_lock();
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ rtnl_unlock();
+ } else {
+ vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+ }
} else {
ice_vsi_close(vsi);
}
@@ -3189,7 +3195,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf)
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
- ice_dis_vsi(pf->vsi[v]);
+ ice_dis_vsi(pf->vsi[v], false);
}
/**
@@ -3691,8 +3697,8 @@ static void ice_tx_timeout(struct net_device *netdev)
struct ice_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- u32 head, val = 0, i;
int hung_queue = -1;
+ u32 i;
pf->tx_timeout_count++;
@@ -3736,17 +3742,20 @@ static void ice_tx_timeout(struct net_device *netdev)
return;
if (tx_ring) {
- head = tx_ring->next_to_clean;
+ struct ice_hw *hw = &pf->hw;
+ u32 head, val = 0;
+
+ head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) &
+ QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;
/* Read interrupt register */
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
- val = rd32(&pf->hw,
+ val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
tx_ring->vsi->hw_base_vector));
- netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
+ netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
- head, tx_ring->next_to_use,
- readl(tx_ring->tail), val);
+ head, tx_ring->next_to_use, val);
}
pf->tx_timeout_last_recovery = jiffies;
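With QTX_COMM_HEAD defined in ice_hw_autogen.h, the Tx-timeout report above can print the queue head the hardware actually reached instead of the driver's next_to_clean. The decode is a plain mask-and-shift; a standalone check using the added values:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define QTX_COMM_HEAD_HEAD_S 0
#define QTX_COMM_HEAD_HEAD_M (0x1FFF << QTX_COMM_HEAD_HEAD_S)	/* ICE_M(0x1FFF, 0) */

int main(void)
{
	uint32_t reg = 0xABCD1234;	/* pretend register read */
	uint32_t head = (reg & QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S;

	assert(head == (0x1234 & 0x1FFF));
	printf("head=0x%x\n", head);
	return 0;
}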
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7cc8aa18a22b..7293e4f4d758 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi)
*
* Cleanup scheduling elements from SW DB
*/
-static void ice_sched_clear_port(struct ice_port_info *pi)
+void ice_sched_clear_port(struct ice_port_info *pi)
{
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return;
@@ -1527,7 +1527,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_cfg_vsi - configure the new/exisiting VSI
+ * ice_sched_cfg_vsi - configure the new/existing VSI
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc: TC number
@@ -1605,3 +1605,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
return status;
}
+
+/**
+ * ice_sched_rm_agg_vsi_info - remove agg related VSI info entry
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function removes a single aggregator VSI info entry from the
+ * aggregator list.
+ */
+static void
+ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle)
+{
+ struct ice_sched_agg_info *agg_info;
+ struct ice_sched_agg_info *atmp;
+
+ list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) {
+ struct ice_sched_agg_vsi_info *agg_vsi_info;
+ struct ice_sched_agg_vsi_info *vtmp;
+
+ list_for_each_entry_safe(agg_vsi_info, vtmp,
+ &agg_info->agg_vsi_list, list_entry)
+ if (agg_vsi_info->vsi_handle == vsi_handle) {
+ list_del(&agg_vsi_info->list_entry);
+ devm_kfree(ice_hw_to_dev(pi->hw),
+ agg_vsi_info);
+ return;
+ }
+ }
+}
+
+/**
+ * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @owner: LAN or RDMA
+ *
+ * This function removes the VSI and its LAN or RDMA children nodes from the
+ * scheduler tree.
+ */
+static enum ice_status
+ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_vsi_ctx *vsi_ctx;
+ u8 i, j = 0;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return status;
+ mutex_lock(&pi->sched_lock);
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ goto exit_sched_rm_vsi_cfg;
+
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ struct ice_sched_node *vsi_node, *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, i);
+ if (!tc_node)
+ continue;
+
+ vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle);
+ if (!vsi_node)
+ continue;
+
+ while (j < vsi_node->num_children) {
+ if (vsi_node->children[j]->owner == owner) {
+ ice_free_sched_node(pi, vsi_node->children[j]);
+
+ /* reset the counter again since the num
+ * children will be updated after node removal
+ */
+ j = 0;
+ } else {
+ j++;
+ }
+ }
+ /* remove the VSI if it has no children */
+ if (!vsi_node->num_children) {
+ ice_free_sched_node(pi, vsi_node);
+ vsi_ctx->sched.vsi_node[i] = NULL;
+
+ /* clean up agg related vsi info if any */
+ ice_sched_rm_agg_vsi_info(pi, vsi_handle);
+ }
+ if (owner == ICE_SCHED_NODE_OWNER_LAN)
+ vsi_ctx->sched.max_lanq[i] = 0;
+ }
+ status = 0;
+
+exit_sched_rm_vsi_cfg:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its LAN children nodes from the scheduler
+ * tree for all TCs.
+ */
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
+}
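ice_sched_rm_vsi_cfg() deletes matching children from a node array that compacts on each removal, so the scan index restarts from zero after every delete rather than stepping past an element that just shifted into the current slot. A minimal model of that scan over a plain array instead of scheduler nodes:

#include <stdio.h>

int main(void)
{
	/* 1 = owned by LAN (remove), 0 = other owner (keep) */
	int owner[] = { 1, 0, 1, 1, 0 };
	int n = 5, j = 0;

	while (j < n) {
		if (owner[j] == 1) {
			/* removal compacts, like ice_free_sched_node() */
			for (int k = j; k < n - 1; k++)
				owner[k] = owner[k + 1];
			n--;
			j = 0;	/* restart: indices shifted under us */
		} else {
			j++;
		}
	}
	printf("%d children left\n", n);	/* prints 2 */
	return 0;
}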
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 5dc9cfa04c58..da5b4c166da8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -12,6 +12,7 @@
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
+ u16 vsi_handle;
};
struct ice_sched_agg_info {
@@ -25,6 +26,7 @@ struct ice_sched_agg_info {
/* FW AQ command calls */
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+void ice_sched_clear_port(struct ice_port_info *pi);
void ice_sched_cleanup_all(struct ice_hw *hw);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
@@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
+enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index f4dbc81c1988..0ea428104215 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -124,6 +124,8 @@ struct ice_phy_info {
/* Common HW capabilities for SW use */
struct ice_hw_common_caps {
+ u32 valid_functions;
+
/* TX/RX queues */
u16 num_rxq; /* Number/Total RX queues */
u16 rxq_first_id; /* First queue ID for RX queues */
@@ -150,7 +152,7 @@ struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
u32 num_allocd_vfs; /* Number of allocated VFs */
u32 vf_base_id; /* Logical ID of the first VF */
- u32 guaranteed_num_vsi;
+ u32 guar_num_vsi;
};
/* Device wide capabilities */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e71065f9d391..20b94dee0036 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -215,6 +215,15 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
+ /* Disable IOV before freeing resources. This lets any VF drivers
+ * running in the host get themselves cleaned up before we yank
+ * the carpet out from underneath their feet.
+ */
+ if (!pci_vfs_assigned(pf->pdev))
+ pci_disable_sriov(pf->pdev);
+ else
+ dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
+
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) {
if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
@@ -228,15 +237,6 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
}
- /* Disable IOV before freeing resources. This lets any VF drivers
- * running in the host get themselves cleaned up before we yank
- * the carpet out from underneath their feet.
- */
- if (!pci_vfs_assigned(pf->pdev))
- pci_disable_sriov(pf->pdev);
- else
- dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
-
tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 5acf3b743876..c57671068245 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER))
return -EOPNOTSUPP;
if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 5df88ad8ac81..4584ebc9e8fe 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6019,6 +6019,8 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -6147,8 +6149,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
else if (!tso)
igb_tx_csum(tx_ring, first);
- skb_tx_timestamp(skb);
-
if (igb_tx_map(tx_ring, first, hdr_len))
goto cleanup_tx_tstamp;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 2b95dc9c7a6a..fd3071f55bd3 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta)
return 0;
}
-static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
+ u32 lo, hi;
u64 ns;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- ns = timecounter_read(&igb->tc);
+ ptp_read_system_prets(sts);
+ lo = rd32(E1000_SYSTIML);
+ ptp_read_system_postts(sts);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
+
+ spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
+ ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
+ unsigned long flags;
+ u32 lo, hi;
+ u64 ns;
+
+ spin_lock_irqsave(&igb->tmreg_lock, flags);
+
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ lo = rd32(E1000_SYSTIML);
+ hi = rd32(E1000_SYSTIMH);
+
+ ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct igb_adapter *igb = container_of(ptp, struct igb_adapter,
ptp_caps);
+ struct e1000_hw *hw = &igb->hw;
unsigned long flags;
spin_lock_irqsave(&igb->tmreg_lock, flags);
- igb_ptp_read_i210(igb, ts);
+ ptp_read_system_prets(sts);
+ rd32(E1000_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = rd32(E1000_SYSTIML);
+ ts->tv_sec = rd32(E1000_SYSTIMH);
spin_unlock_irqrestore(&igb->tmreg_lock, flags);
@@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work)
struct igb_adapter *igb =
container_of(work, struct igb_adapter, ptp_overflow_work.work);
struct timespec64 ts;
+ u64 ns;
- igb->ptp_caps.gettime64(&igb->ptp_caps, &ts);
+ /* Update the timecounter */
+ ns = timecounter_read(&igb->tc);
+ ts = ns_to_timespec64(ns);
pr_debug("igb overflow check at %lld.%09lu\n",
(long long) ts.tv_sec, ts.tv_nsec);
@@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
@@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580;
adapter->ptp_caps.settime64 = igb_ptp_settime_82576;
adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
@@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.pin_config = adapter->sdp_config;
adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580;
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
- adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210;
+ adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210;
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
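Each gettimex64 variant above brackets exactly one device access with ptp_read_system_prets()/ptp_read_system_postts(): the system clock is sampled tightly around the read that latches the hardware time (SYSTIMR on 82580/i210, SYSTIML on 82576), which bounds the PHC-to-system offset error to that window. A runnable model of the bracketing using CLOCK_MONOTONIC as the "system" clock and a dummy latching read:

#include <stdio.h>
#include <time.h>

struct sys_ts { struct timespec pre, post; };

static unsigned int latch_read(void)	/* stands in for rd32(E1000_SYSTIMR) */
{
	return 0;
}

static void gettimex_model(struct sys_ts *sts)
{
	clock_gettime(CLOCK_MONOTONIC, &sts->pre);	/* ptp_read_system_prets() */
	(void)latch_read();				/* the latching access */
	clock_gettime(CLOCK_MONOTONIC, &sts->post);	/* ptp_read_system_postts() */
	/* ...the latched low/high time registers are read afterwards... */
}

int main(void)
{
	struct sys_ts sts;

	gettimex_model(&sts);
	printf("window: %ld ns\n",
	       (sts.post.tv_sec - sts.pre.tv_sec) * 1000000000L +
	       (sts.post.tv_nsec - sts.pre.tv_nsec));
	return 0;
}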
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 163e5838f7c2..a3cd7ac48d4b 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
@@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size)
s32 err;
u16 i;
- WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock));
+ lockdep_assert_held(&hw->mbx_lock);
/* lock the mailbox to prevent pf/vf race condition */
err = e1000_obtain_mbx_lock_vf(hw);
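The mbx.c change swaps a check that could never assert anything useful: spin_is_locked() only says whether anyone holds the lock (and is always false on UP builds), while lockdep_assert_held() asserts that the current context holds it and compiles away without CONFIG_LOCKDEP. A rough userspace analogue that tracks the owner beside a pthread mutex (lockdep needs no such shadow state):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mbx_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t mbx_owner;	/* shadow owner; lockdep tracks this itself */

static void assert_held(void)	/* models lockdep_assert_held(&hw->mbx_lock) */
{
	assert(pthread_equal(mbx_owner, pthread_self()));
}

static void write_mbx(void)
{
	assert_held();		/* caller must already hold the lock */
	puts("mailbox written");
}

int main(void)
{
	pthread_mutex_lock(&mbx_lock);
	mbx_owner = pthread_self();
	write_mbx();
	pthread_mutex_unlock(&mbx_lock);
	return 0;
}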
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index cdf18a5d9e08..3b00b109b34a 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -5,23 +5,14 @@
#define _IGC_H_
#include <linux/kobject.h>
-
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-
#include <linux/ethtool.h>
-
#include <linux/sctp.h>
#define IGC_ERR(args...) pr_err("igc: " args)
-#define PFX "igc: "
-
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <linux/ptp_clock_kernel.h>
-
#include "igc_hw.h"
/* main */
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 832da609d9a7..df40af759542 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
{
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- u32 ctrl_ext;
if (hw->phy.media_type != igc_media_type_copper) {
phy->type = igc_phy_none;
@@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
- ctrl_ext = rd32(IGC_CTRL_EXT);
-
/* set lan id */
hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >>
IGC_STATUS_FUNC_SHIFT;
@@ -287,8 +284,6 @@ out:
static s32 igc_get_invariants_base(struct igc_hw *hw)
{
struct igc_mac_info *mac = &hw->mac;
- u32 link_mode = 0;
- u32 ctrl_ext = 0;
s32 ret_val = 0;
switch (hw->device_id) {
@@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
hw->phy.media_type = igc_media_type_copper;
- ctrl_ext = rd32(IGC_CTRL_EXT);
- link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK;
-
/* mac initialization and operations */
ret_val = igc_init_mac_params_base(hw);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9d85707e8a81..d002055c0623 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
- skb_tx_timestamp(skb);
-
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
@@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
/* update pointers within the skb to store the data */
skb_reserve(skb, IGC_SKB_PAD);
- __skb_put(skb, size);
+ __skb_put(skb, size);
/* update buffer offset */
#if (PAGE_SIZE < 8192)
@@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
(va + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
+ rx_buffer->page_offset ^= truesize;
#else
- rx_buffer->page_offset += truesize;
+ rx_buffer->page_offset += truesize;
#endif
} else {
rx_buffer->pagecnt_bias++;
@@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
tx_buffer->next_to_watch,
jiffies,
tx_buffer->next_to_watch->wb.status);
- netif_stop_subqueue(tx_ring->netdev,
- tx_ring->queue_index);
+ netif_stop_subqueue(tx_ring->netdev,
+ tx_ring->queue_index);
/* we are about to reset, no point in enabling stuff */
return true;
@@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
}
/**
- * igc_ioctl - I/O control method
- * @netdev: network interface device structure
- * @ifreq: frequency
- * @cmd: command
- */
-static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- default:
- return -EOPNOTSUPP;
- }
-}
-
-/**
* igc_up - Open the interface and prepare it to handle traffic
* @adapter: board private structure
*/
@@ -3358,7 +3344,7 @@ static int __igc_open(struct net_device *netdev, bool resuming)
goto err_req_irq;
/* Notify the stack of the actual queue counts. */
- netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
goto err_set_queues;
@@ -3445,7 +3431,6 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
- .ndo_do_ioctl = igc_ioctl,
};
/* PCIe configuration access */
@@ -3532,19 +3517,16 @@ static int igc_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igc_hw *hw;
const struct igc_info *ei = igc_info_tbl[ent->driver_data];
- int err, pci_using_dac;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
return err;
- pci_using_dac = 0;
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(64));
- if (!err)
- pci_using_dac = 1;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 732b1e6ecc43..acba067cc15a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
+ WAKE_FILTER))
return -EOPNOTSUPP;
if (ixgbe_wol_exclusion(adapter, wol))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index fd1b0546fd67..4d77f42e035c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -4,6 +4,7 @@
#include "ixgbe.h"
#include <net/xfrm.h>
#include <crypto/aead.h>
+#include <linux/if_bridge.h>
#define IXGBE_IPSEC_KEY_BITS 160
static const char aes_gcm_name[] = "rfc4106(gcm(aes))";
@@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
} else {
struct tx_sa tsa;
- if (adapter->num_vfs)
+ if (adapter->num_vfs &&
+ adapter->bridge_mode != BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
/* find the first unused index */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 113b38e0defb..cfb83687c3d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8269,6 +8269,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/*
* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
@@ -8646,8 +8648,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
}
}
- skb_tx_timestamp(skb);
-
#ifdef CONFIG_PCI_IOV
/*
* Use the l2switch_enable flag - would be false if the DMA
@@ -10517,7 +10517,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring)
ixgbe_configure_rx_ring(adapter, rx_ring);
clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state);
- clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
+ if (xdp_ring)
+ clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index b3e0d8bb5cbd..d81a50dc9535 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * ixgbe_ptp_gettime
+ * ixgbe_ptp_gettimex
* @ptp: the ptp clock structure
- * @ts: timespec structure to hold the current time value
+ * @ts: timespec to hold the PHC timestamp
+ * @sts: structure to hold the system time before and after reading the PHC
*
* read the timecounter and return the correct value in ns,
* after converting it into a struct timespec64.
*/
-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
+ struct ixgbe_hw *hw = &adapter->hw;
unsigned long flags;
- u64 ns;
+ u64 ns, stamp;
spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->hw_tc);
+
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ /* Upper 32 bits represent billions of cycles, lower 32 bits
+ * represent cycles. However, we use timespec64_to_ns for the
+ * correct math even though the units haven't been corrected
+ * yet.
+ */
+ ptp_read_system_prets(sts);
+ IXGBE_READ_REG(hw, IXGBE_SYSTIMR);
+ ptp_read_system_postts(sts);
+ ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH);
+ stamp = timespec64_to_ns(ts);
+ break;
+ default:
+ ptp_read_system_prets(sts);
+ stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ ptp_read_system_postts(sts);
+ stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
+ break;
+ }
+
+ ns = timecounter_cyc2time(&adapter->hw_tc, stamp);
+
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
IXGBE_OVERFLOW_PERIOD);
- struct timespec64 ts;
+ unsigned long flags;
if (timeout) {
- ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
+ /* Update the timecounter */
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_read(&adapter->hw_tc);
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
adapter->last_overflow_check = jiffies;
}
}
@@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 1;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540;
@@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
@@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
adapter->ptp_caps.pps = 0;
adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550;
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
- adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime;
+ adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex;
adapter->ptp_caps.settime64 = ixgbe_ptp_settime;
adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
adapter->ptp_setup_sdp = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 5e47ede7e832..196b890467b2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4016,6 +4016,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
/* set the timestamp */
first->time_stamp = jiffies;
+ skb_tx_timestamp(skb);
+
/* Force memory writes to complete before letting h/w know there
* are new descriptors to fetch. (Only applicable for weak-ordered
* memory model archs, such as IA-64).
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1e9bcbdc6a90..2f427271a793 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp,
struct ethtool_link_ksettings *cmd)
{
struct net_device *dev = mp->dev;
- u32 supported, advertising;
phy_ethtool_ksettings_get(dev->phydev, cmd);
/*
* The MAC does not support 1000baseT_Half.
*/
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
- supported &= ~SUPPORTED_1000baseT_Half;
- advertising &= ~ADVERTISED_1000baseT_Half;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ cmd->link_modes.advertising);
return 0;
}
@@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
phy->autoneg = AUTONEG_ENABLE;
phy->speed = 0;
phy->duplex = 0;
- phy->advertising = phy->supported | ADVERTISED_Autoneg;
+ linkmode_copy(phy->advertising, phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phy->advertising);
} else {
phy->autoneg = AUTONEG_DISABLE;
- phy->advertising = 0;
+ linkmode_zero(phy->advertising);
phy->speed = speed;
phy->duplex = duplex;
}
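Since phylib's supported/advertising masks became full linkmode bitmaps, drivers edit them with linkmode_copy()/linkmode_set_bit()/linkmode_clear_bit() instead of round-tripping through the legacy u32 masks, which silently drop link modes above bit 31. A standalone model of the copy/set/clear sequence on a single-word bitmap (bit indices are illustrative; the kernel uses multi-word bitmaps declared with __ETHTOOL_DECLARE_LINK_MODE_MASK):

#include <stdio.h>

#define MODE_1000BASET_HALF 8	/* illustrative bit indices */
#define MODE_AUTONEG        6

int main(void)
{
	unsigned long supported = (1ul << MODE_1000BASET_HALF) |
				  (1ul << MODE_AUTONEG);
	unsigned long advertising;

	advertising = supported;			/* linkmode_copy() */
	advertising |= 1ul << MODE_AUTONEG;		/* linkmode_set_bit() */
	advertising &= ~(1ul << MODE_1000BASET_HALF);	/* linkmode_clear_bit() */

	printf("advertising=0x%lx\n", advertising);
	return 0;
}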
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b374f5e..c7cd0081058e 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
skb->ip_summed = re->skb->ip_summed;
skb->csum = re->skb->csum;
skb_copy_hash(skb, re->skb);
- skb->vlan_proto = re->skb->vlan_proto;
- skb->vlan_tci = re->skb->vlan_tci;
+ __vlan_hwaccel_copy_tag(skb, re->skb);
pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
length, PCI_DMA_FROMDEVICE);
- re->skb->vlan_proto = 0;
- re->skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(re->skb);
skb_clear_hash(re->skb);
re->skb->ip_summed = CHECKSUM_NONE;
skb_put(skb, length);
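The sky2 hunk replaces open-coded stores to skb->vlan_proto/vlan_tci with the __vlan_hwaccel_copy_tag()/__vlan_hwaccel_clear_tag() helpers, keeping the tag bookkeeping in one place. A sketch of what the pair does, using a stub skb (the real helpers live in include/linux/if_vlan.h and also track tag presence):

#include <stdint.h>
#include <stdio.h>

struct skb_stub { uint16_t vlan_proto, vlan_tci; };

static void copy_tag(struct skb_stub *dst, const struct skb_stub *src)
{
	dst->vlan_proto = src->vlan_proto;	/* __vlan_hwaccel_copy_tag() */
	dst->vlan_tci = src->vlan_tci;
}

static void clear_tag(struct skb_stub *skb)
{
	skb->vlan_proto = 0;			/* __vlan_hwaccel_clear_tag() */
	skb->vlan_tci = 0;
}

int main(void)
{
	struct skb_stub rx = { 0x8100, 0x2064 }, copy = { 0, 0 };

	copy_tag(&copy, &rx);
	clear_tag(&rx);
	printf("copy: proto=0x%04x tci=0x%04x\n",
	       (unsigned)copy.vlan_proto, (unsigned)copy.vlan_tci);
	return 0;
}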
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 7dbfdac4067a..399f565dd85a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (dev->phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising);
flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (flowctrl & FLOW_CTRL_TX)
@@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev)
phy_set_max_speed(dev->phydev, SPEED_1000);
phy_support_asym_pause(dev->phydev);
- dev->phydev->advertising = dev->phydev->supported |
- ADVERTISED_Autoneg;
+ linkmode_copy(dev->phydev->advertising, dev->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ dev->phydev->advertising);
phy_start_aneg(dev->phydev);
of_node_put(np);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index db00bf1c23f5..fd09ba98c0a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -875,7 +875,7 @@ csum_none:
skb->data_len = length;
napi_gro_frags(&cq->napi);
} else {
- skb->vlan_tci = 0;
+ __vlan_hwaccel_clear_tag(skb);
skb_clear_hash(skb);
}
next:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 8a291eb36c64..080ddd1942ec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -80,6 +80,7 @@ config MLXSW_SPECTRUM
depends on IPV6_GRE || IPV6_GRE=n
select GENERIC_ALLOCATOR
select PARMAN
+ select OBJAGG
select MLXFW
default m
---help---
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index 785bf01fe2be..df78d23b3ec3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end)
+ char *key, char *mask)
{
+ unsigned int blocks_count =
+ mlxsw_afk_key_info_blocks_count_get(key_info);
char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE];
const struct mlxsw_afk_element_inst *elinst;
enum mlxsw_afk_element element;
int block_index, i;
- for (i = block_start; i <= block_end; i++) {
+ for (i = 0; i < blocks_count; i++) {
memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE);
@@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
values->storage.mask);
}
- if (key)
- mlxsw_afk->ops->encode_block(block_key, i, key);
- if (mask)
- mlxsw_afk->ops->encode_block(block_mask, i, mask);
+ mlxsw_afk->ops->encode_block(key, i, block_key);
+ mlxsw_afk->ops->encode_block(mask, i, block_mask);
}
}
EXPORT_SYMBOL(mlxsw_afk_encode);
+
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end)
+{
+ int i;
+
+ for (i = block_start; i <= block_end; i++)
+ mlxsw_afk->ops->clear_block(key, i);
+}
+EXPORT_SYMBOL(mlxsw_afk_clear);
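mlxsw_afk_encode() now always encodes every block of the key (the count comes from mlxsw_afk_key_info_blocks_count_get()), and the encode_block op takes the destination first; clearing a block sub-range is split out into the new mlxsw_afk_clear(). A toy model of the encode-all loop with the new argument order (block size and contents are made up):

#include <stdio.h>

#define BLOCK_SZ 4

/* toy encode_block with the new argument order: destination first */
static void encode_block(char *output, int block_index, const char *block)
{
	int i;

	for (i = 0; i < BLOCK_SZ; i++)
		output[block_index * BLOCK_SZ + i] = block[i];
}

int main(void)
{
	const char blocks[2][BLOCK_SZ] = {
		{ 'a', 'b', 'c', 'd' }, { 'e', 'f', 'g', 'h' }
	};
	char key[2 * BLOCK_SZ] = { 0 };
	int blocks_count = 2;	/* mlxsw_afk_key_info_blocks_count_get() */
	int i;

	for (i = 0; i < blocks_count; i++)	/* encode every block */
		encode_block(key, i, blocks[i]);

	printf("%.8s\n", key);	/* abcdefgh */
	return 0;
}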
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index c29c045d826d..bcd264135af7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -188,7 +188,8 @@ struct mlxsw_afk;
struct mlxsw_afk_ops {
const struct mlxsw_afk_block *blocks;
unsigned int blocks_count;
- void (*encode_block)(char *block, int block_index, char *output);
+ void (*encode_block)(char *output, int block_index, char *block);
+ void (*clear_block)(char *output, int block_index);
};
struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks,
@@ -228,6 +229,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values,
void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk,
struct mlxsw_afk_key_info *key_info,
struct mlxsw_afk_element_values *values,
- char *key, char *mask, int block_start, int block_end);
+ char *key, char *mask);
+void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key,
+ int block_start, int block_end);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index db3d2790aeec..be2ffbd19e3a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -2834,8 +2834,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
u32 priority,
const char *tcam_region_info,
const char *key, u8 erp_id,
- bool large_exists, u32 lkey_id,
- u32 action_pointer)
+ u16 delta_start, u8 delta_mask,
+ u8 delta_value, bool large_exists,
+ u32 lkey_id, u32 action_pointer)
{
MLXSW_REG_ZERO(ptce3, payload);
mlxsw_reg_ptce3_v_set(payload, valid);
@@ -2844,6 +2845,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid,
mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info);
mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key);
mlxsw_reg_ptce3_erp_id_set(payload, erp_id);
+ mlxsw_reg_ptce3_delta_start_set(payload, delta_start);
+ mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask);
+ mlxsw_reg_ptce3_delta_value_set(payload, delta_value);
mlxsw_reg_ptce3_large_exists_set(payload, large_exists);
mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id);
mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer);
@@ -4231,8 +4235,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1,
MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2,
+ MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3,
MLXSW_REG_PPCNT_EXT_CNT = 0x5,
+ MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
@@ -4247,6 +4254,7 @@ enum mlxsw_reg_ppcnt_grp {
* 0x2: RFC 2819 Counters
* 0x3: RFC 3635 Counters
* 0x5: Ethernet Extended Counters
+ * 0x6: Ethernet Discard Counters
* 0x8: Link Level Retransmission Counters
* 0x10: Per Priority Counters
* 0x11: Per Traffic Class Counters
@@ -4390,8 +4398,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64);
+/* Ethernet RFC 2863 Counter Group */
+
+/* reg_ppcnt_if_in_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_in_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_if_out_discards
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_discards,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_if_out_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, if_out_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* Ethernet RFC 2819 Counter Group */
+/* reg_ppcnt_ether_stats_undersize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_ether_stats_oversize_pkts
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64);
+
+/* reg_ppcnt_ether_stats_fragments
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
/* reg_ppcnt_ether_stats_pkts64octets
* Access: RO
*/
@@ -4452,6 +4498,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets,
MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64);
+/* Ethernet RFC 3635 Counter Group */
+
+/* reg_ppcnt_dot3stats_fcs_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_dot3stats_symbol_errors
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_dot3control_in_unknown_opcodes
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64);
+
+/* reg_ppcnt_dot3in_pause_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Extended Counter Group Counters */
/* reg_ppcnt_ecn_marked
@@ -4460,6 +4532,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets,
MLXSW_ITEM64(reg, ppcnt, ecn_marked,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+/* Ethernet Discard Counter Group Counters */
+
+/* reg_ppcnt_ingress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+
+/* reg_ppcnt_ingress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
+/* reg_ppcnt_ingress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64);
+
+/* reg_ppcnt_ingress_tag_frame_type
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64);
+
+/* reg_ppcnt_egress_vlan_membership
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64);
+
+/* reg_ppcnt_loopback_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, loopback_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64);
+
+/* reg_ppcnt_egress_general
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_general,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64);
+
+/* reg_ppcnt_egress_hoq
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_hoq,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64);
+
+/* reg_ppcnt_egress_policy_engine
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_policy_engine,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64);
+
+/* reg_ppcnt_ingress_tx_link_down
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64);
+
+/* reg_ppcnt_egress_stp_filter
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_stp_filter,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64);
+
+/* reg_ppcnt_egress_sll
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, egress_sll,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
+
/* Ethernet Per Priority Group Counters */
/* reg_ppcnt_rx_octets
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9bec940330a4..637e2ef76abe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1876,8 +1876,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = {
+ {
+ .str = "if_in_discards",
+ .getter = mlxsw_reg_ppcnt_if_in_discards_get,
+ },
+ {
+ .str = "if_out_discards",
+ .getter = mlxsw_reg_ppcnt_if_out_discards_get,
+ },
+ {
+ .str = "if_out_errors",
+ .getter = mlxsw_reg_ppcnt_if_out_errors_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
{
+ .str = "ether_stats_undersize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get,
+ },
+ {
+ .str = "ether_stats_oversize_pkts",
+ .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get,
+ },
+ {
+ .str = "ether_stats_fragments",
+ .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get,
+ },
+ {
.str = "ether_pkts64octets",
.getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get,
},
@@ -1922,6 +1952,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = {
#define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \
ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = {
+ {
+ .str = "dot3stats_fcs_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get,
+ },
+ {
+ .str = "dot3stats_symbol_errors",
+ .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get,
+ },
+ {
+ .str = "dot3control_in_unknown_opcodes",
+ .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get,
+ },
+ {
+ .str = "dot3in_pause_frames",
+ .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats)
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_discard_stats[] = {
+ {
+ .str = "discard_ingress_general",
+ .getter = mlxsw_reg_ppcnt_ingress_general_get,
+ },
+ {
+ .str = "discard_ingress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get,
+ },
+ {
+ .str = "discard_ingress_tag_frame_type",
+ .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get,
+ },
+ {
+ .str = "discard_egress_vlan_membership",
+ .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get,
+ },
+ {
+ .str = "discard_loopback_filter",
+ .getter = mlxsw_reg_ppcnt_loopback_filter_get,
+ },
+ {
+ .str = "discard_egress_general",
+ .getter = mlxsw_reg_ppcnt_egress_general_get,
+ },
+ {
+ .str = "discard_egress_hoq",
+ .getter = mlxsw_reg_ppcnt_egress_hoq_get,
+ },
+ {
+ .str = "discard_egress_policy_engine",
+ .getter = mlxsw_reg_ppcnt_egress_policy_engine_get,
+ },
+ {
+ .str = "discard_ingress_tx_link_down",
+ .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get,
+ },
+ {
+ .str = "discard_egress_stp_filter",
+ .getter = mlxsw_reg_ppcnt_egress_stp_filter_get,
+ },
+ {
+ .str = "discard_egress_sll",
+ .getter = mlxsw_reg_ppcnt_egress_sll_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \
+ ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats)
+
static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
{
.str = "rx_octets_prio",
@@ -1974,7 +2080,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
+ MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \
+ MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
@@ -2015,12 +2124,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) {
memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
mlxsw_sp_port_get_prio_strings(&p, i);
@@ -2063,10 +2191,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
*p_hw_stats = mlxsw_sp_port_hw_stats;
*p_len = MLXSW_SP_PORT_HW_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_2863_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_RFC_2819_CNT:
*p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats;
*p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
break;
+ case MLXSW_REG_PPCNT_RFC_3635_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats;
+ *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_DISCARD_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_discard_stats;
+ *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+ break;
case MLXSW_REG_PPCNT_PRIO_CNT:
*p_hw_stats = mlxsw_sp_port_hw_prio_stats;
*p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
@@ -2116,11 +2256,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
data, data_index);
data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+ /* RFC 2863 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN;
+
/* RFC 2819 Counters */
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0,
data, data_index);
data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN;
+ /* RFC 3635 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN;
+
+ /* Discard Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN;
+
/* Per-Priority Counters */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
__mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
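Each group table added above pairs an ethtool -S string with a PPCNT getter, and mlxsw_sp_port_get_strings()/mlxsw_sp_port_get_stats() must walk the groups in the same fixed order so the running data_index keeps names and values aligned. A condensed sketch of that pattern (not driver code):

struct sketch_hw_stat {
	const char *str;			/* ethtool -S name */
	u64 (*getter)(const char *payload);	/* PPCNT field accessor */
};

/* Fill one group's values and return the index where the next group of
 * strings/values starts; both walks must advance identically.
 */
static int sketch_fill_group(u64 *data, int data_index,
			     const struct sketch_hw_stat *group, int len,
			     const char *ppcnt_payload)
{
	int i;

	for (i = 0; i < len; i++)
		data[data_index + i] = group[i].getter(ppcnt_payload);
	return data_index + len;
}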
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
index 8ca77f3e8f27..62e6cf4bc16e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c
@@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio
{
struct mlxsw_sp_acl_atcam_region *aregion;
struct mlxsw_sp_acl_atcam_entry *aentry;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- erp = mlxsw_sp_acl_erp_get(aregion, mask, true);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
return 0;
}
@@ -57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio
aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
static const struct mlxsw_sp_acl_ctcam_region_ops
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 2dda028f94db..e7bd8733e58e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -14,8 +14,8 @@
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6
-#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0
+#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5
struct mlxsw_sp_acl_atcam_lkey_id_ht_key {
char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */
@@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops {
void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion);
struct mlxsw_sp_acl_atcam_lkey_id *
(*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id);
+ char *enc_key, u8 erp_id);
void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id);
};
@@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = {
static bool
mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry)
{
- return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp);
+ return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask);
}
static int
@@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_generic *region_generic;
@@ -220,8 +219,7 @@ mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion,
static struct mlxsw_sp_acl_atcam_lkey_id *
mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_rule_info *rulei,
- u8 erp_id)
+ char *enc_key, u8 erp_id)
{
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
@@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion,
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key,
- NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START,
- MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END);
+ memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key));
+ mlxsw_afk_clear(afk, ht_key.enc_key,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START,
+ MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END);
ht_key.erp_id = erp_id;
lkey_id = rhashtable_lookup_fast(&region_12kb->lkey_ht, &ht_key,
mlxsw_sp_acl_atcam_lkey_id_ht_params);
@@ -379,7 +378,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_rule_info *rulei)
{
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
u32 kvdl_index, priority;
@@ -389,7 +388,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -398,6 +398,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
aentry->ht_key.enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
kvdl_index);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
@@ -418,12 +421,17 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
- u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp);
+ u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+ char *enc_key = aentry->ht_key.enc_key;
char ptce3_pl[MLXSW_REG_PTCE3_LEN];
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
- region->tcam_region_info, aentry->ht_key.enc_key,
- erp_id, refcount_read(&lkey_id->refcnt) != 1,
+ region->tcam_region_info,
+ enc_key, erp_id,
+ aentry->delta_info.start,
+ aentry->delta_info.mask,
+ aentry->delta_info.value,
+ refcount_read(&lkey_id->refcnt) != 1,
lkey_id->id, 0);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
aregion->ops->lkey_id_put(aregion, lkey_id);
@@ -438,19 +446,30 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = aregion->region;
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
- struct mlxsw_sp_acl_erp *erp;
- unsigned int blocks_count;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
int err;
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
-
- erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
- if (IS_ERR(erp))
- return PTR_ERR(erp);
- aentry->erp = erp;
- aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+ aentry->full_enc_key, mask);
+
+ erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+ if (IS_ERR(erp_mask))
+ return PTR_ERR(erp_mask);
+ aentry->erp_mask = erp_mask;
+ aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+ memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+ sizeof(aentry->ht_key.enc_key));
+
+ /* Compute all needed delta information and clear the delta bits
+ * from the encoded key.
+ */
+ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+ aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+ aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+ aentry->delta_info.value =
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* We can't insert identical rules into the A-TCAM, so fail and
* let the rule spill into C-TCAM
@@ -472,7 +491,7 @@ err_rule_insert:
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
err_rhashtable_insert:
- mlxsw_sp_acl_erp_put(aregion, erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
return err;
}
@@ -484,7 +503,7 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
mlxsw_sp_acl_atcam_entries_ht_params);
- mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+ mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
}
int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
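For reference, the delta_info fields filled in above reduce to a shift-and-mask over the encoded key. A worked sketch, assuming a delta occupying bits 10..12 (start = 10, mask = 0x7) and ignoring the driver's reversed byte indexing (__MASK_IDX()) and end-of-key bounds check:

#include <linux/types.h>

/* Sketch only: extract the delta value the way
 * mlxsw_sp_acl_erp_delta_value() does, but in plain byte order.
 */
static u8 sketch_delta_value(const unsigned char *key, u16 start, u8 mask)
{
	u16 tmp;

	tmp = key[start / 8];			/* byte holding the low bits */
	tmp |= (u16)key[start / 8 + 1] << 8;	/* field may straddle a byte */
	tmp >>= start % 8;			/* align the field to bit 0 */
	return tmp & mask;			/* start=10, mask=0x7 -> 3 bits */
}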
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8b1d40..f3e834bfea1a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *region = cregion->region;
struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
char ptce2_pl[MLXSW_REG_PTCE2_LEN];
- unsigned int blocks_count;
char *act_set;
u32 priority;
char *mask;
@@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
centry->parman_item.index, priority);
key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl);
mask = mlxsw_reg_ptce2_mask_data(ptce2_pl);
- blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
- mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0,
- blocks_count - 1);
+ mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask);
err = cregion->ops->entry_insert(cregion, centry, mask);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index 0a4fd3c8662a..d9a4b7e8434b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -7,7 +7,7 @@
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
-#include <linux/rhashtable.h>
+#include <linux/objagg.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
@@ -29,6 +29,8 @@ struct mlxsw_sp_acl_erp_core {
struct mlxsw_sp_acl_erp_key {
char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN];
+#define __MASK_LEN 0x38
+#define __MASK_IDX(i) (__MASK_LEN - (i) - 1)
bool ctcam;
};
@@ -36,10 +38,8 @@ struct mlxsw_sp_acl_erp {
struct mlxsw_sp_acl_erp_key key;
u8 id;
u8 index;
- refcount_t refcnt;
DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
struct list_head list;
- struct rhash_head ht_node;
struct mlxsw_sp_acl_erp_table *erp_table;
};
@@ -53,7 +53,6 @@ struct mlxsw_sp_acl_erp_table {
DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION);
struct list_head atcam_erps_list;
- struct rhashtable erp_ht;
struct mlxsw_sp_acl_erp_core *erp_core;
struct mlxsw_sp_acl_atcam_region *aregion;
const struct mlxsw_sp_acl_erp_table_ops *ops;
@@ -61,12 +60,8 @@ struct mlxsw_sp_acl_erp_table {
unsigned int num_atcam_erps;
unsigned int num_max_atcam_erps;
unsigned int num_ctcam_erps;
-};
-
-static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = {
- .key_len = sizeof(struct mlxsw_sp_acl_erp_key),
- .key_offset = offsetof(struct mlxsw_sp_acl_erp, key),
- .head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node),
+ unsigned int num_deltas;
+ struct objagg *objagg;
};
struct mlxsw_sp_acl_erp_table_ops {
@@ -119,16 +114,6 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = {
.erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy,
};
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp)
-{
- return erp->key.ctcam;
-}
-
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp)
-{
- return erp->id;
-}
-
static unsigned int
mlxsw_sp_acl_erp_table_entry_size(const struct mlxsw_sp_acl_erp_table *erp_table)
{
@@ -194,12 +179,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table)
static int
mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
@@ -210,7 +198,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
return err;
@@ -218,12 +206,15 @@ err_master_mask_update:
static int
mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
- const struct mlxsw_sp_acl_erp *erp)
+ struct mlxsw_sp_acl_erp_key *key)
{
+ DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN);
unsigned long bit;
int err;
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ bitmap_from_arr32(mask_bitmap, (u32 *) key->mask,
+ MLXSW_SP_ACL_TCAM_MASK_LEN);
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_clear(bit,
&erp_table->master_mask);
@@ -234,7 +225,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table,
return 0;
err_master_mask_update:
- for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
+ for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN)
mlxsw_sp_acl_erp_master_mask_bit_set(bit,
&erp_table->master_mask);
return err;
@@ -256,26 +247,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table,
goto err_erp_id_get;
memcpy(&erp->key, key, sizeof(*key));
- bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
- MLXSW_SP_ACL_TCAM_MASK_LEN);
list_add(&erp->list, &erp_table->atcam_erps_list);
- refcount_set(&erp->refcnt, 1);
erp_table->num_atcam_erps++;
erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
goto err_master_mask_set;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_insert;
-
return erp;
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
err_master_mask_set:
erp_table->num_atcam_erps--;
list_del(&erp->list);
@@ -290,9 +271,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
erp_table->num_atcam_erps--;
list_del(&erp->list);
mlxsw_sp_acl_erp_id_put(erp_table, erp->id);
@@ -647,9 +626,56 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table)
mlxsw_sp_acl_erp_table_enable(erp_table, false);
}
+static int
+__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *inc_num)
+{
+ int err;
+
+ /* If there are C-TCAM eRPs or deltas in use, we need to transition
+ * the region to use an eRP table, if that has not already been done.
+ */
+ if (erp_table->ops != &erp_two_masks_ops &&
+ erp_table->ops != &erp_multiple_masks_ops) {
+ err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
+ if (err)
+ return err;
+ }
+
+ /* When C-TCAM or deltas are used, the eRP table must be used */
+ if (erp_table->ops != &erp_multiple_masks_ops)
+ erp_table->ops = &erp_multiple_masks_ops;
+
+ (*inc_num)++;
+
+ return 0;
+}
+
+static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ return __mlxsw_sp_acl_erp_table_other_inc(erp_table,
+ &erp_table->num_deltas);
+}
+
static void
-mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
+__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table,
+ unsigned int *dec_num)
{
+ (*dec_num)--;
+
+ /* If there are no C-TCAM eRPs or deltas in use, the state we
+ * transition to depends on the number of A-TCAM eRPs currently
+ * in use.
+ */
+ if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0)
+ return;
+
switch (erp_table->num_atcam_erps) {
case 2:
/* Keep using the eRP table, but correctly set the
@@ -683,9 +709,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table)
}
}
+static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_ctcam_erps);
+}
+
+static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table)
+{
+ __mlxsw_sp_acl_erp_table_other_dec(erp_table,
+ &erp_table->num_deltas);
+}
+
static struct mlxsw_sp_acl_erp *
-__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
+mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
+ struct mlxsw_sp_acl_erp_key *key)
{
struct mlxsw_sp_acl_erp *erp;
int err;
@@ -697,89 +735,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
memcpy(&erp->key, key, sizeof(*key));
bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask,
MLXSW_SP_ACL_TCAM_MASK_LEN);
- refcount_set(&erp->refcnt, 1);
- erp_table->num_ctcam_erps++;
- erp->erp_table = erp_table;
- err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp);
+ err = mlxsw_sp_acl_erp_ctcam_inc(erp_table);
if (err)
- goto err_master_mask_set;
+ goto err_erp_ctcam_inc;
- err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
+ erp->erp_table = erp_table;
+
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key);
if (err)
- goto err_rhashtable_insert;
+ goto err_master_mask_set;
err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table);
if (err)
goto err_erp_region_ctcam_enable;
- /* When C-TCAM is used, the eRP table must be used */
- erp_table->ops = &erp_multiple_masks_ops;
-
return erp;
err_erp_region_ctcam_enable:
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
-err_rhashtable_insert:
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
err_master_mask_set:
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
+err_erp_ctcam_inc:
kfree(erp);
return ERR_PTR(err);
}
-static struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
- struct mlxsw_sp_acl_erp_key *key)
-{
- struct mlxsw_sp_acl_erp *erp;
- int err;
-
- /* There is a special situation where we need to spill rules
- * into the C-TCAM, yet the region is still using a master
- * mask and thus not performing a lookup in the C-TCAM. This
- * can happen when two rules that only differ in priority - and
- * thus sharing the same key - are programmed. In this case
- * we transition the region to use an eRP table
- */
- err = mlxsw_sp_acl_erp_region_table_trans(erp_table);
- if (err)
- return ERR_PTR(err);
-
- erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
- if (IS_ERR(erp)) {
- err = PTR_ERR(erp);
- goto err_erp_create;
- }
-
- return erp;
-
-err_erp_create:
- mlxsw_sp_acl_erp_region_master_mask_trans(erp_table);
- return ERR_PTR(err);
-}
-
static void
mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp)
{
struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table;
mlxsw_sp_acl_erp_region_ctcam_disable(erp_table);
- rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node,
- mlxsw_sp_acl_erp_ht_params);
- mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp);
- erp_table->num_ctcam_erps--;
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key);
+ mlxsw_sp_acl_erp_ctcam_dec(erp_table);
kfree(erp);
-
- /* Once the last C-TCAM eRP was destroyed, the state we
- * transition to depends on the number of A-TCAM eRPs currently
- * in use
- */
- if (erp_table->num_ctcam_erps > 0)
- return;
- mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table);
}
static struct mlxsw_sp_acl_erp *
@@ -790,7 +780,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table,
int err;
if (key->ctcam)
- return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
+ return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key);
/* Expand the eRP table for the new eRP, if needed */
err = mlxsw_sp_acl_erp_table_expand(erp_table);
@@ -838,7 +828,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
mlxsw_sp_acl_erp_index_put(erp_table, erp->index);
mlxsw_sp_acl_erp_generic_destroy(erp);
- if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0)
+ if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 &&
+ erp_table->num_deltas == 0)
erp_table->ops = &erp_two_masks_ops;
}
@@ -940,13 +931,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table,
WARN_ON(1);
}
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam)
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam)
{
- struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
struct mlxsw_sp_acl_erp_key key;
- struct mlxsw_sp_acl_erp *erp;
+ struct objagg_obj *objagg_obj;
/* eRPs are allocated from a shared resource, but currently all
* allocations are done under RTNL.
@@ -955,29 +945,238 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN);
key.ctcam = ctcam;
- erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key,
- mlxsw_sp_acl_erp_ht_params);
- if (erp) {
- refcount_inc(&erp->refcnt);
- return erp;
+ objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key);
+ if (IS_ERR(objagg_obj))
+ return ERR_CAST(objagg_obj);
+ return (struct mlxsw_sp_acl_erp_mask *) objagg_obj;
+}
+
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+
+ ASSERT_RTNL();
+ objagg_obj_put(aregion->erp_table->objagg, objagg_obj);
+}
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj);
+
+ return key->ctcam;
+}
+
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj);
+
+ return erp->id;
+}
+
+struct mlxsw_sp_acl_erp_delta {
+ struct mlxsw_sp_acl_erp_key key;
+ u16 start;
+ u8 mask;
+};
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->start;
+}
+
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta)
+{
+ return delta->mask;
+}
+
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ u16 tmp;
+
+ if (!mask)
+ return 0;
+
+ tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)];
+ if (start / 8 + 1 < __MASK_LEN)
+ tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8;
+ tmp >>= start % 8;
+ tmp &= mask;
+ return tmp;
+}
+
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key)
+{
+ u16 start = delta->start;
+ u8 mask = delta->mask;
+ unsigned char *byte;
+ u16 tmp;
+
+ tmp = mask;
+ tmp <<= start % 8;
+ tmp = ~tmp;
+
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)];
+ *byte &= tmp & 0xff;
+ if (start / 8 + 1 < __MASK_LEN) {
+ byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)];
+ *byte &= (tmp >> 8) & 0xff;
}
+}
+
+static const struct mlxsw_sp_acl_erp_delta
+mlxsw_sp_acl_erp_delta_default = {};
- return erp_table->ops->erp_create(erp_table, &key);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask)
+{
+ struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask;
+ const struct mlxsw_sp_acl_erp_delta *delta;
+
+ delta = objagg_obj_delta_priv(objagg_obj);
+ if (!delta)
+ delta = &mlxsw_sp_acl_erp_delta_default;
+ return delta;
}
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp)
+static int
+mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key,
+ const struct mlxsw_sp_acl_erp_key *key,
+ u16 *delta_start, u8 *delta_mask)
{
+ int offset = 0;
+ int si = -1;
+ u16 pmask;
+ u16 mask;
+ int i;
+
+ /* The difference between two masks can be up to 8 consecutive bits. */
+ for (i = 0; i < __MASK_LEN; i++) {
+ if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)])
+ continue;
+ if (si == -1)
+ si = i;
+ else if (si != i - 1)
+ return -EINVAL;
+ }
+ if (si == -1) {
+ /* The masks are the same, which cannot happen;
+ * that means the caller is broken.
+ */
+ WARN_ON(1);
+ *delta_start = 0;
+ *delta_mask = 0;
+ return 0;
+ }
+ pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)];
+ mask = (unsigned char) key->mask[__MASK_IDX(si)];
+ if (si + 1 < __MASK_LEN) {
+ pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8;
+ mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8;
+ }
+
+ if ((pmask ^ mask) & pmask)
+ return -EINVAL;
+ mask &= ~pmask;
+ while (!(mask & (1 << offset)))
+ offset++;
+ while (!(mask & 1))
+ mask >>= 1;
+ if (mask & 0xff00)
+ return -EINVAL;
+
+ *delta_start = si * 8 + offset;
+ *delta_mask = mask;
+
+ return 0;
+}
+
+static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
+ void *obj)
+{
+ struct mlxsw_sp_acl_erp_key *parent_key = parent_obj;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+ struct mlxsw_sp_acl_erp_delta *delta;
+ u16 delta_start;
+ u8 delta_mask;
+ int err;
- ASSERT_RTNL();
+ if (parent_key->ctcam || key->ctcam)
+ return ERR_PTR(-EINVAL);
+ err = mlxsw_sp_acl_erp_delta_fill(parent_key, key,
+ &delta_start, &delta_mask);
+ if (err)
+ return ERR_PTR(-EINVAL);
- if (!refcount_dec_and_test(&erp->refcnt))
- return;
+ delta = kzalloc(sizeof(*delta), GFP_KERNEL);
+ if (!delta)
+ return ERR_PTR(-ENOMEM);
+ delta->start = delta_start;
+ delta->mask = delta_mask;
+
+ err = mlxsw_sp_acl_erp_delta_inc(erp_table);
+ if (err)
+ goto err_erp_delta_inc;
+
+ memcpy(&delta->key, key, sizeof(*key));
+ err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key);
+ if (err)
+ goto err_master_mask_set;
+
+ return delta;
- erp_table->ops->erp_destroy(erp_table, erp);
+err_master_mask_set:
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+err_erp_delta_inc:
+ kfree(delta);
+ return ERR_PTR(err);
}
+static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv)
+{
+ struct mlxsw_sp_acl_erp_delta *delta = delta_priv;
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key);
+ mlxsw_sp_acl_erp_delta_dec(erp_table);
+ kfree(delta);
+}
+
+static void *mlxsw_sp_acl_erp_root_create(void *priv, void *obj)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+ struct mlxsw_sp_acl_erp_key *key = obj;
+
+ return erp_table->ops->erp_create(erp_table, key);
+}
+
+static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
+{
+ struct mlxsw_sp_acl_atcam_region *aregion = priv;
+ struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table;
+
+ erp_table->ops->erp_destroy(erp_table, root_priv);
+}
+
+static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
+ .obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
+ .delta_create = mlxsw_sp_acl_erp_delta_create,
+ .delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
+ .root_create = mlxsw_sp_acl_erp_root_create,
+ .root_destroy = mlxsw_sp_acl_erp_root_destroy,
+};
+
static struct mlxsw_sp_acl_erp_table *
mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
{
@@ -988,9 +1187,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
if (!erp_table)
return ERR_PTR(-ENOMEM);
- err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params);
- if (err)
- goto err_rhashtable_init;
+ erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
+ aregion);
+ if (IS_ERR(erp_table->objagg)) {
+ err = PTR_ERR(erp_table->objagg);
+ goto err_objagg_create;
+ }
erp_table->erp_core = aregion->atcam->erp_core;
erp_table->ops = &erp_no_mask_ops;
@@ -999,7 +1201,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion)
return erp_table;
-err_rhashtable_init:
+err_objagg_create:
kfree(erp_table);
return ERR_PTR(err);
}
@@ -1008,7 +1210,7 @@ static void
mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table)
{
WARN_ON(!list_empty(&erp_table->atcam_erps_list));
- rhashtable_destroy(&erp_table->erp_ht);
+ objagg_destroy(erp_table->objagg);
kfree(erp_table);
}
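The core of mlxsw_sp_acl_erp_delta_fill() above is a single-byte bit manipulation: the child mask must be a superset of the parent mask, and the extra bits become the delta. A worked sketch, assuming the differing byte has already been located by the function's earlier scan:

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch only, single-byte case.  Example: pmask = 0x0f, mask = 0x3f
 * yields *delta_start = byte_idx * 8 + 4 and *delta_mask = 0x3.
 */
static int sketch_delta_one_byte(u8 pmask, u8 mask, int byte_idx,
				 u16 *delta_start, u8 *delta_mask)
{
	int offset = 0;

	if ((pmask ^ mask) & pmask)
		return -EINVAL;	/* a parent bit is missing from the child */
	mask &= ~pmask;		/* keep only the extra (delta) bits */
	if (!mask)
		return -EINVAL;	/* masks equal; the caller rules this out */
	while (!(mask & (1 << offset)))
		offset++;	/* find the lowest delta bit */
	*delta_start = byte_idx * 8 + offset;
	*delta_mask = mask >> offset;
	return 0;
}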
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index d409b09ba8df..2e1e8c4b3922 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = {
#define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16
-static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
- char *output)
+static void mlxsw_sp1_afk_encode_block(char *output, int block_index,
+ char *block)
{
unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
char *output_indexed = output + offset;
@@ -107,10 +107,19 @@ static void mlxsw_sp1_afk_encode_block(char *block, int block_index,
memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
}
+static void mlxsw_sp1_afk_clear_block(char *output, int block_index)
+{
+ unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE;
+ char *output_indexed = output + offset;
+
+ memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE);
+}
+
const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
.blocks = mlxsw_sp1_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks),
.encode_block = mlxsw_sp1_afk_encode_block,
+ .clear_block = mlxsw_sp1_afk_clear_block,
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
@@ -263,10 +272,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = {
MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12),
};
-static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
- char *output)
+static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index,
+ u64 block_value)
{
- u64 block_value = mlxsw_sp2_afk_block_value_get(block);
const struct mlxsw_sp2_afk_block_layout *block_layout;
if (WARN_ON(block_index < 0 ||
@@ -278,8 +286,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index,
&block_layout->item, 0, block_value);
}
+static void mlxsw_sp2_afk_encode_block(char *output, int block_index,
+ char *block)
+{
+ u64 block_value = mlxsw_sp2_afk_block_value_get(block);
+
+ __mlxsw_sp2_afk_block_value_set(output, block_index, block_value);
+}
+
+static void mlxsw_sp2_afk_clear_block(char *output, int block_index)
+{
+ __mlxsw_sp2_afk_block_value_set(output, block_index, 0);
+}
+
const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = {
.blocks = mlxsw_sp2_afk_blocks,
.blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks),
.encode_block = mlxsw_sp2_afk_encode_block,
+ .clear_block = mlxsw_sp2_afk_clear_block,
};
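The new clear_block op mirrors encode_block with the destination buffer first. mlxsw_afk_clear(), which the A-TCAM code earlier in this patch calls to drop the lkey-id blocks, presumably just iterates this op over a block range; a sketch under that assumption (not the driver's actual core_acl_flex_keys.c code):

struct sketch_afk_ops {
	void (*clear_block)(char *output, int block_index);
};

/* Zero out the encoded-key blocks in [block_start, block_end]. */
static void sketch_afk_clear(const struct sketch_afk_ops *ops, char *enc_key,
			     int block_start, int block_end)
{
	int i;

	for (i = block_start; i <= block_end; i++)
		ops->clear_block(enc_key, i);
}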
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 219a4e26c332..9a73759d901f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -154,7 +154,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
+ * minus delta bits.
+ */
u8 erp_id;
};
@@ -165,9 +167,15 @@ struct mlxsw_sp_acl_atcam_chunk {
struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
+ char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */
+ struct {
+ u16 start;
+ u8 mask;
+ u8 value;
+ } delta_info;
struct mlxsw_sp_acl_ctcam_entry centry;
struct mlxsw_sp_acl_atcam_lkey_id *lkey_id;
- struct mlxsw_sp_acl_erp *erp;
+ struct mlxsw_sp_acl_erp_mask *erp_mask;
};
static inline struct mlxsw_sp_acl_atcam_region *
@@ -209,15 +217,27 @@ int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_atcam *atcam);
-struct mlxsw_sp_acl_erp;
-
-bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp);
-u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp);
-struct mlxsw_sp_acl_erp *
-mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion,
- const char *mask, bool ctcam);
-void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion,
- struct mlxsw_sp_acl_erp *erp);
+struct mlxsw_sp_acl_erp_delta;
+
+u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta);
+u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta,
+ const char *enc_key);
+
+struct mlxsw_sp_acl_erp_mask;
+
+bool
+mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+const struct mlxsw_sp_acl_erp_delta *
+mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask);
+struct mlxsw_sp_acl_erp_mask *
+mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion,
+ const char *mask, bool ctcam);
+void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion,
+ struct mlxsw_sp_acl_erp_mask *erp_mask);
int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion);
void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion);
int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile
index 4afb10375397..190e8b56a41f 100644
--- a/drivers/net/ethernet/netronome/nfp/Makefile
+++ b/drivers/net/ethernet/netronome/nfp/Makefile
@@ -57,6 +57,7 @@ endif
ifeq ($(CONFIG_NFP_APP_ABM_NIC),y)
nfp-objs += \
abm/ctrl.o \
+ abm/qdisc.o \
abm/main.o
endif
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 3c661f422688..1629b07f727b 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -50,56 +50,37 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
return 0;
}
-static int
-nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym,
- unsigned int stride, unsigned int offset, bool is_u64,
- u64 *res)
-{
- u64 val, sum = 0;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i,
- is_u64, &val);
- if (err)
- return err;
- sum += val;
- }
-
- *res = sum;
- return 0;
-}
-
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val)
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val)
{
- struct nfp_cpp *cpp = alink->abm->app->cpp;
+ struct nfp_cpp *cpp = abm->app->cpp;
u64 sym_offset;
int err;
- sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
- err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0,
- sym_offset, val);
+ __clear_bit(id, abm->threshold_undef);
+ if (abm->thresholds[id] == val)
+ return 0;
+
+ sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS;
+ err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val);
if (err) {
- nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n",
- alink->id, i);
+ nfp_err(cpp,
+ "RED offload setting level failed on subqueue %d\n",
+ id);
return err;
}
+ abm->thresholds[id] = val;
return 0;
}
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val)
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
+ u32 val)
{
- int i, err;
+ unsigned int threshold;
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- err = nfp_abm_ctrl_set_q_lvl(alink, i, val);
- if (err)
- return err;
- }
+ threshold = alink->queue_base + queue;
- return 0;
+ return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val);
}
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i)
@@ -153,42 +134,6 @@ int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
i, true, &stats->overlimits);
}
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats)
-{
- u64 pkts = 0, bytes = 0;
- int i, err;
-
- for (i = 0; i < alink->vnic->max_rx_rings; i++) {
- pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i));
- bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8);
- }
- stats->tx_pkts = pkts;
- stats->tx_bytes = bytes;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES,
- false, &stats->backlog_bytes);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls,
- NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS,
- false, &stats->backlog_pkts);
- if (err)
- return err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &stats->drops);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &stats->overlimits);
-}
-
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_xstats *xstats)
{
@@ -205,22 +150,6 @@ int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
i, true, &xstats->ecn_marked);
}
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats)
-{
- int err;
-
- err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP,
- true, &xstats->pdrop);
- if (err)
- return err;
-
- return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats,
- NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN,
- true, &xstats->ecn_marked);
-}
-
int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm)
{
return nfp_mbox_cmd(abm->app->pf, NFP_MBOX_PCIE_ABM_ENABLE,
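The rewritten __nfp_abm_ctrl_set_q_lvl() above caches the last value programmed per subqueue and skips the rtsym write when nothing changed, while the threshold_undef bitmap tracks which entries were ever set explicitly. A condensed sketch of that caching pattern, with dev_write() standing in for __nfp_rtsym_writel():

#include <linux/bitops.h>
#include <linux/types.h>

struct sketch_thresholds {
	u32 *vals;		/* last value programmed per subqueue */
	unsigned long *undef;	/* bit set => not explicitly configured */
};

static int sketch_set_lvl(struct sketch_thresholds *t, unsigned int id,
			  u32 val, int (*dev_write)(unsigned int id, u32 val))
{
	int err;

	__clear_bit(id, t->undef);
	if (t->vals[id] == val)
		return 0;	/* already programmed, skip the device IO */
	err = dev_write(id, val);
	if (err)
		return err;
	t->vals[id] = val;
	return 0;
}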
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index c0830c0c2c3f..a5732d3bd1b7 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -2,14 +2,13 @@
/* Copyright (C) 2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <linux/rtnetlink.h>
#include <linux/slab.h>
-#include <net/pkt_cls.h>
-#include <net/pkt_sched.h>
-#include <net/red.h>
#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
@@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
}
static int
-__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs, u32 init_val)
-{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
- int ret;
-
- ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
- memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);
-
- alink->parent = handle;
- alink->num_qdiscs = qs;
- port->tc_offload_cnt = qs;
-
- return ret;
-}
-
-static void
-nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle, unsigned int qs)
-{
- __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
-}
-
-static int
-nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- unsigned int i = TC_H_MIN(opt->parent) - 1;
-
- if (opt->parent == TC_H_ROOT)
- i = 0;
- else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
- i = TC_H_MIN(opt->parent) - 1;
- else
- return -EOPNOTSUPP;
-
- if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
- return -EOPNOTSUPP;
-
- return i;
-}
-
-static void
-nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
- u32 handle)
-{
- unsigned int i;
-
- for (i = 0; i < alink->num_qdiscs; i++)
- if (handle == alink->qdiscs[i].handle)
- break;
- if (i == alink->num_qdiscs)
- return;
-
- if (alink->parent == TC_H_ROOT) {
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- } else {
- nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
- memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
- }
-}
-
-static int
-nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- bool existing;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- existing = i >= 0;
-
- if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
- nfp_warn(alink->abm->app->cpp,
- "RED offload failed - unsupported parameters\n");
- err = -EINVAL;
- goto err_destroy;
- }
-
- if (existing) {
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
- else
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- if (err)
- goto err_destroy;
- return 0;
- }
-
- if (opt->parent == TC_H_ROOT) {
- i = 0;
- err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
- opt->set.min);
- } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
- i = TC_H_MIN(opt->parent) - 1;
- err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
- } else {
- return -EINVAL;
- }
- /* Set the handle to try full clean up, in case IO failed */
- alink->qdiscs[i].handle = opt->handle;
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i,
- &alink->qdiscs[i].stats);
- if (err)
- goto err_destroy;
-
- if (opt->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink,
- &alink->qdiscs[i].xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i,
- &alink->qdiscs[i].xstats);
- if (err)
- goto err_destroy;
-
- alink->qdiscs[i].stats.backlog_pkts = 0;
- alink->qdiscs[i].stats.backlog_bytes = 0;
-
- return 0;
-err_destroy:
- /* If the qdisc keeps on living, but we can't offload undo changes */
- if (existing) {
- opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
- opt->set.qstats->backlog -=
- alink->qdiscs[i].stats.backlog_bytes;
- }
- nfp_abm_red_destroy(netdev, alink, opt->handle);
-
- return err;
-}
-
-static void
-nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
- struct tc_qopt_offload_stats *stats)
-{
- _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
- new->tx_pkts - old->tx_pkts);
- stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
- stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
- stats->qstats->overlimits += new->overlimits - old->overlimits;
- stats->qstats->drops += new->drops - old->drops;
-}
-
-static int
-nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_stats *prev_stats;
- struct nfp_alink_stats stats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_stats = &alink->qdiscs[i].stats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_stats(alink, &stats);
- else
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, prev_stats, &opt->stats);
-
- *prev_stats = stats;
-
- return 0;
-}
-
-static int
-nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
-{
- struct nfp_alink_xstats *prev_xstats;
- struct nfp_alink_xstats xstats;
- int i, err;
-
- i = nfp_abm_red_find(alink, opt);
- if (i < 0)
- return i;
- prev_xstats = &alink->qdiscs[i].xstats;
-
- if (alink->parent == TC_H_ROOT)
- err = nfp_abm_ctrl_read_xstats(alink, &xstats);
- else
- err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
- if (err)
- return err;
-
- opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
- opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;
-
- *prev_xstats = xstats;
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_red_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_RED_REPLACE:
- return nfp_abm_red_replace(netdev, alink, opt);
- case TC_RED_DESTROY:
- nfp_abm_red_destroy(netdev, alink, opt->handle);
- return 0;
- case TC_RED_STATS:
- return nfp_abm_red_stats(alink, opt);
- case TC_RED_XSTATS:
- return nfp_abm_red_xstats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
-nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
-{
- struct nfp_alink_stats stats;
- unsigned int i;
- int err;
-
- for (i = 0; i < alink->num_qdiscs; i++) {
- if (alink->qdiscs[i].handle == TC_H_UNSPEC)
- continue;
-
- err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
- if (err)
- return err;
-
- nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
- &opt->stats);
- }
-
- return 0;
-}
-
-static int
-nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
- struct tc_mq_qopt_offload *opt)
-{
- switch (opt->command) {
- case TC_MQ_CREATE:
- nfp_abm_reset_root(netdev, alink, opt->handle,
- alink->total_queues);
- return 0;
- case TC_MQ_DESTROY:
- if (opt->handle == alink->parent)
- nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
- return 0;
- case TC_MQ_STATS:
- return nfp_abm_mq_stats(alink, opt);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data)
{
@@ -302,6 +38,8 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
switch (type) {
+ case TC_SETUP_ROOT_QDISC:
+ return nfp_abm_setup_root(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_MQ:
return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
case TC_SETUP_QDISC_RED:
@@ -573,31 +311,23 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
alink->abm = abm;
alink->vnic = nn;
alink->id = id;
- alink->parent = TC_H_ROOT;
alink->total_queues = alink->vnic->max_rx_rings;
- alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
- GFP_KERNEL);
- if (!alink->qdiscs) {
- err = -ENOMEM;
- goto err_free_alink;
- }
/* This is a multi-host app, make sure MAC/PHY is up, but don't
* make the MAC/PHY state follow the state of any of the ports.
*/
err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
if (err < 0)
- goto err_free_qdiscs;
+ goto err_free_alink;
netif_keep_dst(nn->dp.netdev);
nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
nfp_abm_ctrl_read_params(alink);
+ INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL);
return 0;
-err_free_qdiscs:
- kvfree(alink->qdiscs);
err_free_alink:
kfree(alink);
return err;
@@ -608,7 +338,7 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
struct nfp_abm_link *alink = nn->app_priv;
nfp_abm_kill_reprs(alink->abm, alink);
- kvfree(alink->qdiscs);
+ WARN(!radix_tree_empty(&alink->qdiscs), "leftover qdiscs\n");
kfree(alink);
}
@@ -664,6 +394,7 @@ static int nfp_abm_init(struct nfp_app *app)
struct nfp_pf *pf = app->pf;
struct nfp_reprs *reprs;
struct nfp_abm *abm;
+ unsigned int i;
int err;
if (!pf->eth_tbl) {
@@ -690,15 +421,28 @@ static int nfp_abm_init(struct nfp_app *app)
if (err)
goto err_free_abm;
+ err = -ENOMEM;
+ abm->num_thresholds = NFP_NET_MAX_RX_RINGS;
+ abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL);
+ if (!abm->threshold_undef)
+ goto err_free_abm;
+
+ abm->thresholds = kvcalloc(abm->num_thresholds,
+ sizeof(*abm->thresholds), GFP_KERNEL);
+ if (!abm->thresholds)
+ goto err_free_thresh_umap;
+ for (i = 0; i < NFP_NET_MAX_RX_RINGS; i++)
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
/* We start in legacy mode, make sure advanced queuing is disabled */
err = nfp_abm_ctrl_qm_disable(abm);
if (err)
- goto err_free_abm;
+ goto err_free_thresh;
err = -ENOMEM;
reprs = nfp_reprs_alloc(pf->max_data_vnics);
if (!reprs)
- goto err_free_abm;
+ goto err_free_thresh;
RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);
reprs = nfp_reprs_alloc(pf->max_data_vnics);
@@ -710,6 +454,10 @@ static int nfp_abm_init(struct nfp_app *app)
err_free_phys:
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+err_free_thresh:
+ kvfree(abm->thresholds);
+err_free_thresh_umap:
+ bitmap_free(abm->threshold_undef);
err_free_abm:
kfree(abm);
app->priv = NULL;
@@ -723,6 +471,8 @@ static void nfp_abm_clean(struct nfp_app *app)
nfp_abm_eswitch_clean_up(abm);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
+ bitmap_free(abm->threshold_undef);
+ kvfree(abm->thresholds);
kfree(abm);
app->priv = NULL;
}
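With the fixed qdiscs array gone, alink->qdiscs becomes a radix tree populated by the new abm/qdisc.c. A sketch of tracking Qdiscs in such a tree; the key choice here (the handle's major number) is an assumption for illustration, not necessarily what qdisc.c uses:

#include <linux/radix-tree.h>
#include <net/pkt_sched.h>

static unsigned long sketch_qdisc_key(u32 handle)
{
	return TC_H_MAJ(handle) >> 16;	/* assumed: major part as index */
}

static int sketch_qdisc_track(struct radix_tree_root *root, u32 handle,
			      void *qdisc)
{
	return radix_tree_insert(root, sketch_qdisc_key(handle), qdisc);
}

static void *sketch_qdisc_find(struct radix_tree_root *root, u32 handle)
{
	return radix_tree_lookup(root, sketch_qdisc_key(handle));
}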
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h
index f907b7d98917..240e2c8683fe 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.h
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.h
@@ -4,7 +4,17 @@
#ifndef __NFP_ABM_H__
#define __NFP_ABM_H__ 1
+#include <linux/bits.h>
+#include <linux/radix-tree.h>
#include <net/devlink.h>
+#include <net/pkt_cls.h>
+
+/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz;
+ * 2.5ms / 400Hz seems more than sufficient for stats resolution.
+ */
+#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */
+
+#define NFP_ABM_LVL_INFINITY S32_MAX
struct nfp_app;
struct nfp_net;
@@ -16,6 +26,11 @@ struct nfp_net;
* struct nfp_abm - ABM NIC app structure
* @app: back pointer to nfp_app
* @pf_id: ID of our PF link
+ *
+ * @thresholds: current threshold configuration
+ * @threshold_undef: bitmap of thresholds which have not been set
+ * @num_thresholds: number of @thresholds and bits in @threshold_undef
+ *
* @eswitch_mode: devlink eswitch mode, advanced functions only visible
* in switchdev mode
* @q_lvls: queue level control area
@@ -24,6 +39,11 @@ struct nfp_net;
struct nfp_abm {
struct nfp_app *app;
unsigned int pf_id;
+
+ u32 *thresholds;
+ unsigned long *threshold_undef;
+ size_t num_thresholds;
+
enum devlink_eswitch_mode eswitch_mode;
const struct nfp_rtsym *q_lvls;
const struct nfp_rtsym *qm_stats;
@@ -57,16 +77,67 @@ struct nfp_alink_xstats {
u64 pdrop;
};
+enum nfp_qdisc_type {
+ NFP_QDISC_NONE = 0,
+ NFP_QDISC_MQ,
+ NFP_QDISC_RED,
+};
+
+#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL)
+
/**
- * struct nfp_red_qdisc - representation of single RED Qdisc
- * @handle: handle of currently offloaded RED Qdisc
- * @stats: statistics from last refresh
- * @xstats: base of extended statistics
+ * struct nfp_qdisc - tracked TC Qdisc
+ * @netdev: netdev on which Qdisc was created
+ * @type: Qdisc type
+ * @handle: handle of this Qdisc
+ * @parent_handle: handle of the parent (unreliable if Qdisc was grafted)
+ * @use_cnt: number of attachment points in the hierarchy
+ * @num_children: current size of the @children array
+ * @children: pointers to children
+ *
+ * @params_ok: parameters of this Qdisc are OK for offload
+ * @offload_mark: offload refresh state - selected for offload
+ * @offloaded: Qdisc is currently offloaded to the HW
+ *
+ * @mq: MQ Qdisc specific parameters and state
+ * @mq.stats: current stats of the MQ Qdisc
+ * @mq.prev_stats: previously reported @mq.stats
+ *
+ * @red: RED Qdisc specific parameters and state
+ * @red.threshold: ECN marking threshold
+ * @red.stats: current stats of the RED Qdisc
+ * @red.prev_stats: previously reported @red.stats
+ * @red.xstats: extended stats for RED - current
+ * @red.prev_xstats: extended stats for RED - previously reported
*/
-struct nfp_red_qdisc {
+struct nfp_qdisc {
+ struct net_device *netdev;
+ enum nfp_qdisc_type type;
u32 handle;
- struct nfp_alink_stats stats;
- struct nfp_alink_xstats xstats;
+ u32 parent_handle;
+ unsigned int use_cnt;
+ unsigned int num_children;
+ struct nfp_qdisc **children;
+
+ bool params_ok;
+ bool offload_mark;
+ bool offloaded;
+
+ union {
+ /* NFP_QDISC_MQ */
+ struct {
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ } mq;
+ /* TC_SETUP_QDISC_RED */
+ struct {
+ u32 threshold;
+ struct nfp_alink_stats stats;
+ struct nfp_alink_stats prev_stats;
+ struct nfp_alink_xstats xstats;
+ struct nfp_alink_xstats prev_xstats;
+ } red;
+ };
};
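The per-type state of a tracked Qdisc lives in the union above, with @type as the discriminant, so MQ and RED bookkeeping can share storage safely. A minimal model of that tagged-union access pattern; the toy names and the C11 anonymous union are illustrative, not the driver's types:

#include <assert.h>
#include <stdio.h>

enum toy_type { TOY_MQ, TOY_RED };

struct toy_qdisc {
	enum toy_type type;		/* says which union member is live */
	union {
		struct { unsigned long pkts; } mq;
		struct { unsigned int threshold; } red;
	};
};

int main(void)
{
	struct toy_qdisc q = { .type = TOY_RED, .red = { .threshold = 3000 } };

	assert(q.type == TOY_RED);	/* always check the tag first */
	printf("ECN threshold: %u\n", q.red.threshold);
	return 0;
}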
/**
@@ -76,9 +147,11 @@ struct nfp_red_qdisc {
* @id: id of the data vNIC
* @queue_base: id of base to host queue within PCIe (not QC idx)
* @total_queues: number of PF queues
- * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT
- * @num_qdiscs: number of currently used qdiscs
- * @qdiscs: array of qdiscs
+ *
+ * @last_stats_update: ktime of last stats update
+ *
+ * @root_qdisc: pointer to the current root of the Qdisc hierarchy
+ * @qdiscs: all qdiscs recorded by the major part of the handle
*/
struct nfp_abm_link {
struct nfp_abm *abm;
@@ -86,22 +159,28 @@ struct nfp_abm_link {
unsigned int id;
unsigned int queue_base;
unsigned int total_queues;
- u32 parent;
- unsigned int num_qdiscs;
- struct nfp_red_qdisc *qdiscs;
+
+ u64 last_stats_update;
+
+ struct nfp_qdisc *root_qdisc;
+ struct radix_tree_root qdiscs;
};
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink);
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt);
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt);
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt);
+
void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink);
int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm);
-int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val);
-int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i,
+int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val);
+int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int queue,
u32 val);
-int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink,
- struct nfp_alink_stats *stats);
int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_stats *stats);
-int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink,
- struct nfp_alink_xstats *xstats);
int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i,
struct nfp_alink_xstats *xstats);
u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i);
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
new file mode 100644
index 000000000000..16c4afe3a37f
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -0,0 +1,663 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+/* Copyright (C) 2018 Netronome Systems, Inc. */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/red.h>
+
+#include "../nfpcore/nfp_cpp.h"
+#include "../nfp_app.h"
+#include "../nfp_main.h"
+#include "../nfp_net.h"
+#include "../nfp_port.h"
+#include "main.h"
+
+static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
+{
+ return qdisc->type == NFP_QDISC_RED;
+}
+
+static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
+{
+ return qdisc->children[id] &&
+ qdisc->children[id] != NFP_QDISC_UNTRACKED;
+}
+
+static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
+{
+ return rtnl_dereference(*slot);
+}
+
+static void
+nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
+ struct nfp_alink_stats *child)
+{
+ parent->tx_pkts += child->tx_pkts;
+ parent->tx_bytes += child->tx_bytes;
+ parent->backlog_pkts += child->backlog_pkts;
+ parent->backlog_bytes += child->backlog_bytes;
+ parent->overlimits += child->overlimits;
+ parent->drops += child->drops;
+}
+
+static void
+nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+ int err;
+
+ if (!qdisc->offloaded)
+ return;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, queue, &qdisc->red.stats);
+ if (err)
+ nfp_err(cpp, "RED stats (%d) read failed with error %d\n",
+ queue, err);
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, queue, &qdisc->red.xstats);
+ if (err)
+ nfp_err(cpp, "RED xstats (%d) read failed with error %d\n",
+ queue, err);
+}
+
+static void
+nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ if (qdisc->type != NFP_QDISC_MQ)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i))
+ nfp_abm_stats_update_red(alink, qdisc->children[i], i);
+}
+
+static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
+{
+ alink->last_stats_update = time_now;
+ if (alink->root_qdisc)
+ nfp_abm_stats_update_mq(alink, alink->root_qdisc);
+}
+
+static void nfp_abm_stats_update(struct nfp_abm_link *alink)
+{
+ u64 now;
+
+ /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
+ * of all their leaves, so we would read the same stat multiple times
+ * for every dump.
+ */
+ now = ktime_get();
+ if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
+ return;
+
+ __nfp_abm_stats_update(alink, now);
+}
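The refresh gate in nfp_abm_stats_update() above is a plain monotonic-timestamp comparison. A minimal userspace sketch of the same pattern, assuming POSIX clock_gettime(); the 2.5 ms constant mirrors NFP_ABM_STATS_REFRESH_IVAL, while struct link and its hw_reads counter are stand-ins for the driver's link state and control-message reads:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define STATS_REFRESH_IVAL_NS (2500 * 1000ULL)	/* 2.5 ms, as in the driver */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

struct link {
	uint64_t last_stats_update;
	unsigned int hw_reads;	/* stand-in for reading HW counters */
};

/* Skip the expensive refresh while the last snapshot is still fresh. */
static void stats_update(struct link *l)
{
	uint64_t now = now_ns();

	if (now - l->last_stats_update < STATS_REFRESH_IVAL_NS)
		return;
	l->last_stats_update = now;
	l->hw_reads++;
}

int main(void)
{
	struct link l = { 0 };

	for (int i = 0; i < 1000; i++)	/* e.g. one call per qdisc dumped */
		stats_update(&l);
	printf("refreshed from HW %u time(s)\n", l.hw_reads);
	return 0;
}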
+
+static void
+nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
+ unsigned int start, unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++)
+ if (nfp_abm_qdisc_child_valid(qdisc, i)) {
+ qdisc->children[i]->use_cnt--;
+ qdisc->children[i] = NULL;
+ }
+}
+
+static void
+nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ /* Don't complain when qdisc is getting unlinked */
+ if (qdisc->use_cnt)
+ nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
+ qdisc->handle);
+
+ if (!nfp_abm_qdisc_is_red(qdisc))
+ return;
+
+ qdisc->red.stats.backlog_pkts = 0;
+ qdisc->red.stats.backlog_bytes = 0;
+}
+
+static int
+__nfp_abm_stats_init(struct nfp_abm_link *alink,
+ unsigned int queue, struct nfp_alink_stats *prev_stats,
+ struct nfp_alink_xstats *prev_xstats)
+{
+ u64 backlog_pkts, backlog_bytes;
+ int err;
+
+ /* Don't touch the backlog; it can only be reset after it has
+ * been reported back to the tc qdisc stats.
+ */
+ backlog_pkts = prev_stats->backlog_pkts;
+ backlog_bytes = prev_stats->backlog_bytes;
+
+ err = nfp_abm_ctrl_read_q_stats(alink, queue, prev_stats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED stats init (%d) failed with error %d\n",
+ queue, err);
+ return err;
+ }
+
+ err = nfp_abm_ctrl_read_q_xstats(alink, queue, prev_xstats);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "RED xstats init (%d) failed with error %d\n",
+ queue, err);
+ return err;
+ }
+
+ prev_stats->backlog_pkts = backlog_pkts;
+ prev_stats->backlog_bytes = backlog_bytes;
+ return 0;
+}
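The backlog save/restore in __nfp_abm_stats_init() exists because the firmware counters read back as absolute values while the backlog fields still hold state tc has not consumed yet. A compact sketch of the save-read-restore idea; read_hw() is a hypothetical stub standing in for the per-queue control reads:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct stats { uint64_t backlog_pkts, backlog_bytes, tx_pkts; };

/* Hypothetical stub; the driver exchanges control messages here. */
static int read_hw(struct stats *s)
{
	memset(s, 0, sizeof(*s));	/* pretend HW reports fresh counters */
	s->tx_pkts = 42;
	return 0;
}

static int stats_init(struct stats *prev)
{
	/* Backlog may only be cleared once it was reported back to tc. */
	uint64_t pkts = prev->backlog_pkts;
	uint64_t bytes = prev->backlog_bytes;

	if (read_hw(prev))
		return -1;
	prev->backlog_pkts = pkts;	/* restore the unreported state */
	prev->backlog_bytes = bytes;
	return 0;
}

int main(void)
{
	struct stats prev = { .backlog_pkts = 7, .backlog_bytes = 9000 };

	stats_init(&prev);
	printf("%llu pkts still queued\n",
	       (unsigned long long)prev.backlog_pkts);	/* prints 7 */
	return 0;
}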
+
+static int
+nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ return __nfp_abm_stats_init(alink, queue,
+ &qdisc->red.prev_stats,
+ &qdisc->red.prev_xstats);
+}
+
+static void
+nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
+ unsigned int queue)
+{
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_RED &&
+ qdisc->params_ok &&
+ qdisc->use_cnt == 1 &&
+ !qdisc->children[0];
+
+ /* If we are starting offload, init prev_stats */
+ if (qdisc->offload_mark && !qdisc->offloaded)
+ if (nfp_abm_stats_init(alink, qdisc, queue))
+ qdisc->offload_mark = false;
+
+ if (!qdisc->offload_mark)
+ return;
+
+ nfp_abm_ctrl_set_q_lvl(alink, queue, qdisc->red.threshold);
+}
+
+static void
+nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
+{
+ unsigned int i;
+
+ qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
+ if (!qdisc->offload_mark)
+ return;
+
+ for (i = 0; i < alink->total_queues; i++) {
+ struct nfp_qdisc *child = qdisc->children[i];
+
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ nfp_abm_offload_compile_red(alink, child, i);
+ }
+}
+
+void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
+{
+ struct nfp_abm *abm = alink->abm;
+ struct radix_tree_iter iter;
+ struct nfp_qdisc *qdisc;
+ void __rcu **slot;
+ size_t i;
+
+ /* Mark all thresholds as unconfigured */
+ __bitmap_set(abm->threshold_undef,
+ alink->queue_base, alink->total_queues);
+
+ /* Clear offload marks */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ qdisc->offload_mark = false;
+ }
+
+ if (alink->root_qdisc)
+ nfp_abm_offload_compile_mq(alink, alink->root_qdisc);
+
+ /* Refresh offload status */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
+ if (!qdisc->offload_mark && qdisc->offloaded)
+ nfp_abm_qdisc_offload_stop(alink, qdisc);
+ qdisc->offloaded = qdisc->offload_mark;
+ }
+
+ /* Reset the unconfigured thresholds */
+ for (i = 0; i < abm->num_thresholds; i++)
+ if (test_bit(i, abm->threshold_undef))
+ __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);
+
+ __nfp_abm_stats_update(alink, ktime_get());
+}
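nfp_abm_qdisc_offload_update() above is a classic mark-and-sweep pass: clear every mark, re-mark what the current hierarchy can offload, then sweep so anything that lost its mark has its offload stopped. A toy model of those three phases; the want() callback is an arbitrary stand-in for the compile step walking from the root:

#include <stdbool.h>
#include <stdio.h>

struct toy_qdisc {
	const char *name;
	bool offload_mark;
	bool offloaded;
};

static void offload_update(struct toy_qdisc *q, int n,
			   bool (*want)(const struct toy_qdisc *))
{
	for (int i = 0; i < n; i++)		/* phase 1: clear marks */
		q[i].offload_mark = false;
	for (int i = 0; i < n; i++)		/* phase 2: re-mark */
		q[i].offload_mark = want(&q[i]);
	for (int i = 0; i < n; i++) {		/* phase 3: sweep */
		if (!q[i].offload_mark && q[i].offloaded)
			printf("offload of %s stopped\n", q[i].name);
		q[i].offloaded = q[i].offload_mark;
	}
}

static bool keep_red(const struct toy_qdisc *q)
{
	return q->name[0] == 'r';	/* arbitrary selection rule */
}

int main(void)
{
	struct toy_qdisc q[] = {
		{ "red0", false, true },
		{ "mq0", false, true },	/* loses its mark -> swept */
	};

	offload_update(q, 2, keep_red);
	return 0;
}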
+
+static void
+nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct radix_tree_iter iter;
+ unsigned int mq_refs = 0;
+ void __rcu **slot;
+
+ if (!qdisc->use_cnt)
+ return;
+ /* MQ doesn't notify well on destruction, so we need special handling
+ * of its children.
+ */
+ if (qdisc->type == NFP_QDISC_MQ &&
+ qdisc == alink->root_qdisc &&
+ netdev->reg_state == NETREG_UNREGISTERING)
+ return;
+
+ /* Count refs held by MQ instances and clear pointers */
+ radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
+ struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
+ unsigned int i;
+
+ if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
+ continue;
+ for (i = 0; i < mq->num_children; i++)
+ if (mq->children[i] == qdisc) {
+ mq->children[i] = NULL;
+ mq_refs++;
+ }
+ }
+
+ WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
+ qdisc->use_cnt, mq_refs);
+}
+
+static void
+nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct nfp_qdisc *qdisc)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+
+ if (!qdisc)
+ return;
+ nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
+ WARN_ON(radix_tree_delete(&alink->qdiscs,
+ TC_H_MAJ(qdisc->handle)) != qdisc);
+
+ kfree(qdisc->children);
+ kfree(qdisc);
+
+ port->tc_offload_cnt--;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children)
+{
+ struct nfp_port *port = nfp_port_from_netdev(netdev);
+ struct nfp_qdisc *qdisc;
+ int err;
+
+ qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
+ if (!qdisc)
+ return NULL;
+
+ qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
+ if (!qdisc->children)
+ goto err_free_qdisc;
+
+ qdisc->netdev = netdev;
+ qdisc->type = type;
+ qdisc->parent_handle = parent_handle;
+ qdisc->handle = handle;
+ qdisc->num_children = children;
+
+ err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
+ if (err) {
+ nfp_err(alink->abm->app->cpp,
+ "Qdisc insertion into radix tree failed: %d\n", err);
+ goto err_free_child_tbl;
+ }
+
+ port->tc_offload_cnt++;
+ return qdisc;
+
+err_free_child_tbl:
+ kfree(qdisc->children);
+err_free_qdisc:
+ kfree(qdisc);
+ return NULL;
+}
+
+static struct nfp_qdisc *
+nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
+{
+ return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
+}
+
+static int
+nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
+ unsigned int children, struct nfp_qdisc **qdisc)
+{
+ *qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (*qdisc) {
+ if (WARN_ON((*qdisc)->type != type))
+ return -EINVAL;
+ return 1;
+ }
+
+ *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
+ children);
+ return *qdisc ? 0 : -ENOMEM;
+}
+
+static void
+nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
+ u32 handle)
+{
+ struct nfp_qdisc *qdisc;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return;
+
+ /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
+ if (alink->root_qdisc == qdisc)
+ qdisc->use_cnt--;
+
+ nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
+ nfp_abm_qdisc_free(netdev, alink, qdisc);
+
+ if (alink->root_qdisc == qdisc) {
+ alink->root_qdisc = NULL;
+ /* Only a root change matters; other changes are acted upon via
+ * the graft notification.
+ */
+ nfp_abm_qdisc_offload_update(alink);
+ }
+}
+
+static int
+nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
+ unsigned int id)
+{
+ struct nfp_qdisc *parent, *child;
+
+ parent = nfp_abm_qdisc_find(alink, handle);
+ if (!parent)
+ return 0;
+
+ if (WARN(id >= parent->num_children,
+ "graft child out of bound %d >= %d\n",
+ id, parent->num_children))
+ return -EINVAL;
+
+ nfp_abm_qdisc_unlink_children(parent, id, id + 1);
+
+ child = nfp_abm_qdisc_find(alink, child_handle);
+ if (child)
+ child->use_cnt++;
+ else
+ child = NFP_QDISC_UNTRACKED;
+ parent->children[id] = child;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+static void
+nfp_abm_stats_calculate(struct nfp_alink_stats *new,
+ struct nfp_alink_stats *old,
+ struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_queue *qstats)
+{
+ _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
+ new->tx_pkts - old->tx_pkts);
+ qstats->qlen += new->backlog_pkts - old->backlog_pkts;
+ qstats->backlog += new->backlog_bytes - old->backlog_bytes;
+ qstats->overlimits += new->overlimits - old->overlimits;
+ qstats->drops += new->drops - old->drops;
+}
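The subtractions in nfp_abm_stats_calculate() convert the firmware's absolute counters into the increments tc expects; callers then snapshot the current values into prev (as nfp_abm_red_stats() does below) so the next dump reports only new activity. A small numeric sketch of that delta-and-snapshot cycle:

#include <stdint.h>
#include <stdio.h>

struct counters { uint64_t tx_pkts, drops; };

/* Report only what changed since the previous dump, then snapshot. */
static void report_delta(struct counters *now, struct counters *prev)
{
	printf("pkts +%llu, drops +%llu\n",
	       (unsigned long long)(now->tx_pkts - prev->tx_pkts),
	       (unsigned long long)(now->drops - prev->drops));
	*prev = *now;
}

int main(void)
{
	struct counters prev = { 0, 0 };
	struct counters now = { 100, 3 };

	report_delta(&now, &prev);	/* pkts +100, drops +3 */
	now.tx_pkts = 150;
	report_delta(&now, &prev);	/* pkts +50, drops +0 */
	return 0;
}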
+
+static void
+nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
+ struct nfp_alink_xstats *old,
+ struct red_stats *stats)
+{
+ stats->forced_mark += new->ecn_marked - old->ecn_marked;
+ stats->pdrop += new->pdrop - old->pdrop;
+}
+
+static int
+nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (!qdisc || !qdisc->offloaded)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_red_calculate(&qdisc->red.xstats,
+ &qdisc->red.prev_xstats,
+ opt->xstats);
+ qdisc->red.prev_xstats = qdisc->red.xstats;
+ return 0;
+}
+
+static int
+nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc;
+
+ nfp_abm_stats_update(alink);
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+ /* If the qdisc offload has stopped, we may need to adjust the backlog
+ * counters back, so carry on even if the qdisc is not currently offloaded.
+ */
+
+ nfp_abm_stats_calculate(&qdisc->red.stats,
+ &qdisc->red.prev_stats,
+ stats->bstats, stats->qstats);
+ qdisc->red.prev_stats = qdisc->red.stats;
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
+
+static bool
+nfp_abm_red_check_params(struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_cpp *cpp = alink->abm->app->cpp;
+
+ if (!opt->set.is_ecn) {
+ nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.is_harddrop) {
+ nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min != opt->set.max) {
+ nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
+ opt->parent, opt->handle);
+ return false;
+ }
+ if (opt->set.min > NFP_ABM_LVL_INFINITY) {
+ nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
+ opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
+ opt->handle);
+ return false;
+ }
+
+ return true;
+}
+
+static int
+nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
+ opt->handle, 1, &qdisc);
+ if (ret < 0)
+ return ret;
+
+ /* If limit != 0, the child gets reset */
+ if (opt->set.limit) {
+ if (nfp_abm_qdisc_child_valid(qdisc, 0))
+ qdisc->children[0]->use_cnt--;
+ qdisc->children[0] = NULL;
+ } else {
+ /* A Qdisc just allocated without a limit will use noop_qdisc,
+ * i.e. a black hole.
+ */
+ if (!ret)
+ qdisc->children[0] = NFP_QDISC_UNTRACKED;
+ }
+
+ qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
+ if (qdisc->params_ok)
+ qdisc->red.threshold = opt->set.min;
+
+ if (qdisc->use_cnt == 1)
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
+
+int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_red_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_RED_REPLACE:
+ return nfp_abm_red_replace(netdev, alink, opt);
+ case TC_RED_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_RED_STATS:
+ return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
+ case TC_RED_XSTATS:
+ return nfp_abm_red_xstats(alink, opt);
+ case TC_RED_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->child_handle, 0);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ struct nfp_qdisc *qdisc;
+ int ret;
+
+ ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
+ TC_H_ROOT, opt->handle, alink->total_queues,
+ &qdisc);
+ if (ret < 0)
+ return ret;
+
+ qdisc->params_ok = true;
+ qdisc->offloaded = true;
+ nfp_abm_qdisc_offload_update(alink);
+ return 0;
+}
+
+static int
+nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
+ struct tc_qopt_offload_stats *stats)
+{
+ struct nfp_qdisc *qdisc, *red;
+ unsigned int i;
+
+ qdisc = nfp_abm_qdisc_find(alink, handle);
+ if (!qdisc)
+ return -EOPNOTSUPP;
+
+ nfp_abm_stats_update(alink);
+
+ /* MQ stats are summed over the children in the core, so we need
+ * to add up the unreported child values.
+ */
+ memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
+ memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));
+
+ for (i = 0; i < qdisc->num_children; i++) {
+ if (!nfp_abm_qdisc_child_valid(qdisc, i))
+ continue;
+
+ if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
+ continue;
+ red = qdisc->children[i];
+
+ nfp_abm_stats_propagate(&qdisc->mq.stats,
+ &red->red.stats);
+ nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
+ &red->red.prev_stats);
+ }
+
+ nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
+ stats->bstats, stats->qstats);
+
+ return qdisc->offloaded ? 0 : -EOPNOTSUPP;
+}
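A tiny numeric model of the fold in nfp_abm_mq_stats(): child snapshots are summed into freshly zeroed parent aggregates (current and previously reported alike), and only the parent-level delta between the two sums reaches tc. Values here are made up for illustration:

#include <stdint.h>
#include <stdio.h>

struct st { uint64_t pkts; };

int main(void)
{
	struct st child_now[2] = { { 70 }, { 50 } };
	struct st child_prev[2] = { { 60 }, { 45 } };
	struct st sum_now = { 0 }, sum_prev = { 0 };

	/* Fold the children into fresh parent aggregates... */
	for (int i = 0; i < 2; i++) {
		sum_now.pkts += child_now[i].pkts;
		sum_prev.pkts += child_prev[i].pkts;
	}
	/* ...and report only the parent-level delta: (70+50) - (60+45). */
	printf("mq delta: %llu pkts\n",
	       (unsigned long long)(sum_now.pkts - sum_prev.pkts));
	return 0;
}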
+
+int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_mq_qopt_offload *opt)
+{
+ switch (opt->command) {
+ case TC_MQ_CREATE:
+ return nfp_abm_mq_create(netdev, alink, opt);
+ case TC_MQ_DESTROY:
+ nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
+ return 0;
+ case TC_MQ_STATS:
+ return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
+ case TC_MQ_GRAFT:
+ return nfp_abm_qdisc_graft(alink, opt->handle,
+ opt->graft_params.child_handle,
+ opt->graft_params.queue);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
+ struct tc_root_qopt_offload *opt)
+{
+ if (opt->ingress)
+ return -EOPNOTSUPP;
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt--;
+ alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
+ if (alink->root_qdisc)
+ alink->root_qdisc->use_cnt++;
+
+ nfp_abm_qdisc_offload_update(alink);
+
+ return 0;
+}
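Throughout this file qdiscs are looked up via TC_H_MAJ(handle), the top 16 bits of the 32-bit TC handle, so a qdisc and the classids beneath it resolve to the same radix-tree key. A toy illustration of that keying, assuming the standard TC handle layout:

#include <stdint.h>
#include <stdio.h>

#define TOY_H_MAJ(h)	((h) & 0xFFFF0000U)	/* mirrors TC_H_MAJ() */

int main(void)
{
	uint32_t qdisc_handle = 0x80010000;	/* "8001:" in tc notation */
	uint32_t queue_class = 0x80010003;	/* "8001:3", a per-queue class */

	/* Both resolve to the same key, i.e. the same tracked qdisc. */
	printf("%#x %#x\n", (unsigned)TOY_H_MAJ(qdisc_handle),
	       (unsigned)TOY_H_MAJ(queue_class));
	return 0;
}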
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 244dc261006e..8d54b36afee8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -2,7 +2,6 @@
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#include <linux/bitfield.h>
-#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
@@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
return act_size;
}
-static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
- enum nfp_flower_tun_type tun_type)
-{
- if (!out_dev->rtnl_link_ops)
- return false;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
- return tun_type == NFP_FL_TUNNEL_VXLAN;
-
- if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
- return tun_type == NFP_FL_TUNNEL_GENEVE;
-
- return false;
-}
-
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
@@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
/* Set action output parameters. */
output->flags = cpu_to_be16(tmp_flags);
- /* Only offload if egress ports are on the same device as the
- * ingress port.
- */
- if (!switchdev_port_same_parent_id(in_dev, out_dev))
- return -EOPNOTSUPP;
+ if (nfp_netdev_is_nfp_repr(in_dev)) {
+ /* Confirm ingress and egress are on the same device. */
+ if (!switchdev_port_same_parent_id(in_dev, out_dev))
+ return -EOPNOTSUPP;
+ }
+
if (!nfp_netdev_is_nfp_repr(out_dev))
return -EOPNOTSUPP;
@@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
return 0;
}
+struct ipv4_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
- struct nfp_fl_set_ip4_addrs *set_ip_addr)
+ struct nfp_fl_set_ip4_addrs *set_ip_addr,
+ struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
+ struct ipv4_ttl_word *ttl_word_mask;
+ struct ipv4_ttl_word *ttl_word;
+ struct iphdr *tos_word_mask;
+ struct iphdr *tos_word;
__be32 exact, mask;
/* We are expecting tcf_pedit to return a big endian value */
@@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
set_ip_addr->ipv4_dst_mask |= mask;
set_ip_addr->ipv4_dst &= ~mask;
set_ip_addr->ipv4_dst |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
break;
case offsetof(struct iphdr, saddr):
set_ip_addr->ipv4_src_mask |= mask;
set_ip_addr->ipv4_src &= ~mask;
set_ip_addr->ipv4_src |= exact & mask;
+ set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
+ set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case offsetof(struct iphdr, ttl):
+ ttl_word_mask = (struct ipv4_ttl_word *)&mask;
+ ttl_word = (struct ipv4_ttl_word *)&exact;
+
+ if (ttl_word_mask->protocol || ttl_word_mask->check)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
+ set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
+ break;
+ case round_down(offsetof(struct iphdr, tos), 4):
+ tos_word_mask = (struct iphdr *)&mask;
+ tos_word = (struct iphdr *)&exact;
+
+ if (tos_word_mask->version || tos_word_mask->ihl ||
+ tos_word_mask->tot_len)
+ return -EOPNOTSUPP;
+
+ set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
+ set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
+ set_ip_ttl_tos->head.jump_id =
+ NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
+ set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
+ NFP_FL_LW_SIZ;
break;
default:
return -EOPNOTSUPP;
}
- set_ip_addr->reserved = cpu_to_be16(0);
- set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
- set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
-
return 0;
}
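pedit hands the driver 32-bit (value, mask) words at fixed header offsets, which is why the code above overlays struct ipv4_ttl_word on the word holding the TTL and rejects masks that spill into the protocol or checksum fields. A standalone sketch of that overlay-and-validate step; the byte layout matches the struct in the diff, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The 32-bit word at offsetof(struct iphdr, ttl) holds the TTL,
 * protocol and header checksum side by side.
 */
struct ttl_word {
	uint8_t ttl;
	uint8_t protocol;
	uint16_t check;
};

int main(void)
{
	/* A pedit-style (value, mask) pair covering the whole word;
	 * only the TTL byte is meant to change.
	 */
	uint32_t exact = 0, mask = 0;
	struct ttl_word w_exact, w_mask;

	((uint8_t *)&exact)[0] = 64;	/* new TTL lives in the first byte */
	((uint8_t *)&mask)[0] = 0xff;

	memcpy(&w_exact, &exact, sizeof(w_exact));
	memcpy(&w_mask, &mask, sizeof(w_mask));

	/* Reject the action if it would also touch protocol/checksum. */
	if (w_mask.protocol || w_mask.check) {
		puts("unsupported: mask covers more than the TTL");
		return 1;
	}
	printf("set ttl to %u\n", w_exact.ttl & w_mask.ttl);
	return 0;
}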
@@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}
+struct ipv6_hop_limit_word {
+ __be16 payload_len;
+ u8 nexthdr;
+ u8 hop_limit;
+};
+
+static int
+nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
+{
+ struct ipv6_hop_limit_word *fl_hl_mask;
+ struct ipv6_hop_limit_word *fl_hl;
+
+ switch (off) {
+ case offsetof(struct ipv6hdr, payload_len):
+ fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
+ fl_hl = (struct ipv6_hop_limit_word *)&exact;
+
+ if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
+ ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
+ fl_hl_mask->hop_limit;
+ break;
+ case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
+ if (mask & ~IPV6_FLOW_LABEL_MASK ||
+ exact & ~IPV6_FLOW_LABEL_MASK)
+ return -EOPNOTSUPP;
+
+ ip_hl_fl->ipv6_label_mask |= mask;
+ ip_hl_fl->ipv6_label &= ~mask;
+ ip_hl_fl->ipv6_label |= exact & mask;
+ break;
+ }
+
+ ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
+ ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;
+
+ return 0;
+}
+
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
struct nfp_fl_set_ipv6_addr *ip_dst,
- struct nfp_fl_set_ipv6_addr *ip_src)
+ struct nfp_fl_set_ipv6_addr *ip_src,
+ struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
__be32 exact, mask;
+ int err = 0;
u8 word;
/* We are expecting tcf_pedit to return a big endian value */
@@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
if (off < offsetof(struct ipv6hdr, saddr)) {
- return -EOPNOTSUPP;
+ err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
+ ip_hl_fl);
} else if (off < offsetof(struct ipv6hdr, daddr)) {
word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
@@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
return -EOPNOTSUPP;
}
- return 0;
+ return err;
}
static int
@@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
char *nfp_action, int *a_len, u32 *csum_updated)
{
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
+ struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
+ struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
@@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
u32 offset, cmd;
u8 ip_proto = 0;
+ memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
+ memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
memset(&set_ip6_src, 0, sizeof(set_ip6_src));
memset(&set_ip_addr, 0, sizeof(set_ip_addr));
@@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
err = nfp_fl_set_eth(action, idx, offset, &set_eth);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
+ err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
+ &set_ip_ttl_tos);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
- &set_ip6_src);
+ &set_ip6_src, &set_ip6_tc_hl_fl);
break;
case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
err = nfp_fl_set_tport(action, idx, offset, &set_tport,
@@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
memcpy(nfp_action, &set_eth, act_size);
*a_len += act_size;
}
+ if (set_ip_ttl_tos.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip_ttl_tos);
+ memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
+ *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
+ nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip_addr.head.len_lw) {
nfp_action += act_size;
act_size = sizeof(set_ip_addr);
@@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
+ if (set_ip6_tc_hl_fl.head.len_lw) {
+ nfp_action += act_size;
+ act_size = sizeof(set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ *a_len += act_size;
+
+ /* Hardware will automatically fix TCP/UDP checksum. */
+ *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
+ }
if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
@@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
- struct nfp_repr *repr = netdev_priv(netdev);
- *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
+ *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 29d673aa5277..15f41cfef9f1 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -8,6 +8,7 @@
#include <linux/skbuff.h>
#include <linux/types.h>
#include <net/geneve.h>
+#include <net/vxlan.h>
#include "../nfp_app.h"
#include "../nfpcore/nfp_cpp.h"
@@ -65,8 +66,10 @@
#define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6
#define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7
#define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9
+#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10
#define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11
#define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12
+#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13
#define NFP_FL_ACTION_OPCODE_SET_UDP 14
#define NFP_FL_ACTION_OPCODE_SET_TCP 15
#define NFP_FL_ACTION_OPCODE_PRE_LAG 16
@@ -82,6 +85,8 @@
#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
+#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
+
/* LAG ports */
#define NFP_FL_LAG_OUT 0xC0DE0000
@@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs {
__be32 ipv4_dst;
};
+struct nfp_fl_set_ip4_ttl_tos {
+ struct nfp_fl_act_head head;
+ u8 ipv4_ttl_mask;
+ u8 ipv4_tos_mask;
+ u8 ipv4_ttl;
+ u8 ipv4_tos;
+ __be16 reserved;
+};
+
+struct nfp_fl_set_ipv6_tc_hl_fl {
+ struct nfp_fl_act_head head;
+ u8 ipv6_tc_mask;
+ u8 ipv6_hop_limit_mask;
+ __be16 reserved;
+ u8 ipv6_tc;
+ u8 ipv6_hop_limit;
+ __be32 ipv6_label_mask;
+ __be32 ipv6_label;
+};
+
struct nfp_fl_set_ipv6_addr {
struct nfp_fl_act_head head;
__be16 reserved;
@@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb)
return skb->len - NFP_FLOWER_CMSG_HLEN;
}
+static inline bool
+nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
+ enum nfp_flower_tun_type tun_type)
+{
+ if (netif_is_vxlan(netdev))
+ return tun_type == NFP_FL_TUNNEL_VXLAN;
+ if (netif_is_geneve(netdev))
+ return tun_type == NFP_FL_TUNNEL_GENEVE;
+
+ return false;
+}
+
+static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
+{
+ if (!netdev->rtnl_link_ops)
+ return false;
+ if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
+ return true;
+ if (netif_is_vxlan(netdev))
+ return true;
+ if (netif_is_geneve(netdev))
+ return true;
+
+ return false;
+}
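netif_is_vxlan() and netif_is_geneve() come down to rtnl_link_ops kind checks, so the predicate above is essentially string dispatch on the netdev's link type. A userspace stand-in written under that assumption:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* kind is NULL when the netdev has no rtnl_link_ops. */
static bool is_netdev_to_offload(const char *kind)
{
	if (!kind)
		return false;
	return !strcmp(kind, "openvswitch") ||
	       !strcmp(kind, "vxlan") ||
	       !strcmp(kind, "geneve");
}

int main(void)
{
	printf("%d %d\n", is_netdev_to_offload("vxlan"),
	       is_netdev_to_offload("dummy"));	/* prints: 1 0 */
	return 0;
}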
+
struct sk_buff *
nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports);
void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
index 81dcf5b318ba..5db838f45694 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c
@@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag,
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
}
-static int
+static void
nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
struct net_device *master)
{
struct nfp_fl_lag_group *group;
+ struct nfp_flower_priv *priv;
+
+ priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
+
+ if (!netif_is_bond_master(master))
+ return;
mutex_lock(&lag->lock);
group = nfp_fl_lag_find_group_for_master_with_lag(lag, master);
if (!group) {
mutex_unlock(&lag->lock);
- return -ENOENT;
+ nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n",
+ netdev_name(master));
+ return;
}
group->to_remove = true;
@@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
static int
@@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag,
return 0;
}
-static int
+static void
nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
struct netdev_notifier_changelowerstate_info *info)
{
@@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
unsigned long *flags;
if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev))
- return 0;
+ return;
lag_lower_info = info->lower_state_info;
if (!lag_lower_info)
- return 0;
+ return;
priv = container_of(lag, struct nfp_flower_priv, nfp_lag);
repr = netdev_priv(netdev);
/* Verify that the repr is associated with this app. */
if (repr->app != priv->app)
- return 0;
+ return;
repr_priv = repr->app_priv;
flags = &repr_priv->lag_port_flags;
@@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev,
mutex_unlock(&lag->lock);
schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY);
- return 0;
}
-static int
-nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
- void *ptr)
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct net_device *netdev;
- struct nfp_fl_lag *lag;
+ struct nfp_fl_lag *lag = &priv->nfp_lag;
int err;
- netdev = netdev_notifier_info_to_dev(ptr);
- lag = container_of(nb, struct nfp_fl_lag, lag_nb);
-
switch (event) {
case NETDEV_CHANGEUPPER:
err = nfp_fl_lag_changeupper_event(lag, ptr);
@@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event,
return NOTIFY_BAD;
return NOTIFY_OK;
case NETDEV_CHANGELOWERSTATE:
- err = nfp_fl_lag_changels_event(lag, netdev, ptr);
- if (err)
- return NOTIFY_BAD;
+ nfp_fl_lag_changels_event(lag, netdev, ptr);
return NOTIFY_OK;
case NETDEV_UNREGISTER:
- if (netif_is_bond_master(netdev)) {
- err = nfp_fl_lag_schedule_group_delete(lag, netdev);
- if (err)
- return NOTIFY_BAD;
- return NOTIFY_OK;
- }
+ nfp_fl_lag_schedule_group_delete(lag, netdev);
+ return NOTIFY_OK;
}
return NOTIFY_DONE;
@@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag)
/* 0 is a reserved batch version so increment to first valid value. */
nfp_fl_increment_version(lag);
-
- lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event;
}
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index 3a54728d2ea6..5059110a1768 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}
-static int
-nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
-{
- return tc_setup_cb_egdev_register(netdev,
- nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
-}
-
static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
struct nfp_repr *repr = netdev_priv(netdev);
kfree(repr->app_priv);
-
- tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
- netdev_priv(netdev));
}
static void
@@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app)
goto err_cleanup_metadata;
}
+ INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+
return 0;
err_cleanup_metadata:
@@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app)
err = nfp_flower_lag_reset(&app_priv->nfp_lag);
if (err)
return err;
-
- err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
- if (err)
- return err;
}
return nfp_tunnel_config_start(app);
@@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
+ nfp_tunnel_config_stop(app);
+}
+
+static int
+nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr)
+{
struct nfp_flower_priv *app_priv = app->priv;
+ int ret;
- if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
- unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
+ if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
+ ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+ }
- nfp_tunnel_config_stop(app);
+ ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
+ if (ret & NOTIFY_STOP_MASK)
+ return ret;
+
+ return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}
const struct nfp_app_type app_flower = {
@@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = {
.vnic_init = nfp_flower_vnic_init,
.vnic_clean = nfp_flower_vnic_clean,
- .repr_init = nfp_flower_repr_netdev_init,
.repr_preclean = nfp_flower_repr_netdev_preclean,
.repr_clean = nfp_flower_repr_netdev_clean,
@@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = {
.start = nfp_flower_start,
.stop = nfp_flower_stop,
+ .netdev_event = nfp_flower_netdev_event,
+
.ctrl_msg_rx = nfp_flower_cmsg_rx,
.sriov_enable = nfp_flower_sriov_enable,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 90045bab95bf..b858bac47621 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -20,7 +20,6 @@ struct nfp_fl_pre_lag;
struct net_device;
struct nfp_app;
-#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff)
#define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \
init_unalloc)
#define NFP_FLOWER_MASK_ENTRY_RS 256
@@ -72,7 +71,6 @@ struct nfp_mtu_conf {
/**
* struct nfp_fl_lag - Flower APP priv data for link aggregation
- * @lag_nb: Notifier to track master/slave events
* @work: Work queue for writing configs to the HW
* @lock: Lock to protect lag_group_list
* @group_list: List of all master/slave groups offloaded
@@ -85,7 +83,6 @@ struct nfp_mtu_conf {
* retransmission
*/
struct nfp_fl_lag {
- struct notifier_block lag_nb;
struct delayed_work work;
struct mutex lock;
struct list_head group_list;
@@ -126,13 +123,13 @@ struct nfp_fl_lag {
* @nfp_neigh_off_lock: Lock for the neighbour address list
* @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs
* @nfp_mac_off_count: Number of MACs in address list
- * @nfp_tun_mac_nb: Notifier to monitor link state
* @nfp_tun_neigh_nb: Notifier to monitor neighbour state
* @reify_replies: atomically stores the number of replies received
* from firmware for repr reify
* @reify_wait_queue: wait queue for repr reify response counting
* @mtu_conf: Configuration of repr MTU value
* @nfp_lag: Link aggregation data block
+ * @indr_block_cb_priv: List of priv data passed to indirect block cbs
*/
struct nfp_flower_priv {
struct nfp_app *app;
@@ -160,12 +157,12 @@ struct nfp_flower_priv {
spinlock_t nfp_neigh_off_lock;
struct ida nfp_mac_off_ids;
int nfp_mac_off_count;
- struct notifier_block nfp_tun_mac_nb;
struct notifier_block nfp_tun_neigh_nb;
atomic_t reify_replies;
wait_queue_head_t reify_wait_queue;
struct nfp_mtu_conf mtu_conf;
struct nfp_fl_lag nfp_lag;
+ struct list_head indr_block_cb_priv;
};
/**
@@ -209,7 +206,6 @@ struct nfp_fl_payload {
char *unmasked_data;
char *mask_data;
char *action_data;
- bool ingress_offload;
};
extern const struct rhashtable_params nfp_flower_table_params;
@@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app);
int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
enum tc_setup_type type, void *type_data);
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
@@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app,
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx);
+ struct net_device *netdev);
struct nfp_fl_payload *
nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie);
@@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb);
int nfp_tunnel_config_start(struct nfp_app *app);
void nfp_tunnel_config_stop(struct nfp_app *app);
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
void nfp_tunnel_write_macs(struct nfp_app *app);
void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb);
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv);
void nfp_flower_lag_init(struct nfp_fl_lag *lag);
void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag);
int nfp_flower_lag_reset(struct nfp_fl_lag *lag);
+int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv,
+ struct net_device *netdev,
+ unsigned long event, void *ptr);
bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb);
int nfp_flower_lag_populate_pre_action(struct nfp_app *app,
struct net_device *master,
struct nfp_fl_pre_lag *pre_act);
int nfp_flower_lag_get_output_id(struct nfp_app *app,
struct net_device *master);
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e54fb6034326..cdf75595f627 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port,
return 0;
}
- if (tun_type)
+ if (tun_type) {
frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
- else
+ } else {
+ if (!cmsg_port)
+ return -EOPNOTSUPP;
frame->in_port = cpu_to_be32(cmsg_port);
+ }
return 0;
}
@@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame,
}
}
-int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
+int nfp_flower_compile_flow_match(struct nfp_app *app,
+ struct tc_cls_flower_offload *flow,
struct nfp_fl_key_ls *key_ls,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow,
enum nfp_flower_tun_type tun_type)
{
- struct nfp_repr *netdev_repr;
+ u32 cmsg_port = 0;
int err;
u8 *ext;
u8 *msk;
+ if (nfp_netdev_is_nfp_repr(netdev))
+ cmsg_port = nfp_repr_get_port_id(netdev);
+
memset(nfp_flow->unmasked_data, 0, key_ls->key_size);
memset(nfp_flow->mask_data, 0, key_ls->key_size);
@@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
/* Populate Exact Port data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext,
- nfp_repr_get_port_id(netdev),
- false, tun_type);
+ cmsg_port, false, tun_type);
if (err)
return err;
/* Populate Mask Port Data. */
err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
- nfp_repr_get_port_id(netdev),
- true, tun_type);
+ cmsg_port, true, tun_type);
if (err)
return err;
@@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow,
msk += sizeof(struct nfp_flower_ipv4_udp_tun);
/* Configure tunnel end point MAC. */
- if (nfp_netdev_is_nfp_repr(netdev)) {
- netdev_repr = netdev_priv(netdev);
- nfp_tunnel_write_macs(netdev_repr->app);
-
- /* Store the tunnel destination in the rule data.
- * This must be present and be an exact match.
- */
- nfp_flow->nfp_tun_ipv4_addr = tun_dst;
- nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst);
- }
+ nfp_tunnel_write_macs(app);
+
+ /* Store the tunnel destination in the rule data.
+ * This must be present and be an exact match.
+ */
+ nfp_flow->nfp_tun_ipv4_addr = tun_dst;
+ nfp_tunnel_add_ipv4_off(app, tun_dst);
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
err = nfp_flower_compile_geneve_opt(ext, flow, false);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
index 48729bf171e0..573a4400a26c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c
@@ -21,7 +21,6 @@ struct nfp_mask_id_table {
struct nfp_fl_flow_table_cmp_arg {
struct net_device *netdev;
unsigned long cookie;
- __be32 host_ctx;
};
static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id)
@@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
/* Must be called with either RTNL or rcu_read_lock */
struct nfp_fl_payload *
nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie,
- struct net_device *netdev, __be32 host_ctx)
+ struct net_device *netdev)
{
struct nfp_fl_flow_table_cmp_arg flower_cmp_arg;
struct nfp_flower_priv *priv = app->priv;
flower_cmp_arg.netdev = netdev;
flower_cmp_arg.cookie = tc_flower_cookie;
- flower_cmp_arg.host_ctx = host_ctx;
return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg,
nfp_flower_table_params);
@@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt);
nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie);
+ nfp_flow->ingress_dev = netdev;
new_mask_id = 0;
if (!nfp_check_mask_add(app, nfp_flow->mask_data,
@@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app,
priv->stats[stats_cxt].bytes = 0;
priv->stats[stats_cxt].used = jiffies;
- check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (check_entry) {
if (nfp_release_stats_entry(app, stats_cxt))
return -EINVAL;
@@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg,
const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key;
const struct nfp_fl_payload *flow_entry = obj;
- if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) &&
- (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE ||
- flow_entry->meta.host_ctx_id == cmp_arg->host_ctx))
+ if (flow_entry->ingress_dev == cmp_arg->netdev)
return flow_entry->tc_flower_cookie != cmp_arg->cookie;
return 1;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c95423ab64..545d94168874 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -56,11 +56,10 @@
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
static int
-nfp_flower_xmit_flow(struct net_device *netdev,
- struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+ u8 mtype)
{
u32 meta_len, key_len, mask_len, act_len, tot_len;
- struct nfp_repr *priv = netdev_priv(netdev);
struct sk_buff *skb;
unsigned char *msg;
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
- skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+ skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
if (!skb)
return -ENOMEM;
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
- nfp_ctrl_tx(priv->app->ctrl, skb);
+ nfp_ctrl_tx(app->ctrl, skb);
return 0;
}
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
+ struct net_device *netdev,
struct nfp_fl_key_ls *ret_key_ls,
struct tc_cls_flower_offload *flow,
- bool egress,
enum nfp_flower_tun_type *tun_type)
{
struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL,
flow->key);
- if (!egress)
- return -EOPNOTSUPP;
if (mask_enc_ctl->addr_type != 0xffff ||
enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
default:
return -EOPNOTSUPP;
}
- } else if (egress) {
- /* Reject non tunnel matches offloaded to egress repr. */
- return -EOPNOTSUPP;
+
+ /* Ensure the ingress netdev matches the expected tun type. */
+ if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+ return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
{
struct nfp_fl_payload *flow_pay;
@@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
flow_pay->nfp_tun_ipv4_addr = 0;
flow_pay->meta.flags = 0;
- flow_pay->ingress_offload = !egress;
return flow_pay;
@@ -416,7 +413,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure.
- * @egress: NFP netdev is the egress.
*
* Adds a new flow to the repeated hash structure and action payload.
*
@@ -424,46 +420,35 @@ err_free_flow:
*/
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *flow_pay;
struct nfp_fl_key_ls *key_layer;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
- if (flow_pay) {
- /* Ignore as duplicate if it has been added by different cb. */
- if (flow_pay->ingress_offload && egress)
- return 0;
- else
- return -EOPNOTSUPP;
- }
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
if (!key_layer)
return -ENOMEM;
- err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+ err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
&tun_type);
if (err)
goto err_free_key_ls;
- flow_pay = nfp_flower_allocate_new(key_layer, egress);
+ flow_pay = nfp_flower_allocate_new(key_layer);
if (!flow_pay) {
err = -ENOMEM;
goto err_free_key_ls;
}
- flow_pay->ingress_dev = egress ? NULL : netdev;
-
- err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
- tun_type);
+ err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+ flow_pay, tun_type);
if (err)
goto err_destroy_flow;
@@ -471,12 +456,11 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- err = nfp_compile_flow_metadata(app, flow, flow_pay,
- flow_pay->ingress_dev);
+ err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
if (err)
goto err_destroy_flow;
- err = nfp_flower_xmit_flow(netdev, flow_pay,
+ err = nfp_flower_xmit_flow(app, flow_pay,
NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
if (err)
goto err_destroy_flow;
@@ -487,7 +471,8 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
if (err)
goto err_destroy_flow;
- port->tc_offload_cnt++;
+ if (port)
+ port->tc_offload_cnt++;
/* Deallocate flow payload when flower rule has been destroyed. */
kfree(key_layer);
@@ -509,7 +494,6 @@ err_free_key_ls:
* @app: Pointer to the APP handle
* @netdev: netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Removes a flow from the repeated hash structure and clears the
* action payload.
@@ -518,19 +502,19 @@ err_free_key_ls:
*/
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
- struct nfp_port *port = nfp_port_from_netdev(netdev);
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
+ struct nfp_port *port = NULL;
int err;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ if (nfp_netdev_is_nfp_repr(netdev))
+ port = nfp_port_from_netdev(netdev);
+
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
- return egress ? 0 : -ENOENT;
+ return -ENOENT;
err = nfp_modify_flow_metadata(app, nfp_flow);
if (err)
@@ -539,13 +523,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
if (nfp_flow->nfp_tun_ipv4_addr)
nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
- err = nfp_flower_xmit_flow(netdev, nfp_flow,
+ err = nfp_flower_xmit_flow(app, nfp_flow,
NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
if (err)
goto err_free_flow;
err_free_flow:
- port->tc_offload_cnt--;
+ if (port)
+ port->tc_offload_cnt--;
kfree(nfp_flow->action_data);
kfree(nfp_flow->mask_data);
kfree(nfp_flow->unmasked_data);
@@ -561,7 +546,6 @@ err_free_flow:
* @app: Pointer to the APP handle
* @netdev: Netdev structure.
* @flow: TC flower classifier offload structure
- * @egress: Netdev is the egress dev.
*
* Populates a flow statistics structure which corresponds to a
* specific flow.
@@ -570,22 +554,16 @@ err_free_flow:
*/
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flow, bool egress)
+ struct tc_cls_flower_offload *flow)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_fl_payload *nfp_flow;
- struct net_device *ingr_dev;
u32 ctx_id;
- ingr_dev = egress ? NULL : netdev;
- nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
- NFP_FL_STATS_CTX_DONT_CARE);
+ nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
if (!nfp_flow)
return -EINVAL;
- if (nfp_flow->ingress_offload && egress)
- return 0;
-
ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
spin_lock_bh(&priv->stats_lock);
@@ -602,35 +580,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
- struct tc_cls_flower_offload *flower, bool egress)
+ struct tc_cls_flower_offload *flower)
{
if (!eth_proto_is_802_3(flower->common.protocol))
return -EOPNOTSUPP;
switch (flower->command) {
case TC_CLSFLOWER_REPLACE:
- return nfp_flower_add_offload(app, netdev, flower, egress);
+ return nfp_flower_add_offload(app, netdev, flower);
case TC_CLSFLOWER_DESTROY:
- return nfp_flower_del_offload(app, netdev, flower, egress);
+ return nfp_flower_del_offload(app, netdev, flower);
case TC_CLSFLOWER_STATS:
- return nfp_flower_get_stats(app, netdev, flower, egress);
- default:
- return -EOPNOTSUPP;
- }
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
- void *cb_priv)
-{
- struct nfp_repr *repr = cb_priv;
-
- if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
- return -EOPNOTSUPP;
-
- switch (type) {
- case TC_SETUP_CLSFLOWER:
- return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, true);
+ return nfp_flower_get_stats(app, netdev, flower);
default:
return -EOPNOTSUPP;
}
@@ -647,7 +608,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
switch (type) {
case TC_SETUP_CLSFLOWER:
return nfp_flower_repr_offload(repr->app, repr->netdev,
- type_data, false);
+ type_data);
default:
return -EOPNOTSUPP;
}
@@ -686,3 +647,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
return -EOPNOTSUPP;
}
}
+
+struct nfp_flower_indr_block_cb_priv {
+ struct net_device *netdev;
+ struct nfp_app *app;
+ struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+ struct net_device *netdev)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+
+ /* All callback list access should be protected by RTNL. */
+ ASSERT_RTNL();
+
+ list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+
+ return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+ struct tc_cls_flower_offload *flower = type_data;
+
+ if (flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return nfp_flower_repr_offload(priv->app, priv->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+ struct tc_block_offload *f)
+{
+ struct nfp_flower_indr_block_cb_priv *cb_priv;
+ struct nfp_flower_priv *priv = app->priv;
+ int err;
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+ if (!cb_priv)
+ return -ENOMEM;
+
+ cb_priv->netdev = netdev;
+ cb_priv->app = app;
+ list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+ err = tcf_block_cb_register(f->block,
+ nfp_flower_setup_indr_block_cb,
+ netdev, cb_priv, f->extack);
+ if (err) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return err;
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block,
+ nfp_flower_setup_indr_block_cb, netdev);
+ cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+ if (cb_priv) {
+ list_del(&cb_priv->list);
+ kfree(cb_priv);
+ }
+
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+ enum tc_setup_type type, void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event)
+{
+ int err;
+
+ if (!nfp_fl_is_netdev_to_offload(netdev))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
+ err = __tc_indr_block_cb_register(netdev, app,
+ nfp_flower_indr_setup_tc_cb,
+ netdev);
+ if (err)
+ nfp_flower_cmsg_warn(app,
+ "Indirect block reg failed - %s\n",
+ netdev->name);
+ } else if (event == NETDEV_UNREGISTER) {
+ __tc_indr_block_cb_unregister(netdev,
+ nfp_flower_indr_setup_tc_cb,
+ netdev);
+ }
+
+ return NOTIFY_OK;
+}
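
The handler above is the only netdev notifier entry point the flower app needs: NETDEV_REGISTER/UNREGISTER events drive indirect TC block setup for the devices nfp_fl_is_netdev_to_offload() accepts. As a rough, self-contained sketch of the underlying notifier pattern (module and handler names here are illustrative, not from the patch):

    #include <linux/module.h>
    #include <linux/netdevice.h>

    /* One notifier receives every netdev event and dispatches on it. */
    static int demo_netdev_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
    {
            struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

            switch (event) {
            case NETDEV_REGISTER:
                    pr_info("demo: %s registered\n", netdev->name);
                    break;
            case NETDEV_UNREGISTER:
                    pr_info("demo: %s unregistered\n", netdev->name);
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_netdev_event,
    };

    static int __init demo_init(void)
    {
            return register_netdevice_notifier(&demo_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_netdevice_notifier(&demo_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
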
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 8e5bec04d1f9..2d9f26a725c2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -4,7 +4,6 @@
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <net/netevent.h>
-#include <net/vxlan.h>
#include <linux/idr.h>
#include <net/dst_metadata.h>
#include <net/arp.h>
@@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
}
}
-static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev)
-{
- if (!netdev->rtnl_link_ops)
- return false;
- if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch"))
- return true;
- if (netif_is_vxlan(netdev))
- return true;
-
- return false;
-}
-
static int
nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata,
gfp_t flag)
@@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
if (nfp_netdev_is_nfp_repr(netdev))
port = nfp_repr_get_port_id(netdev);
- else if (!nfp_tun_is_netdev_to_offload(netdev))
+ else if (!nfp_fl_is_netdev_to_offload(netdev))
return;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
@@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
mutex_unlock(&priv->nfp_mac_off_lock);
}
-static int nfp_tun_mac_event_handler(struct notifier_block *nb,
- unsigned long event, void *ptr)
+int nfp_tunnel_mac_event_handler(struct nfp_app *app,
+ struct net_device *netdev,
+ unsigned long event, void *ptr)
{
- struct nfp_flower_priv *app_priv;
- struct net_device *netdev;
- struct nfp_app *app;
-
if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
/* If non-nfp netdev then free its offload index. */
- if (nfp_tun_is_netdev_to_offload(netdev))
+ if (nfp_fl_is_netdev_to_offload(netdev))
nfp_tun_del_mac_idx(app, netdev->ifindex);
} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
event == NETDEV_REGISTER) {
- app_priv = container_of(nb, struct nfp_flower_priv,
- nfp_tun_mac_nb);
- app = app_priv->app;
- netdev = netdev_notifier_info_to_dev(ptr);
-
nfp_tun_add_to_mac_offload_list(netdev, app);
/* Force a list write to keep NFP up to date. */
@@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb,
int nfp_tunnel_config_start(struct nfp_app *app)
{
struct nfp_flower_priv *priv = app->priv;
- struct net_device *netdev;
- int err;
/* Initialise priv data for MAC offloading. */
priv->nfp_mac_off_count = 0;
mutex_init(&priv->nfp_mac_off_lock);
INIT_LIST_HEAD(&priv->nfp_mac_off_list);
- priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler;
mutex_init(&priv->nfp_mac_index_lock);
INIT_LIST_HEAD(&priv->nfp_mac_index_list);
ida_init(&priv->nfp_mac_off_ids);
@@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app)
INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
- err = register_netdevice_notifier(&priv->nfp_tun_mac_nb);
- if (err)
- goto err_free_mac_ida;
-
- err = register_netevent_notifier(&priv->nfp_tun_neigh_nb);
- if (err)
- goto err_unreg_mac_nb;
-
- /* Parse netdevs already registered for MACs that need offloaded. */
- rtnl_lock();
- for_each_netdev(&init_net, netdev)
- nfp_tun_add_to_mac_offload_list(netdev, app);
- rtnl_unlock();
-
- return 0;
-
-err_unreg_mac_nb:
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
-err_free_mac_ida:
- ida_destroy(&priv->nfp_mac_off_ids);
- return err;
+ return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
}
void nfp_tunnel_config_stop(struct nfp_app *app)
@@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app)
struct nfp_ipv4_addr_entry *ip_entry;
struct list_head *ptr, *storage;
- unregister_netdevice_notifier(&priv->nfp_tun_mac_nb);
unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
/* Free any memory that may be occupied by MAC list. */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c
index 68a0991aac22..4a1b8f79e731 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c
@@ -136,6 +136,53 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
return old;
}
+static int
+nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct net_device *netdev;
+ struct nfp_app *app;
+
+ netdev = netdev_notifier_info_to_dev(ptr);
+ app = container_of(nb, struct nfp_app, netdev_nb);
+
+ if (app->type->netdev_event)
+ return app->type->netdev_event(app, netdev, event, ptr);
+ return NOTIFY_DONE;
+}
+
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
+{
+ int err;
+
+ app->ctrl = ctrl;
+
+ if (app->type->start) {
+ err = app->type->start(app);
+ if (err)
+ return err;
+ }
+
+ app->netdev_nb.notifier_call = nfp_app_netdev_event;
+ err = register_netdevice_notifier(&app->netdev_nb);
+ if (err)
+ goto err_app_stop;
+
+ return 0;
+
+err_app_stop:
+ if (app->type->stop)
+ app->type->stop(app);
+ return err;
+}
+
+void nfp_app_stop(struct nfp_app *app)
+{
+ unregister_netdevice_notifier(&app->netdev_nb);
+
+ if (app->type->stop)
+ app->type->stop(app);
+}
+
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index 4d6ecf99b1cc..d578d856a009 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm;
* @port_get_stats_strings: get strings for extra statistics
* @start: start application logic
* @stop: stop application logic
+ * @netdev_event: Netdevice notifier event
* @ctrl_msg_rx: control message handler
* @ctrl_msg_rx_raw: handler for control messages from data queues
* @setup_tc: setup TC ndo
@@ -122,6 +123,9 @@ struct nfp_app_type {
int (*start)(struct nfp_app *app);
void (*stop)(struct nfp_app *app);
+ int (*netdev_event)(struct nfp_app *app, struct net_device *netdev,
+ unsigned long event, void *ptr);
+
void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb);
void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data,
unsigned int len);
@@ -151,6 +155,7 @@ struct nfp_app_type {
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
* @ctrl_mtu: MTU to set on the control vNIC (set in .init())
+ * @netdev_nb: Netdevice notifier block
* @priv: app-specific priv data
*/
struct nfp_app {
@@ -163,6 +168,9 @@ struct nfp_app {
const struct nfp_app_type *type;
unsigned int ctrl_mtu;
+
+ struct notifier_block netdev_nb;
+
void *priv;
};
@@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
return app->type->repr_change_mtu(app, netdev, new_mtu);
}
-static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl)
-{
- app->ctrl = ctrl;
- if (!app->type->start)
- return 0;
- return app->type->start(app);
-}
-
-static inline void nfp_app_stop(struct nfp_app *app)
-{
- if (!app->type->stop)
- return;
- app->type->stop(app);
-}
-
static inline const char *nfp_app_name(struct nfp_app *app)
{
if (!app)
@@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
+int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl);
+void nfp_app_stop(struct nfp_app *app);
/* Callbacks shared between apps */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 6f0c37d09256..dda02fefc806 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -851,7 +851,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
void __iomem *ctrl_bar);
struct nfp_net *
-nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
unsigned int max_tx_rings, unsigned int max_rx_rings);
void nfp_net_free(struct nfp_net *nn);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6bddfcfdec34..a0343f25068a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -890,8 +890,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_end(&r_vec->tx_sync);
}
- netdev_tx_sent_queue(nd_q, txbuf->real_len);
-
skb_tx_timestamp(skb);
tx_ring->wr_p += nr_frags + 1;
@@ -899,7 +897,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (!skb->xmit_more || netif_xmit_stopped(nd_q))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
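
This hunk folds BQL accounting and the xmit_more decision into one call: the then-new __netdev_tx_sent_queue() charges the bytes to BQL and returns true when the doorbell must be written now. A hedged sketch of the resulting transmit-path shape, with ring_doorbell() standing in for the driver's MMIO kick:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void ring_doorbell(struct net_device *dev)
    {
            /* placeholder: write the queue tail pointer to the NIC here */
    }

    static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            /* ... post skb's descriptors to the hardware ring ... */

            /* Charges skb->len to BQL and returns true when the kick is
             * needed now: end of an xmit_more train, or queue stopped.
             */
            if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
                    ring_doorbell(dev);

            return NETDEV_TX_OK;
    }
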
@@ -3560,6 +3558,7 @@ void nfp_net_info(struct nfp_net *nn)
/**
* nfp_net_alloc() - Allocate netdev and related structure
* @pdev: PCI device
+ * @ctrl_bar: PCI IOMEM with vNIC config memory
* @needs_netdev: Whether to allocate a netdev for this vNIC
* @max_tx_rings: Maximum number of TX rings supported by device
* @max_rx_rings: Maximum number of RX rings supported by device
@@ -3570,11 +3569,12 @@ void nfp_net_info(struct nfp_net *nn)
*
* Return: NFP Net device structure, or ERR_PTR on error.
*/
-struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
- unsigned int max_tx_rings,
- unsigned int max_rx_rings)
+struct nfp_net *
+nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev,
+ unsigned int max_tx_rings, unsigned int max_rx_rings)
{
struct nfp_net *nn;
+ int err;
if (needs_netdev) {
struct net_device *netdev;
@@ -3594,6 +3594,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
}
nn->dp.dev = &pdev->dev;
+ nn->dp.ctrl_bar = ctrl_bar;
nn->pdev = pdev;
nn->max_tx_rings = max_tx_rings;
@@ -3616,7 +3617,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);
+ err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
+ &nn->tlv_caps);
+ if (err)
+ goto err_free_nn;
+
return nn;
+
+err_free_nn:
+ if (nn->dp.netdev)
+ free_netdev(nn->dp.netdev);
+ else
+ vfree(nn);
+ return ERR_PTR(err);
}
/**
@@ -3889,11 +3902,6 @@ int nfp_net_init(struct nfp_net *nn)
nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
}
- err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
- &nn->tlv_caps);
- if (err)
- return err;
-
if (nn->dp.netdev)
nfp_net_netdev_init(nn);
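
nfp_net_alloc() can now fail after the netdev/vNIC allocation because it parses the TLV capabilities itself, so it unwinds and reports the error through ERR_PTR(). A minimal sketch of that allocate-then-unwind idiom (demo_alloc() is illustrative):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo { int val; };

    static struct demo *demo_alloc(int val)
    {
            struct demo *d = kzalloc(sizeof(*d), GFP_KERNEL);
            int err;

            if (!d)
                    return ERR_PTR(-ENOMEM);

            if (val < 0) {          /* a late failure after allocation */
                    err = -EINVAL;
                    goto err_free;
            }
            d->val = val;
            return d;

    err_free:
            kfree(d);
            return ERR_PTR(err);
    }

    /* Caller side, as in nfp_net_pf_alloc_vnic():
     *
     *      d = demo_alloc(v);
     *      if (IS_ERR(d))
     *              return PTR_ERR(d);
     */
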
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 1e7d20468a34..08f5fdbd8e41 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
/* Allocate and initialise the vNIC */
- nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
+ nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev,
+ n_tx_rings, n_rx_rings);
if (IS_ERR(nn))
return nn;
nn->app = pf->app;
nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
- nn->dp.ctrl_bar = ctrl_bar;
nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
nn->dp.is_vf = 0;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index d2c1e9ea5668..1145849ca7ba 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
rx_bar_off = NFP_PCIE_QUEUE(startq);
/* Allocate and initialise the netdev */
- nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings);
+ nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings);
if (IS_ERR(nn)) {
err = PTR_ERR(nn);
goto err_ctrl_unmap;
@@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
vf->nn = nn;
nn->fw_ver = fw_ver;
- nn->dp.ctrl_bar = ctrl_bar;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 25382f8fbb70..bd8695a4faaa 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev)
phy_set_max_speed(phydev, SPEED_100);
- phydev->advertising = phydev->supported;
-
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index a9f1bc013364..1450f386bc65 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = {
"Transmit ring full",
"SPI errors",
"Write verify errors",
+ "Buffer available errors",
};
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index d5310504f436..97f92953bdb9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca)
qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available);
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only be caused by interference on the SPI line,
+ * so retry later.
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ }
+
while (qca->txr.skb[qca->txr.head]) {
pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN;
@@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca)
netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
available);
- if (available == 0) {
+ if (available > QCASPI_HW_BUF_LEN) {
+ /* This can only be caused by interference on the SPI line,
+ * so retry later.
+ */
+ qca->stats.buf_avail_err++;
+ return -1;
+ } else if (available == 0) {
netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n");
return -1;
}
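
Both qca_spi hunks apply the same defensive rule: a hardware-reported availability count larger than the physical buffer can only come from corruption on the SPI bus, so it is counted and rejected instead of trusted. A tiny sketch of the idiom (DEMO_HW_BUF_LEN stands in for QCASPI_HW_BUF_LEN; the value is illustrative):

    #include <linux/types.h>

    #define DEMO_HW_BUF_LEN 4096    /* stand-in for the real buffer size */

    /* Reject implausible hardware-reported counters rather than act on them. */
    static int demo_check_available(u32 available, u64 *buf_avail_err)
    {
            if (available > DEMO_HW_BUF_LEN) {
                    (*buf_avail_err)++;     /* surfaced via ethtool -S */
                    return -1;              /* caller retries later */
            }
            return 0;
    }
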
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index 2d2c49726492..eb9af45fcc5e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -74,6 +74,7 @@ struct qcaspi_stats {
u64 ring_full;
u64 spi_err;
u64 write_verify_failed;
+ u64 buf_avail_err;
};
struct qcaspi {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1fd01688d37b..b3010cc51cdd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -224,7 +224,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
+ { PCI_DEVICE(PCI_VENDOR_ID_USR, 0x0116), 0, 0, RTL_CFG_0 },
{ PCI_VENDOR_ID_LINKSYS, 0x1032,
PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
{ 0x0001, 0x8168,
@@ -6584,7 +6584,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
phy_set_max_speed(phydev, SPEED_100);
/* Ensure to advertise everything, incl. pause */
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
phy_attached_info(phydev);
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 7eeac3d6cfe8..b6a50058bb8d 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
{ NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
{ NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
{ NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+ /* MUM and SUC firmware share the same partition type */
+ { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" },
+ { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" }
};
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
@@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
part->common.mtd.flags = MTD_CAP_NORFLASH;
part->common.mtd.size = size;
part->common.mtd.erasesize = erase_size;
+ /* sfc_status is read-only */
+ if (!erase_size)
+ part->common.mtd.flags |= MTD_NO_ERASE;
return 0;
}
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c3ad564ac4c0..22eb059086f7 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
goto err;
- /* Update BQL */
- netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
-
efx_tx_maybe_stop_queue(tx_queue);
/* Pass off to hardware */
- if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index d9d0d03e4ce7..bba9733b5119 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -234,6 +234,9 @@
#define DESC_NUM 256
+#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define NETSEC_RX_BUF_SZ 1536
+
#define DESC_SZ sizeof(struct netsec_de)
#define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000)
@@ -571,34 +574,10 @@ static const struct ethtool_ops netsec_ethtool_ops = {
/************* NETDEV_OPS FOLLOW *************/
-static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv,
- struct netsec_desc *desc)
-{
- struct sk_buff *skb;
-
- if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) {
- skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len);
- } else {
- desc->len = L1_CACHE_ALIGN(desc->len);
- skb = netdev_alloc_skb(priv->ndev, desc->len);
- }
- if (!skb)
- return NULL;
-
- desc->addr = skb->data;
- desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->dev, desc->dma_addr)) {
- dev_kfree_skb_any(skb);
- return NULL;
- }
- return skb;
-}
static void netsec_set_rx_de(struct netsec_priv *priv,
struct netsec_desc_ring *dring, u16 idx,
- const struct netsec_desc *desc,
- struct sk_buff *skb)
+ const struct netsec_desc *desc)
{
struct netsec_de *de = dring->vaddr + DESC_SZ * idx;
u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) |
@@ -617,59 +596,6 @@ static void netsec_set_rx_de(struct netsec_priv *priv,
dring->desc[idx].dma_addr = desc->dma_addr;
dring->desc[idx].addr = desc->addr;
dring->desc[idx].len = desc->len;
- dring->desc[idx].skb = skb;
-}
-
-static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv,
- struct netsec_desc_ring *dring,
- u16 idx,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc, u16 *len)
-{
- struct netsec_de de = {};
-
- memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ);
-
- *len = de.buf_len_info >> 16;
-
- rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
- rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) &
- NETSEC_RX_PKT_ERR_MASK;
- *desc = dring->desc[idx];
- return desc->skb;
-}
-
-static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv,
- struct netsec_rx_pkt_info *rxpi,
- struct netsec_desc *desc,
- u16 *len)
-{
- struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct sk_buff *tmp_skb, *skb = NULL;
- struct netsec_desc td;
- int tail;
-
- *rxpi = (struct netsec_rx_pkt_info){};
-
- td.len = priv->ndev->mtu + 22;
-
- tmp_skb = netsec_alloc_skb(priv, &td);
-
- tail = dring->tail;
-
- if (!tmp_skb) {
- netsec_set_rx_de(priv, dring, tail, &dring->desc[tail],
- dring->desc[tail].skb);
- } else {
- skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len);
- netsec_set_rx_de(priv, dring, tail, &td, tmp_skb);
- }
-
- /* move tail ahead */
- dring->tail = (dring->tail + 1) % DESC_NUM;
-
- return skb;
}
static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget)
@@ -736,19 +662,65 @@ static int netsec_process_tx(struct netsec_priv *priv, int budget)
return done;
}
+static void *netsec_alloc_rx_data(struct netsec_priv *priv,
+ dma_addr_t *dma_handle, u16 *desc_len)
+{
+ size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ size_t payload_len = NETSEC_RX_BUF_SZ;
+ dma_addr_t mapping;
+ void *buf;
+
+ total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD);
+
+ buf = napi_alloc_frag(total_len);
+ if (!buf)
+ return NULL;
+
+ mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, mapping)))
+ goto err_out;
+
+ *dma_handle = mapping;
+ *desc_len = payload_len;
+
+ return buf;
+
+err_out:
+ skb_free_frag(buf);
+ return NULL;
+}
+
+static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num)
+{
+ struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+ u16 idx = from;
+
+ while (num) {
+ netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]);
+ idx++;
+ if (idx >= DESC_NUM)
+ idx = 0;
+ num--;
+ }
+}
+
static int netsec_process_rx(struct netsec_priv *priv, int budget)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
struct net_device *ndev = priv->ndev;
struct netsec_rx_pkt_info rx_info;
- int done = 0;
- struct netsec_desc desc;
struct sk_buff *skb;
- u16 len;
+ int done = 0;
while (done < budget) {
u16 idx = dring->tail;
struct netsec_de *de = dring->vaddr + (DESC_SZ * idx);
+ struct netsec_desc *desc = &dring->desc[idx];
+ u16 pkt_len, desc_len;
+ dma_addr_t dma_handle;
+ void *buf_addr;
+ u32 truesize;
if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) {
/* reading the register clears the irq */
@@ -762,18 +734,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
*/
dma_rmb();
done++;
- skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len);
- if (unlikely(!skb) || rx_info.err_flag) {
+
+ pkt_len = de->buf_len_info >> 16;
+ rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) &
+ NETSEC_RX_PKT_ERR_MASK;
+ rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1;
+ if (rx_info.err_flag) {
netif_err(priv, drv, priv->ndev,
- "%s: rx fail err(%d)\n",
- __func__, rx_info.err_code);
+ "%s: rx fail err(%d)\n", __func__,
+ rx_info.err_code);
ndev->stats.rx_dropped++;
+ dring->tail = (dring->tail + 1) % DESC_NUM;
+ /* reuse buffer page frag */
+ netsec_rx_fill(priv, idx, 1);
continue;
}
+ rx_info.rx_cksum_result =
+ (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3;
- dma_unmap_single(priv->dev, desc.dma_addr, desc.len,
- DMA_FROM_DEVICE);
- skb_put(skb, len);
+ /* Allocate a fresh buffer and map it to the hardware.
+ * It will replace the old buffer in the ring below.
+ */
+ buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len);
+ if (unlikely(!buf_addr))
+ break;
+
+ dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(desc->addr);
+
+ truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ skb = build_skb(desc->addr, truesize);
+ if (unlikely(!skb)) {
+ /* free the newly allocated buffer, we are not going to
+ * use it
+ */
+ dma_unmap_single(priv->dev, dma_handle, desc_len,
+ DMA_FROM_DEVICE);
+ skb_free_frag(buf_addr);
+ netif_err(priv, drv, priv->ndev,
+ "rx failed to build skb\n");
+ break;
+ }
+ dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+
+ /* Update the descriptor with the new buffer we allocated */
+ desc->len = desc_len;
+ desc->dma_addr = dma_handle;
+ desc->addr = buf_addr;
+
+ skb_reserve(skb, NETSEC_SKB_PAD);
+ skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, priv->ndev);
if (priv->rx_cksum_offload_flag &&
@@ -782,8 +795,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) {
ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += len;
+ ndev->stats.rx_bytes += pkt_len;
}
+
+ netsec_rx_fill(priv, idx, 1);
+ dring->tail = (dring->tail + 1) % DESC_NUM;
}
return done;
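
The rewritten RX path wraps the DMA page fragment with build_skb() instead of copying into a preallocated skb, so the fragment must be sized for headroom, payload, and the trailing struct skb_shared_info. A sketch of the matching truesize arithmetic and wrap step, assuming the same layout constants as the patch:

    #include <linux/skbuff.h>

    #define DEMO_SKB_PAD    (NET_SKB_PAD + NET_IP_ALIGN)
    #define DEMO_RX_BUF_SZ  1536

    /* Fragment size build_skb() expects: aligned headroom plus payload,
     * then an aligned skb_shared_info at the end.
     */
    static unsigned int demo_rx_truesize(void)
    {
            return SKB_DATA_ALIGN(DEMO_RX_BUF_SZ + DEMO_SKB_PAD) +
                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
    }

    /* 'data' points at the start of the fragment; the packet itself
     * begins after the reserved headroom.
     */
    static struct sk_buff *demo_wrap_frag(void *data, unsigned int pkt_len)
    {
            struct sk_buff *skb = build_skb(data, demo_rx_truesize());

            if (!skb)
                    return NULL;
            skb_reserve(skb, DEMO_SKB_PAD);
            skb_put(skb, pkt_len);
            return skb;
    }
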
@@ -946,7 +962,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id)
dma_unmap_single(priv->dev, desc->dma_addr, desc->len,
id == NETSEC_RING_RX ? DMA_FROM_DEVICE :
DMA_TO_DEVICE);
- dev_kfree_skb(desc->skb);
+ if (id == NETSEC_RING_RX)
+ skb_free_frag(desc->addr);
+ else if (id == NETSEC_RING_TX)
+ dev_kfree_skb(desc->skb);
}
memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM);
@@ -977,47 +996,50 @@ static void netsec_free_dring(struct netsec_priv *priv, int id)
static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id)
{
struct netsec_desc_ring *dring = &priv->desc_ring[id];
- int ret = 0;
dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM,
&dring->desc_dma, GFP_KERNEL);
- if (!dring->vaddr) {
- ret = -ENOMEM;
+ if (!dring->vaddr)
goto err;
- }
dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL);
- if (!dring->desc) {
- ret = -ENOMEM;
+ if (!dring->desc)
goto err;
- }
return 0;
err:
netsec_free_dring(priv, id);
- return ret;
+ return -ENOMEM;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
- struct netsec_desc desc;
- struct sk_buff *skb;
- int n;
+ int i;
- desc.len = priv->ndev->mtu + 22;
+ for (i = 0; i < DESC_NUM; i++) {
+ struct netsec_desc *desc = &dring->desc[i];
+ dma_addr_t dma_handle;
+ void *buf;
+ u16 len;
- for (n = 0; n < DESC_NUM; n++) {
- skb = netsec_alloc_skb(priv, &desc);
- if (!skb) {
+ buf = netsec_alloc_rx_data(priv, &dma_handle, &len);
+ if (!buf) {
netsec_uninit_pkt_dring(priv, NETSEC_RING_RX);
- return -ENOMEM;
+ goto err_out;
}
- netsec_set_rx_de(priv, dring, n, &desc, skb);
+ desc->dma_addr = dma_handle;
+ desc->addr = buf;
+ desc->len = len;
}
+ netsec_rx_fill(priv, 0, DESC_NUM);
+
return 0;
+
+err_out:
+ return -ENOMEM;
}
static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg,
@@ -1377,6 +1399,8 @@ static int netsec_netdev_init(struct net_device *ndev)
int ret;
u16 data;
+ BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM);
+
ret = netsec_alloc_dring(priv, NETSEC_RING_TX);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 6732f5cbde08..9e7391faa1dc 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1117,7 +1117,7 @@ static void ave_phy_adjust_link(struct net_device *ndev)
if (phydev->asym_pause)
rmt_adv |= LPA_PAUSE_ASYM;
- lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising);
+ lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
if (cap & FLOW_CTRL_TX)
txcr |= AVE_TXCR_FLOCTR;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864fa809..d1f61c25d82b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return;
} else {
- if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
- !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ netdev->phydev->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ netdev->phydev->supported))
return;
}
@@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (!adv_lp.pause)
return -EOPNOTSUPP;
} else {
- if (!(phy->supported & SUPPORTED_Pause) ||
- !(phy->supported & SUPPORTED_Asym_Pause))
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phy->supported) ||
+ !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phy->supported))
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 500f7ed8c58c..e4aa030f1726 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -283,7 +283,7 @@ struct cpsw_ss_regs {
#define CTRL_V2_TS_BITS \
(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
- TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
@@ -293,7 +293,7 @@ struct cpsw_ss_regs {
#define CTRL_V3_TS_BITS \
(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
- TS_LTYPE1_EN)
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
@@ -466,6 +466,8 @@ struct cpsw_priv {
bool mqprio_hw;
int fifo_bw[CPSW_TC_NUM];
int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
u32 emac_port;
struct cpsw_common *cpsw;
};
@@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
(func)(slave++, ##arg); \
} while (0)
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid);
+
static inline int cpsw_get_slave_port(u32 slave_num)
{
return slave_num + 1;
}
-static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
-{
- struct cpsw_common *cpsw = priv->cpsw;
-
- if (cpsw->data.dual_emac) {
- struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
- ALE_VLAN, slave->port_vlan, 0);
- return;
- }
-
- cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
-}
-
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
- __dev_mc_unsync(ndev, NULL);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
}
}
-static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+/**
+ * cpsw_set_mc - add or delete a multicast entry in the ALE table
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id; if vid < 0, set/unset the address for the real device
+ * @add: add the address if the flag is set, remove it otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret;
+
+ if (vid < 0) {
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ else
+ vid = 0;
+ }
+
+ mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
- cpsw_add_mcast(priv, addr);
return 0;
}
-static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
- int vid, flags;
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
- if (cpsw->data.dual_emac) {
- vid = cpsw->slaves[priv->emac_port].port_vlan;
- flags = ALE_VLAN;
- } else {
- vid = 0;
- flags = 0;
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
}
- cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
return 0;
}
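
The cpsw conversion from __dev_mc_sync() to __hw_addr_ref_sync_dev() exists so the sync/unsync callbacks also see the reference count: combined with vlan_for_each(), the driver can tell whether an address is still in use on some VLAN before touching the ALE. A rough sketch of the callback shape, assuming the __hw_addr_ref_sync_dev() helper introduced alongside this patch:

    #include <linux/netdevice.h>

    /* 'num' is the number of references the address currently holds. */
    static int demo_sync_mc(struct net_device *ndev, const u8 *addr, int num)
    {
            /* program 'addr' into the hardware filter */
            return 0;
    }

    static int demo_unsync_mc(struct net_device *ndev, const u8 *addr, int num)
    {
            /* drop 'addr' once no user (real device or VLAN) remains */
            return 0;
    }

    static void demo_set_rx_mode(struct net_device *ndev)
    {
            __hw_addr_ref_sync_dev(&ndev->mc, ndev, demo_sync_mc,
                                   demo_unsync_mc);
    }
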
@@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
/* Restore allmulti on vlans if necessary */
cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);
- __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
}
static void cpsw_intr_enable(struct cpsw_common *cpsw)
@@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct net_device *ndev = skb->dev;
int ret = 0, port;
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv;
if (cpsw->data.dual_emac) {
port = CPDMA_RX_SOURCE_PORT(status);
@@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status)
skb_put(skb, len);
if (status & CPDMA_RX_VLAN_ENCAP)
cpsw_rx_vlan_encap(skb);
- cpts_rx_timestamp(cpsw->cpts, skb);
+ priv = netdev_priv(ndev);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
@@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, tx_prio_map, tx_prio_rg);
}
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
/* restore resources after port reset */
static void cpsw_restore(struct cpsw_priv *priv)
{
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
/* restore MQPRIO offload */
for_each_slave(priv, cpsw_mqprio_resume, priv);
@@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
struct cpsw_common *cpsw = priv->cpsw;
cpsw_info(priv, ifdown, "shutting down cpsw device\n");
- __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
@@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
}
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
q_idx = skb_get_queue_mapping(skb);
@@ -2047,13 +2175,13 @@ fail:
#if IS_ENABLED(CONFIG_TI_CPTS)
-static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
+ struct cpsw_common *cpsw = priv->cpsw;
struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
u32 ts_en, seq_id;
- if (!cpts_is_tx_enabled(cpsw->cpts) &&
- !cpts_is_rx_enabled(cpsw->cpts)) {
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
slave_write(slave, 0, CPSW1_TS_CTL);
return;
}
@@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ts_en |= CPSW_V1_TS_TX_EN;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ts_en |= CPSW_V1_TS_RX_EN;
slave_write(slave, ts_en, CPSW1_TS_CTL);
@@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
case CPSW_VERSION_2:
ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V2_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V2_RX_TS_BITS;
break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
- if (cpts_is_tx_enabled(cpsw->cpts))
+ if (priv->tx_ts_enabled)
ctrl |= CTRL_V3_TX_TS_BITS;
- if (cpts_is_rx_enabled(cpsw->cpts))
+ if (priv->rx_ts_enabled)
ctrl |= CTRL_V3_RX_TS_BITS;
break;
}
@@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
slave_write(slave, ctrl, CPSW2_CONTROL);
writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}
static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
@@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
struct cpsw_common *cpsw = priv->cpsw;
- struct cpts *cpts = cpsw->cpts;
if (cpsw->version != CPSW_VERSION_1 &&
cpsw->version != CPSW_VERSION_2 &&
@@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ priv->rx_ts_enabled = 0;
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
}
- cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
switch (cpsw->version) {
case CPSW_VERSION_1:
- cpsw_hwtstamp_v1(cpsw);
+ cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
@@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
struct cpsw_common *cpsw = ndev_to_cpsw(dev);
- struct cpts *cpts = cpsw->cpts;
+ struct cpsw_priv *priv = netdev_priv(dev);
struct hwtstamp_config cfg;
if (cpsw->version != CPSW_VERSION_1 &&
@@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
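
With the enable state moved from the shared cpts instance into each port's private data, SIOCGHWTSTAMP just reflects the port's own fields. A minimal sketch of the getter shape (demo_priv is illustrative):

    #include <linux/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/uaccess.h>

    struct demo_priv {
            int tx_ts_enabled;
            int rx_ts_enabled;      /* holds a HWTSTAMP_FILTER_* value */
    };

    static int demo_hwtstamp_get(struct demo_priv *priv, struct ifreq *ifr)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = priv->tx_ts_enabled ? HWTSTAMP_TX_ON
                                                     : HWTSTAMP_TX_OFF,
                    .rx_filter = priv->rx_ts_enabled,
            };

            return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
                   -EFAULT : 0;
    }
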
@@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index b96b93c686bf..054f78295d1d 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts)
return removed ? 0 : -1;
}
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
{
struct sk_buff *skb, *tmp;
@@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
if (time_after(jiffies, skb_cb->tmo)) {
/* timeout any expired skbs over 1s */
- dev_dbg(cpts->dev,
- "expiring tx timestamp mtype %u seqid %04x\n",
- mtype, seqid);
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
__skb_unlink(skb, &cpts->txq);
dev_consume_skb_any(skb);
}
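
cpts_purge_txq() and the existing match path share one idiom: walk the deferred-timestamp queue with the _safe iterator, since entries are unlinked mid-walk, and expire anything whose jiffies deadline has passed. A generic sketch (the caller is assumed to hold the lock that protects the queue, as cpts does with cpts->lock):

    #include <linux/jiffies.h>
    #include <linux/skbuff.h>

    struct demo_cb {
            unsigned long tmo;      /* deadline stashed in skb->cb at enqueue */
    };

    static int demo_purge_expired(struct sk_buff_head *q)
    {
            struct sk_buff *skb, *tmp;
            int removed = 0;

            skb_queue_walk_safe(q, skb, tmp) {
                    struct demo_cb *cb = (struct demo_cb *)skb->cb;

                    if (time_after(jiffies, cb->tmo)) {
                            __skb_unlink(skb, q);   /* lock held by caller */
                            dev_consume_skb_any(skb);
                            removed++;
                    }
            }
            return removed;
    }
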
@@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp)
spin_lock_irqsave(&cpts->lock, flags);
ts = ns_to_timespec64(timecounter_read(&cpts->tc));
- if (!skb_queue_empty(&cpts->txq))
- delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
spin_unlock_irqrestore(&cpts->lock, flags);
pr_debug("cpts overflow check at %lld.%09ld\n",
@@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
u64 ns;
struct skb_shared_hwtstamps *ssh;
- if (!cpts->rx_enable)
- return;
ns = cpts_find_ts(cpts, skb, CPTS_EV_RX);
if (!ns)
return;
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
index 73d73faf0f38..d2c7decd59b6 100644
--- a/drivers/net/ethernet/ti/cpts.h
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
void cpts_release(struct cpts *cpts);
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
- cpts->rx_enable = enable;
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return !!cpts->rx_enable;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
- cpts->tx_enable = enable;
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return !!cpts->tx_enable;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
unsigned int class = ptp_classify_raw(skb);
@@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts)
{
}
-static inline void cpts_rx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_rx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
-static inline void cpts_tx_enable(struct cpts *cpts, int enable)
-{
-}
-
-static inline bool cpts_is_tx_enabled(struct cpts *cpts)
-{
- return false;
-}
-
static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
{
return false;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 0397ccb6597e..20d81e0b1c29 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -763,6 +763,8 @@ struct gbe_priv {
int cpts_registered;
struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
};
struct gbe_intf {
@@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
- !cpts_is_tx_enabled(gbe_dev->cpts))
+ !gbe_dev->tx_ts_enabled)
return 0;
/* If phy has the txtstamp api, assume it will do it.
@@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
return 0;
}
- cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
p_info->rxtstamp_complete = true;
return 0;
@@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
return -EOPNOTSUPP;
cfg.flags = 0;
- cfg.tx_type = cpts_is_tx_enabled(cpts) ?
- HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
- cpts->rx_enable : HWTSTAMP_FILTER_NONE);
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
struct gbe_slave *slave = gbe_intf->slave;
u32 ts_en, seq_id, ctl;
- if (!cpts_is_rx_enabled(gbe_dev->cpts) &&
- !cpts_is_tx_enabled(gbe_dev->cpts)) {
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
return;
}
@@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
(slave->ts_ctl.uni ? TS_UNI_EN :
slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
- if (cpts_is_tx_enabled(gbe_dev->cpts))
+ if (gbe_dev->tx_ts_enabled)
ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
- if (cpts_is_rx_enabled(gbe_dev->cpts))
+ if (gbe_dev->rx_ts_enabled)
ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
@@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.tx_type) {
case HWTSTAMP_TX_OFF:
- cpts_tx_enable(cpts, 0);
+ gbe_dev->tx_ts_enabled = 0;
break;
case HWTSTAMP_TX_ON:
- cpts_tx_enable(cpts, 1);
+ gbe_dev->tx_ts_enabled = 1;
break;
default:
return -ERANGE;
@@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
switch (cfg.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- cpts_rx_enable(cpts, 0);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6a71c2c0f17d..c50a9772f4af 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev)
static int tc_mii_probe(struct net_device *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
struct tc35815_local *lp = netdev_priv(dev);
struct phy_device *phydev;
- u32 dropmask;
phydev = phy_find_first(lp->mii_bus);
if (!phydev) {
@@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev)
/* mask with MAC supported features */
phy_set_max_speed(phydev, SPEED_100);
- dropmask = 0;
- if (options.speed == 10)
- dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
- else if (options.speed == 100)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
- if (options.duplex == 1)
- dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
- else if (options.duplex == 2)
- dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
- phydev->supported &= ~dropmask;
- phydev->advertising = phydev->supported;
+ if (options.speed == 10) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.speed == 100) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ }
+ if (options.duplex == 1) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ } else if (options.duplex == 2) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+ }
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
lp->link = 0;
lp->speed = 0;
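
This is the same u32-to-linkmode conversion applied across the series: restrictions are expressed as a declared bitmap mask, ANDed into supported, and then copied to advertising. A compact sketch of that sequence (standalone helper, not the driver's code):

    #include <linux/ethtool.h>
    #include <linux/linkmode.h>

    /* Keep only the 10/100 modes in 'supported', then advertise
     * exactly what is left.
     */
    static void demo_limit_to_fast_ethernet(unsigned long *supported,
                                            unsigned long *advertising)
    {
            __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

            linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask);
            linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask);
            linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
            linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);

            linkmode_and(supported, supported, mask);
            linkmode_copy(advertising, supported);
    }
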
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index a0cd1c41cf5f..58bbba8582b0 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -70,6 +70,7 @@ struct geneve_dev {
bool collect_md;
bool use_udp6_rx_checksums;
bool ttl_inherit;
+ enum ifla_geneve_df df;
};
struct geneve_sock {
@@ -387,6 +388,59 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */
+static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct genevehdr *geneveh;
+ struct geneve_sock *gs;
+ u8 zero_vni[3] = { 0 };
+ u8 *vni = zero_vni;
+
+ if (skb->len < GENEVE_BASE_HLEN)
+ return -EINVAL;
+
+ geneveh = geneve_hdr(skb);
+ if (geneveh->ver != GENEVE_VER)
+ return -EINVAL;
+
+ if (geneveh->proto_type != htons(ETH_P_TEB))
+ return -EINVAL;
+
+ gs = rcu_dereference_sk_user_data(sk);
+ if (!gs)
+ return -ENOENT;
+
+ if (geneve_get_sk_family(gs) == AF_INET) {
+ struct iphdr *iph = ip_hdr(skb);
+ __be32 addr4 = 0;
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr4 = iph->daddr;
+ }
+
+ return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT;
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (geneve_get_sk_family(gs) == AF_INET6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct in6_addr addr6;
+
+ memset(&addr6, 0, sizeof(struct in6_addr));
+
+ if (!gs->collect_md) {
+ vni = geneve_hdr(skb)->vni;
+ addr6 = ip6h->daddr;
+ }
+
+ return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT;
+ }
+#endif
+
+ return -EPFNOSUPPORT;
+}
+
static struct socket *geneve_create_sock(struct net *net, bool ipv6,
__be16 port, bool ipv6_rx_csum)
{
@@ -544,6 +598,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
tunnel_cfg.gro_receive = geneve_gro_receive;
tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
+ tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
list_add(&gs->list, &gn->sock_list);
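
geneve now also fills in encap_err_lookup, a newly added hook that lets the UDP stack verify an incoming ICMP error really belongs to a known tunnel before reporting it to the socket. A sketch of the socket configuration, assuming the 4.20-era struct udp_tunnel_sock_cfg fields (handler bodies elided):

    #include <linux/errno.h>
    #include <net/udp_tunnel.h>

    static int demo_encap_recv(struct sock *sk, struct sk_buff *skb)
    {
            return 0;       /* decapsulate and consume here */
    }

    static int demo_encap_err_lookup(struct sock *sk, struct sk_buff *skb)
    {
            /* return 0 only when the ICMP error matches a known tunnel */
            return -ENOENT;
    }

    static void demo_setup(struct net *net, struct socket *sock, void *priv)
    {
            struct udp_tunnel_sock_cfg cfg = {
                    .sk_user_data     = priv,
                    .encap_type       = 1,
                    .encap_rcv        = demo_encap_recv,
                    .encap_err_lookup = demo_encap_err_lookup,
            };

            setup_udp_tunnel_sock(net, sock, &cfg);
    }
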
@@ -823,8 +878,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct rtable *rt;
struct flowi4 fl4;
__u8 tos, ttl;
+ __be16 df = 0;
__be16 sport;
- __be16 df;
int err;
rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info);
@@ -838,6 +893,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
+
+ df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb);
if (geneve->ttl_inherit)
@@ -845,8 +902,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
else
ttl = key->ttl;
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+
+ if (geneve->df == GENEVE_DF_SET) {
+ df = htons(IP_DF);
+ } else if (geneve->df == GENEVE_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6) {
+ df = htons(IP_DF);
+ } else if (ntohs(eth->h_proto) == ETH_P_IP) {
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->frag_off & htons(IP_DF))
+ df = htons(IP_DF);
+ }
+ }
}
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr));
if (unlikely(err))
@@ -1093,6 +1164,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 },
[IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 },
[IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 },
+ [IFLA_GENEVE_DF] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1128,6 +1200,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_GENEVE_DF]) {
+ enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
+ if (df < 0 || df > GENEVE_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -1173,7 +1255,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
struct netlink_ext_ack *extack,
const struct ip_tunnel_info *info,
bool metadata, bool ipv6_rx_csum,
- bool ttl_inherit)
+ bool ttl_inherit, enum ifla_geneve_df df)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -1223,6 +1305,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
geneve->collect_md = metadata;
geneve->use_udp6_rx_checksums = ipv6_rx_csum;
geneve->ttl_inherit = ttl_inherit;
+ geneve->df = df;
err = register_netdevice(dev);
if (err)
@@ -1242,7 +1325,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack,
struct ip_tunnel_info *info, bool *metadata,
bool *use_udp6_rx_checksums, bool *ttl_inherit,
- bool changelink)
+ enum ifla_geneve_df *df, bool changelink)
{
int attrtype;
@@ -1330,6 +1413,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_GENEVE_TOS])
info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+ if (data[IFLA_GENEVE_DF])
+ *df = nla_get_u8(data[IFLA_GENEVE_DF]);
+
if (data[IFLA_GENEVE_LABEL]) {
info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) &
IPV6_FLOWLABEL_MASK;
@@ -1448,6 +1534,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
+ enum ifla_geneve_df df = GENEVE_DF_UNSET;
bool use_udp6_rx_checksums = false;
struct ip_tunnel_info info;
bool ttl_inherit = false;
@@ -1456,12 +1543,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
init_tnl_info(&info, GENEVE_UDP_PORT);
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, false);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, false);
if (err)
return err;
err = geneve_configure(net, dev, extack, &info, metadata,
- use_udp6_rx_checksums, ttl_inherit);
+ use_udp6_rx_checksums, ttl_inherit, df);
if (err)
return err;
@@ -1524,6 +1611,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
+ enum ifla_geneve_df df = geneve->df;
bool ttl_inherit;
int err;
@@ -1539,7 +1627,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
use_udp6_rx_checksums = geneve->use_udp6_rx_checksums;
ttl_inherit = geneve->ttl_inherit;
err = geneve_nl2info(tb, data, extack, &info, &metadata,
- &use_udp6_rx_checksums, &ttl_inherit, true);
+ &use_udp6_rx_checksums, &ttl_inherit, &df, true);
if (err)
return err;
@@ -1572,6 +1660,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */
nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
@@ -1620,6 +1709,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df))
+ goto nla_put_failure;
+
if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst))
goto nla_put_failure;
@@ -1666,12 +1758,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
memset(tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &geneve_link_ops, tb);
+ &geneve_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
init_tnl_info(&info, dst_port);
- err = geneve_configure(net, dev, NULL, &info, true, true, false);
+ err = geneve_configure(net, dev, NULL, &info,
+ true, true, false, GENEVE_DF_UNSET);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
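The new IFLA_GENEVE_DF attribute gives three behaviours for the outer IPv4 DF bit. The decision added to geneve_xmit_skb() reduces to a small predicate; the standalone sketch below restates that logic (enum values mirror GENEVE_DF_UNSET/SET/INHERIT, everything else simplified). Note that in collect_md mode the patch instead takes DF from TUNNEL_DONT_FRAGMENT in the per-packet tunnel key, as the first hunk in geneve_xmit_skb() shows.

#include <stdbool.h>
#include <stdio.h>

enum geneve_df { DF_UNSET, DF_SET, DF_INHERIT };	/* mirrors GENEVE_DF_* */

/* Should the outer IPv4 header carry IP_DF? The inner packet is
 * described by whether it is IPv6 and, for IPv4, whether it set DF.
 */
static bool outer_df(enum geneve_df mode, bool inner_ipv6, bool inner_df)
{
	switch (mode) {
	case DF_SET:
		return true;
	case DF_INHERIT:
		/* IPv6 has no in-flight fragmentation, so treat it as DF */
		return inner_ipv6 || inner_df;
	default:	/* DF_UNSET */
		return false;
	}
}

int main(void)
{
	printf("%d %d %d\n",
	       outer_df(DF_SET, false, false),		/* 1 */
	       outer_df(DF_INHERIT, true, false),	/* 1 */
	       outer_df(DF_UNSET, false, true));	/* 0 */
	return 0;
}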
diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c
index 6fe5dc9201d0..9d0504f3e3b2 100644
--- a/drivers/net/phy/amd.c
+++ b/drivers/net/phy/amd.c
@@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { {
.name = "AM79C874",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = am79c_config_init,
.ack_interrupt = am79c_ack_interrupt,
.config_intr = am79c_config_intr,
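This removal, and the matching PHY_HAS_INTERRUPT removals in the drivers below, rely on phylib inferring interrupt capability from the driver's callbacks instead of a static flag. The in-tree check (phy_drv_supports_irq() in phy_device.c) amounts to the following simplified sketch:

#include <stdbool.h>

struct phy_driver_sketch {
	int (*config_intr)(void *phydev);	/* enable/disable IRQs */
	int (*ack_interrupt)(void *phydev);	/* clear a pending IRQ */
};

/* Simplified restatement: a driver supports interrupts iff it
 * provides both callbacks, so a separate capability flag is
 * redundant.
 */
static bool drv_supports_irq(const struct phy_driver_sketch *drv)
{
	return drv->config_intr && drv->ack_interrupt;
}

int main(void)
{
	struct phy_driver_sketch d = { 0 };

	return drv_supports_irq(&d);	/* 0: no IRQ support advertised */
}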
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index 632472cab3bb..beb3309bb0f0 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -25,15 +25,10 @@
#define PHY_ID_AQR107 0x03a1b4e0
#define PHY_ID_AQR405 0x03a1b4b0
-#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
- SUPPORTED_1000baseT_Full | \
- SUPPORTED_100baseT_Full | \
- PHY_DEFAULT_FEATURES)
-
static int aquantia_config_aneg(struct phy_device *phydev)
{
- phydev->supported = PHY_AQUANTIA_FEATURES;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->supported, phy_10gbit_features);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
@@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ1202",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQ2104",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR105",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR106",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -164,7 +155,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR107",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
@@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "Aquantia AQR405",
.features = PHY_10GBIT_FULL_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index e74a047a846e..f9432d053a22 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -395,7 +394,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index d95bffdec4c1..a88dd14a25c0 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
int reg, err;
/* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
reg = phy_read(phydev, MII_BCM63XX_IR);
if (reg < 0)
@@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id_mask = 0xfffffc00,
.name = "Broadcom BCM63XX (1)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
@@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = {
.phy_id = 0x002bdc00,
.phy_id_mask = 0xfffffc00,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm63xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm63xx_config_intr,
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b2b6307d64a4..712224cc442d 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"),
@@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
{ PHY_ID_BCM7268, 0xfffffff0, },
{ PHY_ID_BCM7271, 0xfffffff0, },
diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c
index f7ebdcff53e4..1b350183bffb 100644
--- a/drivers/net/phy/bcm87xx.c
+++ b/drivers/net/phy/bcm87xx.c
@@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev)
static int bcm87xx_config_init(struct phy_device *phydev)
{
- phydev->supported = SUPPORTED_10000baseR_FEC;
- phydev->advertising = ADVERTISED_10000baseR_FEC;
+ linkmode_zero(phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->supported);
+ linkmode_zero(phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ phydev->advertising);
phydev->state = PHY_NOLINK;
phydev->autoneg = AUTONEG_DISABLE;
@@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8706,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8706",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
@@ -205,7 +208,6 @@ static struct phy_driver bcm87xx_driver[] = {
.phy_id = PHY_ID_BCM8727,
.phy_id_mask = 0xffffffff,
.name = "Broadcom BCM8727",
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm87xx_config_init,
.config_aneg = bcm87xx_config_aneg,
.read_status = bcm87xx_read_status,
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 704537010453..aa73c5cc5f86 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5411",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5421",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54210E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54612E",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5464",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -666,7 +659,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5481",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54810",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.config_aneg = bcm5481_config_aneg,
.ack_interrupt = bcm_phy_ack_intr,
@@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5482",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm5482_config_init,
.read_status = bcm5482_read_status,
.ack_interrupt = bcm_phy_ack_intr,
@@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM50610M",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM57780",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
@@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5241",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = brcm_fet_config_init,
.ack_interrupt = brcm_fet_ack_interrupt,
.config_intr = brcm_fet_config_intr,
@@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM89610",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = bcm54xx_config_init,
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index c05af00bf4b6..fea61c81bda9 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8201",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
@@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = {
.name = "Cicada Cis8204",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &cis820x_config_init,
.ack_interrupt = &cis820x_ack_interrupt,
.config_intr = &cis820x_config_intr,
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 5ee99b3b428c..97162008f42b 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -150,7 +150,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161E",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161B/C",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9161A",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dm9161_config_init,
.config_aneg = dm9161_config_aneg,
.ack_interrupt = dm9161_ack_interrupt,
@@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = {
.name = "Davicom DM9131",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = dm9161_ack_interrupt,
.config_intr = dm9161_config_intr,
} };
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index edd4d44a386d..18b41bc345ab 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83640",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
.soft_reset = dp83640_soft_reset,
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 6e8a2a4f3a6e..24c7f149f3e6 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83822",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83822_config_init,
.soft_reset = dp83822_phy_reset,
.get_wol = dp83822_get_wol,
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 6e8e42361fd5..a6b55909d1dc 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
.phy_id_mask = 0xfffffff0, \
.name = _name, \
.features = PHY_BASIC_FEATURES, \
- .flags = PHY_HAS_INTERRUPT, \
\
.soft_reset = genphy_soft_reset, \
.config_init = _config_init, \
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index b3935778b19f..da6a67d47ce9 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83867",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83867_config_init,
.soft_reset = dp83867_phy_reset,
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 78cad134a79e..da13356999e5 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = {
.phy_id_mask = 0xfffffff0,
.name = "TI DP83TC811",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = dp83811_config_init,
.config_aneg = dp83811_config_aneg,
.soft_reset = dp83811_phy_reset,
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 67b260877f30..f7fb62712cd8 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -223,14 +223,23 @@ struct phy_device *fixed_phy_register(unsigned int irq,
switch (status->speed) {
case SPEED_1000:
- phy->supported = PHY_1000BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_100:
- phy->supported = PHY_100BT_FEATURES;
- break;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phy->supported);
+ /* fall through */
case SPEED_10:
default:
- phy->supported = PHY_10BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phy->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phy->supported);
}
ret = phy_device_register(phy);
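The rewritten switch in fixed_phy_register() depends on deliberate fall-through: each case adds its own pair of modes and then drops into the next, so SPEED_1000 accumulates the 100 and 10 Mb/s modes too. A compact standalone model of that accumulation:

#include <stdio.h>

#define BIT(n) (1U << (n))
enum { M10_HALF, M10_FULL, M100_HALF, M100_FULL, M1000_HALF, M1000_FULL };

static unsigned int fixed_modes(int speed)
{
	unsigned int m = 0;

	switch (speed) {
	case 1000:
		m |= BIT(M1000_HALF) | BIT(M1000_FULL);
		/* fall through */
	case 100:
		m |= BIT(M100_HALF) | BIT(M100_FULL);
		/* fall through */
	case 10:
	default:
		m |= BIT(M10_HALF) | BIT(M10_FULL);
	}
	return m;
}

int main(void)
{
	printf("%#x %#x %#x\n", fixed_modes(10), fixed_modes(100),
	       fixed_modes(1000));	/* 0x3 0xf 0x3f */
	return 0;
}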
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 791587a49215..7d5938b87660 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -25,6 +25,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/property.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -36,14 +37,34 @@ MODULE_LICENSE("GPL");
/* IP101A/G - IP1001 */
#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
-#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */
-#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */
+#define IP1001_RXPHASE_SEL BIT(0) /* Add delay on RX_CLK */
+#define IP1001_TXPHASE_SEL BIT(1) /* Add delay on TX_CLK */
#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
-#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
+#define IP101A_G_APS_ON BIT(1) /* IP101A/G APS Mode bit */
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
-#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */
-#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED
+#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */
+#define IP101A_G_IRQ_ALL_MASK BIT(11) /* IRQ's inactive */
+#define IP101A_G_IRQ_SPEED_CHANGE BIT(2)
+#define IP101A_G_IRQ_DUPLEX_CHANGE BIT(1)
+#define IP101A_G_IRQ_LINK_CHANGE BIT(0)
+
+#define IP101G_DIGITAL_IO_SPEC_CTRL 0x1d
+#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32 BIT(2)
+
+/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin
+ * (pin number 21). The hardware default is RXER (receive error) mode. But it
+ * can be configured to interrupt mode manually.
+ */
+enum ip101gr_sel_intr32 {
+ IP101GR_SEL_INTR32_KEEP,
+ IP101GR_SEL_INTR32_INTR,
+ IP101GR_SEL_INTR32_RXER,
+};
+
+struct ip101a_g_phy_priv {
+ enum ip101gr_sel_intr32 sel_intr32;
+};
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -162,18 +183,92 @@ static int ip1001_config_init(struct phy_device *phydev)
return 0;
}
+static int ip175c_read_status(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_read_status(phydev);
+ else
+ /* Don't need to read status for switch ports */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
+ return 0;
+}
+
+static int ip175c_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->mdio.addr == 4) /* WAN port */
+ genphy_config_aneg(phydev);
+
+ return 0;
+}
+
+static int ip101a_g_probe(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->mdio.dev;
+ struct ip101a_g_phy_priv *priv;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /* Both functions (RX error and interrupt status) share the same
+ * pin on the 32-pin IP101GR, so this is an exclusive choice.
+ */
+ if (device_property_read_bool(dev, "icplus,select-rx-error") &&
+ device_property_read_bool(dev, "icplus,select-interrupt")) {
+ dev_err(dev,
+ "RXER and INTR mode cannot be selected together\n");
+ return -EINVAL;
+ }
+
+ if (device_property_read_bool(dev, "icplus,select-rx-error"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_RXER;
+ else if (device_property_read_bool(dev, "icplus,select-interrupt"))
+ priv->sel_intr32 = IP101GR_SEL_INTR32_INTR;
+ else
+ priv->sel_intr32 = IP101GR_SEL_INTR32_KEEP;
+
+ phydev->priv = priv;
+
+ return 0;
+}
+
static int ip101a_g_config_init(struct phy_device *phydev)
{
- int c;
+ struct ip101a_g_phy_priv *priv = phydev->priv;
+ int err, c;
c = ip1xx_reset(phydev);
if (c < 0)
return c;
- /* INTR pin used: speed/link/duplex will cause an interrupt */
- c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
- if (c < 0)
- return c;
+ /* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */
+ switch (priv->sel_intr32) {
+ case IP101GR_SEL_INTR32_RXER:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0);
+ if (err < 0)
+ return err;
+ break;
+
+ case IP101GR_SEL_INTR32_INTR:
+ err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32,
+ IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32);
+ if (err < 0)
+ return err;
+ break;
+
+ default:
+ /* Don't touch IP101G_DIGITAL_IO_SPEC_CTRL because it's not
+ * documented on IP101A and it's not clear whether this would
+ * cause problems.
+ * For the 32-pin IP101GR we simply keep the SEL_INTR32
+ * configuration as set by the bootloader when not configured
+ * to one of the special functions.
+ */
+ break;
+ }
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
@@ -182,23 +277,29 @@ static int ip101a_g_config_init(struct phy_device *phydev)
return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
}
-static int ip175c_read_status(struct phy_device *phydev)
+static int ip101a_g_config_intr(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_read_status(phydev);
+ u16 val;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ /* INTR pin used: Speed/link/duplex will cause an interrupt */
+ val = IP101A_G_IRQ_PIN_USED;
else
- /* Don't need to read status for switch ports */
- phydev->irq = PHY_IGNORE_INTERRUPT;
+ val = IP101A_G_IRQ_ALL_MASK;
- return 0;
+ return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val);
}
-static int ip175c_config_aneg(struct phy_device *phydev)
+static int ip101a_g_did_interrupt(struct phy_device *phydev)
{
- if (phydev->mdio.addr == 4) /* WAN port */
- genphy_config_aneg(phydev);
+ int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
- return 0;
+ if (val < 0)
+ return 0;
+
+ return val & (IP101A_G_IRQ_SPEED_CHANGE |
+ IP101A_G_IRQ_DUPLEX_CHANGE |
+ IP101A_G_IRQ_LINK_CHANGE);
}
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
@@ -234,7 +335,9 @@ static struct phy_driver icplus_driver[] = {
.name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
+ .probe = ip101a_g_probe,
+ .config_intr = ip101a_g_config_intr,
+ .did_interrupt = ip101a_g_did_interrupt,
.ack_interrupt = ip101a_g_ack_interrupt,
.config_init = &ip101a_g_config_init,
.suspend = genphy_suspend,
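Several of the icplus changes above funnel register updates through phy_modify(). As a reference, ignoring locking and the kernel types, that helper is a read-modify-write of the form sketched below (the register and bit numbers are just the IP101G ones reused for illustration):

#include <stdio.h>

static unsigned short regs[32];	/* stand-in for the PHY's register file */

static int phy_read(unsigned int reg)
{
	return regs[reg];
}

static int phy_write(unsigned int reg, unsigned short val)
{
	regs[reg] = val;
	return 0;
}

/* Unlocked model of phy_modify(phydev, reg, mask, set): clear the
 * bits in @mask, then set the bits in @set, in one read-modify-write.
 * The real helper also serializes against the MDIO bus lock.
 */
static int phy_modify(unsigned int reg, unsigned short mask,
		      unsigned short set)
{
	int old = phy_read(reg);

	if (old < 0)
		return old;

	return phy_write(reg, (old & ~mask) | set);
}

int main(void)
{
	regs[0x1d] = 0x00f0;
	phy_modify(0x1d, 1 << 2, 1 << 2);	/* e.g. SEL_INTR32 := 1 */
	printf("%#x\n", regs[0x1d]);		/* prints 0xf4 */
	return 0;
}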
diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
index 7d936fb61c22..fc0f5024a29e 100644
--- a/drivers/net/phy/intel-xway.c
+++ b/drivers/net/phy/intel-xway.c
@@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.3",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.4",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.config_aneg = xway_gphy14_config_aneg,
.ack_interrupt = xway_gphy_ack_interrupt,
@@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (PEF 7061) v1.5 / v1.6",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.1 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.1 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY11G (xRX v1.2 integrated)",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
@@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = {
.phy_id_mask = 0xffffffff,
.name = "Intel XWAY PHY22F (xRX v1.2 integrated)",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = xway_gphy_config_init,
.ack_interrupt = xway_gphy_ack_interrupt,
.did_interrupt = xway_gphy_did_interrupt,
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index c14b254b2879..c8bb29ae1a2a 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
*/
} while (lpa == adv && retry--);
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
lpa &= adv;
@@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
}
return 0;
@@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT970",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = lxt970_config_init,
.ack_interrupt = lxt970_ack_interrupt,
.config_intr = lxt970_config_intr,
@@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = {
.name = "LXT971",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = lxt971_ack_interrupt,
.config_intr = lxt971_config_intr,
}, {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index cbec296107bd..6a9881942e53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
}
/**
- * ethtool_adv_to_fiber_adv_t
- * @ethadv: the ethtool advertisement settings
+ * linkmode_adv_to_fiber_adv_t
+ * @advertise: the linkmode advertisement settings
*
- * A small helper function that translates ethtool advertisement
- * settings to phy autonegotiation advertisements for the
- * MII_ADV register for fiber link.
+ * A small helper function that translates linkmode advertisement
+ * settings to phy autonegotiation advertisements for the MII_ADV
+ * register for fiber link.
*/
-static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv)
+static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise)
{
u32 result = 0;
- if (ethadv & ADVERTISED_1000baseT_Half)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise))
result |= ADVERTISE_FIBER_1000HALF;
- if (ethadv & ADVERTISED_1000baseT_Full)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise))
result |= ADVERTISE_FIBER_1000FULL;
- if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP))
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= LPA_PAUSE_ASYM_FIBER;
- else if (ethadv & ADVERTISE_PAUSE_CAP)
+ else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise))
result |= (ADVERTISE_PAUSE_FIBER
& (~ADVERTISE_PAUSE_ASYM_FIBER));
@@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
int changed = 0;
int err;
int adv, oldadv;
- u32 advertise;
if (phydev->autoneg != AUTONEG_ENABLE)
return genphy_setup_forced(phydev);
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
/* Setup fiber advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL
| LPA_PAUSE_FIBER);
- adv |= ethtool_adv_to_fiber_adv_t(advertise);
+ adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- u32 pause;
/* Select page 18 */
err = marvell_set_page(phydev, 18);
@@ -878,9 +877,14 @@ static int m88e1510_config_init(struct phy_device *phydev)
* This means we can never be truly sure what was advertised,
* so disable Pause support.
*/
- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported &= ~pause;
- phydev->advertising &= ~pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
}
return m88e1318_config_init(phydev);
@@ -1043,22 +1047,21 @@ static int m88e1145_config_init(struct phy_device *phydev)
}
/**
- * fiber_lpa_to_ethtool_lpa_t
+ * fiber_lpa_to_linkmode_lpa_t
+ * @advertising: the linkmode advertisement settings
* @lpa: value of the MII_LPA register for fiber link
*
* A small helper function that translates MII_LPA
- * bits to ethtool LP advertisement settings.
+ * bits to linkmode LP advertisement settings.
*/
-static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa)
+static void fiber_lpa_to_linkmode_lpa_t(unsigned long *advertising, u32 lpa)
{
- u32 result = 0;
-
if (lpa & LPA_FIBER_1000HALF)
- result |= ADVERTISED_1000baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ advertising);
if (lpa & LPA_FIBER_1000FULL)
- result |= ADVERTISED_1000baseT_Full;
-
- return result;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising);
}
/**
@@ -1134,9 +1137,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
if (!fiber) {
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb) |
- mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, lpagb);
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
@@ -1144,7 +1146,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
} else {
/* The fiber link is only 1000M capable */
- phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa);
+ fiber_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
if (phydev->duplex == DUPLEX_FULL) {
if (!(lpa & LPA_PAUSE_FIBER)) {
@@ -1183,7 +1185,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev)
phydev->pause = 0;
phydev->asym_pause = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
return 0;
}
@@ -1235,7 +1237,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE &&
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported) &&
phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
@@ -1278,7 +1281,8 @@ static int marvell_suspend(struct phy_device *phydev)
int err;
/* Suspend the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1312,7 +1316,8 @@ static int marvell_resume(struct phy_device *phydev)
int err;
/* Resume the fiber mode first */
- if (!(phydev->supported & SUPPORTED_FIBRE)) {
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported)) {
err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE);
if (err < 0)
goto error;
@@ -1463,7 +1468,8 @@ error:
static int marvell_get_sset_count(struct phy_device *phydev)
{
- if (phydev->supported & SUPPORTED_FIBRE)
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported))
return ARRAY_SIZE(marvell_hw_stats);
else
return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
@@ -2005,7 +2011,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2024,7 +2029,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2043,7 +2047,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2063,7 +2066,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1118",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1118_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2082,7 +2084,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
@@ -2103,7 +2104,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1318S",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1318_config_init,
.config_aneg = &m88e1318_config_aneg,
@@ -2126,7 +2126,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1145_config_init,
.config_aneg = &m88e1101_config_aneg,
@@ -2146,7 +2145,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1149R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1149_config_init,
.config_aneg = &m88e1118_config_aneg,
@@ -2165,7 +2163,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1240",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
.config_aneg = &marvell_config_aneg,
@@ -2184,7 +2181,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1116R",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1116r_config_init,
.ack_interrupt = &marvell_ack_interrupt,
@@ -2202,7 +2198,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1510",
.features = PHY_GBIT_FIBRE_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2226,7 +2221,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
@@ -2248,7 +2242,6 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2268,7 +2261,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E3016",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e3016_config_init,
.aneg_done = &marvell_aneg_done,
@@ -2289,7 +2281,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6390",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = m88e6390_probe,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 1c9d039eec63..6f6e886fc836 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev)
static int mv3310_config_init(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
- u32 mask;
int val;
/* Check that the PHY interface type is compatible */
@@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev)
}
}
- if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported))
- phydev_warn(phydev,
- "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n",
- __ETHTOOL_LINK_MODE_MASK_NBITS, supported);
-
- phydev->supported &= mask;
- phydev->advertising &= phydev->supported;
+ linkmode_copy(phydev->supported, supported);
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
return 0;
}
@@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev)
static int mv3310_config_aneg(struct phy_device *phydev)
{
bool changed = false;
- u32 advertising;
+ u16 reg;
int ret;
/* We don't support manual MDI control */
@@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev)
return genphy_c45_an_disable_aneg(phydev);
}
- phydev->advertising &= phydev->supported;
- advertising = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_100BASE4 |
ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM,
- ethtool_adv_to_mii_adv_t(advertising));
+ linkmode_adv_to_mii_adv_t(phydev->advertising));
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
+ reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000,
- ADVERTISE_1000FULL | ADVERTISE_1000HALF,
- ethtool_adv_to_mii_ctrl1000_t(advertising));
+ ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg);
if (ret < 0)
return ret;
if (ret > 0)
changed = true;
/* 10G control register */
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->advertising))
+ reg = MDIO_AN_10GBT_CTRL_ADV10G;
+ else
+ reg = 0;
+
ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
- MDIO_AN_10GBT_CTRL_ADV10G,
- advertising & ADVERTISED_10000baseT_Full ?
- MDIO_AN_10GBT_CTRL_ADV10G : 0);
+ MDIO_AN_10GBT_CTRL_ADV10G, reg);
if (ret < 0)
return ret;
if (ret > 0)
@@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev)
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->link = 0;
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising, val);
if (phydev->autoneg == AUTONEG_ENABLE)
phy_resolve_aneg_linkmode(phydev);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index ddc2c5ea3787..b03bcf2c388a 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = {
.phy_id_mask = 0xfffffff0,
.name = "Meson GXL Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT,
+ .flags = PHY_IS_INTERNAL,
.config_init = meson_gxl_config_init,
.aneg_done = genphy_aneg_done,
.read_status = meson_gxl_read_status,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9265dea79412..c33384710d26 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev)
static int ksz8041_config_init(struct phy_device *phydev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
struct device_node *of_node = phydev->mdio.dev.of_node;
/* Limit supported and advertised modes in fiber mode */
if (of_property_read_bool(of_node, "micrel,fiber-mode")) {
phydev->dev_flags |= MICREL_PHY_FXEN;
- phydev->supported &= SUPPORTED_100baseT_Full |
- SUPPORTED_100baseT_Half;
- phydev->supported |= SUPPORTED_FIBRE;
- phydev->advertising &= ADVERTISED_100baseT_Full |
- ADVERTISED_100baseT_Half;
- phydev->advertising |= ADVERTISED_FIBRE;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask);
+
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_and(phydev->advertising, phydev->advertising, mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
phydev->autoneg = AUTONEG_DISABLE;
}
@@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ks8737_type,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
@@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8021 or KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.name = "Micrel KSZ8031",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = ksz8041_config_init,
@@ -979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8051_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8001 or KS8721",
.phy_id_mask = 0x00fffffc,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8041_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8081 or KSZ8091",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
.probe = kszphy_probe,
.config_init = kszphy_config_init,
@@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ8061",
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
@@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x000ffffe,
.name = "Micrel KSZ9021 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9021_config_init,
@@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9031_config_init,
@@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.driver_data = &ksz9021_type,
.probe = kszphy_probe,
.config_init = ksz9131_config_init,
@@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1124,7 +1116,6 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
.read_status = ksz8873mll_read_status,
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 04b12e34da58..7557bebd5d7f 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = {
.name = "Microchip LAN88xx",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = lan88xx_probe,
.remove = lan88xx_remove,
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index c600a8509d60..3d09b471632c 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.name = "Microchip LAN87xx T1",
.features = PHY_BASIC_T1_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = genphy_config_init,
.config_aneg = genphy_config_aneg,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index a2e59f4f6f01..62269e578718 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -1833,7 +1833,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8530",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1859,7 +1858,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8531",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1885,7 +1883,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi FE VSC8540 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1911,7 +1908,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi VSC8541 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc85xx_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1937,7 +1933,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8574 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
@@ -1964,7 +1959,6 @@ static struct phy_driver vsc85xx_driver[] = {
.name = "Microsemi GE VSC8584 SyncE",
.phy_id_mask = 0xfffffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.soft_reset = &genphy_soft_reset,
.config_init = &vsc8584_config_init,
.config_aneg = &vsc85xx_config_aneg,
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 2b1e336961f9..139bed2c8ab4 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { {
.phy_id_mask = 0xfffffff0,
.name = "NatSemi DP83865",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ns_config_init,
.ack_interrupt = ns_ack_interrupt,
.config_intr = ns_config_intr,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index d7636ff03bc7..03af927fa5ad 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
if (val < 0)
return val;
- phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
@@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
return val;
if (val & MDIO_AN_10GBT_STAT_LP10G)
- phydev->lp_advertising |= ADVERTISED_10000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->lp_advertising);
return 0;
}
@@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
+ linkmode_zero(phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
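The gen10g_config_init() hunk above shows the core idiom of this conversion: supported/advertising are no longer u32 words but fixed-size bitmaps indexed by ETHTOOL_LINK_MODE_* bit numbers, touched only through the linkmode helpers. A minimal self-contained sketch of the idiom, assuming nothing beyond the headers shown:

#include <linux/linkmode.h>
#include <linux/printk.h>

static void example_linkmode_usage(void)
{
        /* Wide enough for every ETHTOOL_LINK_MODE_* bit, including
         * the ones above bit 31 that a u32 cannot hold.
         */
        __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
        __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);

        linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, supported);
        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);

        /* advertise exactly what is supported */
        linkmode_copy(advertising, supported);

        if (linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
                pr_debug("autoneg advertised\n");
}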
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index c7da4cbb1103..20fbd5eb56fd 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str);
* must be grouped by speed and sorted in descending match priority
* - iow, descending speed. */
static const struct phy_setting settings[] = {
+ /* 100G */
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ },
+ {
+ .speed = SPEED_100000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ },
+ /* 56G */
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_56000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
+ },
+ /* 50G */
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_50000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ },
+ /* 40G */
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ },
+ {
+ .speed = SPEED_40000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ },
+ /* 25G */
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ },
+ {
+ .speed = SPEED_25000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ },
+
+ /* 20G */
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ },
+ {
+ .speed = SPEED_20000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ },
+ /* 10G */
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
+ },
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
@@ -75,22 +193,51 @@ static const struct phy_setting settings[] = {
{
.speed = SPEED_10000,
.duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ },
+ {
+ .speed = SPEED_10000,
+ .duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
},
+ /* 5G */
+ {
+ .speed = SPEED_5000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ },
+
+ /* 2.5G */
{
.speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
},
{
- .speed = SPEED_1000,
+ .speed = SPEED_2500,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
},
+ /* 1G */
{
.speed = SPEED_1000,
.duplex = DUPLEX_FULL,
- .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
},
{
.speed = SPEED_1000,
@@ -103,6 +250,12 @@ static const struct phy_setting settings[] = {
.bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
},
{
+ .speed = SPEED_1000,
+ .duplex = DUPLEX_FULL,
+ .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ },
+ /* 100M */
+ {
.speed = SPEED_100,
.duplex = DUPLEX_FULL,
.bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
@@ -112,6 +265,7 @@ static const struct phy_setting settings[] = {
.duplex = DUPLEX_HALF,
.bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
},
+ /* 10M */
{
.speed = SPEED_10,
.duplex = DUPLEX_FULL,
@@ -129,7 +283,6 @@ static const struct phy_setting settings[] = {
* @speed: speed to match
* @duplex: duplex to match
* @mask: allowed link modes
- * @maxbit: bit size of link modes
* @exact: an exact match is required
*
* Search the settings array for a setting that matches the speed and
@@ -143,14 +296,14 @@ static const struct phy_setting settings[] = {
* they all fail, %NULL will be returned.
*/
const struct phy_setting *
-phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
- size_t maxbit, bool exact)
+phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact)
{
const struct phy_setting *p, *match = NULL, *last = NULL;
int i;
for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) {
- if (p->bit < maxbit && test_bit(p->bit, mask)) {
+ if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
+ test_bit(p->bit, mask)) {
last = p;
if (p->speed == speed && p->duplex == duplex) {
/* Exact match for speed and duplex */
@@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask,
EXPORT_SYMBOL_GPL(phy_lookup_setting);
size_t phy_speeds(unsigned int *speeds, size_t size,
- unsigned long *mask, size_t maxbit)
+ unsigned long *mask)
{
size_t count;
int i;
for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++)
- if (settings[i].bit < maxbit &&
+ if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS &&
test_bit(settings[i].bit, mask) &&
(count == 0 || speeds[count - 1] != settings[i].speed))
speeds[count++] = settings[i].speed;
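Both phy_lookup_setting() and phy_speeds() lose their maxbit parameter because every caller now passes a full-width linkmode mask, so the bound is always the compile-time constant __ETHTOOL_LINK_MODE_MASK_NBITS. A hypothetical caller of the new signatures might look like this (example_lookup() and the speeds[] size are illustrative, not from the patch):

#include <linux/phy.h>
#include <linux/printk.h>

static void example_lookup(struct phy_device *phydev)
{
        const struct phy_setting *s;
        unsigned int speeds[8];
        size_t n;

        /* exact=false allows falling back to a lower setting */
        s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL,
                               phydev->supported, false);
        if (s)
                pr_debug("best match: %u/%s\n", s->speed,
                         phy_duplex_to_str(s->duplex));

        n = phy_speeds(speeds, ARRAY_SIZE(speeds), phydev->supported);
        pr_debug("%zu distinct speeds supported\n", n);
}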
@@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
*/
void phy_resolve_aneg_linkmode(struct phy_device *phydev)
{
- u32 common = phydev->lp_advertising & phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
- if (common & ADVERTISED_10000baseT_Full) {
+ linkmode_and(common, phydev->lp_advertising, phydev->advertising);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) {
phydev->speed = SPEED_10000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_5000;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ common)) {
+ phydev->speed = SPEED_2500;
+ phydev->duplex = DUPLEX_FULL;
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_1000baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_1000;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_100baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_100baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_100;
phydev->duplex = DUPLEX_HALF;
- } else if (common & ADVERTISED_10baseT_Full) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_FULL;
- } else if (common & ADVERTISED_10baseT_Half) {
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ common)) {
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
}
if (phydev->duplex == DUPLEX_FULL) {
- phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause);
- phydev->asym_pause = !!(phydev->lp_advertising &
- ADVERTISED_Asym_Pause);
+ phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising);
+ phydev->asym_pause = linkmode_test_bit(
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising);
}
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
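phy_resolve_aneg_linkmode() keeps its old contract, resolving to the highest mode common to both link partners, just over linkmode bitmaps now. A small usage sketch: if both sides advertise 1000baseT/Full and 100baseT/Full, the result is 1 Gbps full duplex:

#include <linux/linkmode.h>
#include <linux/phy.h>

static void example_resolve(struct phy_device *phydev)
{
        linkmode_zero(phydev->advertising);
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
                         phydev->advertising);
        linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
                         phydev->advertising);
        linkmode_copy(phydev->lp_advertising, phydev->advertising);

        phy_resolve_aneg_linkmode(phydev);
        /* now phydev->speed == SPEED_1000, phydev->duplex == DUPLEX_FULL */
}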
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1d73ac3309ce..376a0d8a2b61 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st)
{
switch (st) {
PHY_STATE_STR(DOWN)
- PHY_STATE_STR(STARTING)
PHY_STATE_STR(READY)
- PHY_STATE_STR(PENDING)
PHY_STATE_STR(UP)
- PHY_STATE_STR(AN)
PHY_STATE_STR(RUNNING)
PHY_STATE_STR(NOLINK)
PHY_STATE_STR(FORCING)
@@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st)
return NULL;
}
+static void phy_link_up(struct phy_device *phydev)
+{
+ phydev->phy_link_change(phydev, true, true);
+ phy_led_trigger_change_speed(phydev);
+}
+
+static void phy_link_down(struct phy_device *phydev, bool do_carrier)
+{
+ phydev->phy_link_change(phydev, false, do_carrier);
+ phy_led_trigger_change_speed(phydev);
+}
/**
* phy_print_status - Convenience function to print out the current phy status
@@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev)
*
* Returns 0 on success or < 0 on error.
*/
-static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+static int phy_config_interrupt(struct phy_device *phydev, bool interrupts)
{
- phydev->interrupts = interrupts;
+ phydev->interrupts = interrupts ? 1 : 0;
if (phydev->drv->config_intr)
return phydev->drv->config_intr(phydev);
@@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done);
* settings were found.
*/
static const struct phy_setting *
-phy_find_valid(int speed, int duplex, u32 supported)
+phy_find_valid(int speed, int duplex, unsigned long *supported)
{
- unsigned long mask = supported;
-
- return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false);
+ return phy_lookup_setting(speed, duplex, supported, false);
}
/**
@@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
unsigned int *speeds,
unsigned int size)
{
- unsigned long supported = phy->supported;
-
- return phy_speeds(speeds, size, &supported, BITS_PER_LONG);
+ return phy_speeds(speeds, size, phy->supported);
}
/**
@@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
*
* Description: Returns true if there is a valid setting, false otherwise.
*/
-static inline bool phy_check_valid(int speed, int duplex, u32 features)
+static inline bool phy_check_valid(int speed, int duplex,
+ unsigned long *features)
{
- unsigned long mask = features;
-
- return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true);
+ return !!phy_lookup_setting(speed, duplex, features, true);
}
/**
@@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features)
static void phy_sanitize_settings(struct phy_device *phydev)
{
const struct phy_setting *setting;
- u32 features = phydev->supported;
/* Sanitize settings based on PHY capabilities */
- if ((features & SUPPORTED_Autoneg) == 0)
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported))
phydev->autoneg = AUTONEG_DISABLE;
- setting = phy_find_valid(phydev->speed, phydev->duplex, features);
+ setting = phy_find_valid(phydev->speed, phydev->duplex,
+ phydev->supported);
if (setting) {
phydev->speed = setting->speed;
phydev->duplex = setting->duplex;
@@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev)
*/
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u32 speed = ethtool_cmd_speed(cmd);
if (cmd->phy_address != phydev->mdio.addr)
return -EINVAL;
/* We make sure that we don't pass unsupported values into the PHY */
- cmd->advertising &= phydev->supported;
+ ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising);
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
@@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
phydev->speed = speed;
- phydev->advertising = cmd->advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (AUTONEG_ENABLE == cmd->autoneg)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = cmd->duplex;
@@ -304,25 +311,24 @@ EXPORT_SYMBOL(phy_ethtool_sset);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
u8 autoneg = cmd->base.autoneg;
u8 duplex = cmd->base.duplex;
u32 speed = cmd->base.speed;
- u32 advertising;
if (cmd->base.phy_address != phydev->mdio.addr)
return -EINVAL;
- ethtool_convert_link_mode_to_legacy_u32(&advertising,
- cmd->link_modes.advertising);
+ linkmode_copy(advertising, cmd->link_modes.advertising);
/* We make sure that we don't pass unsupported values into the PHY */
- advertising &= phydev->supported;
+ linkmode_and(advertising, advertising, phydev->supported);
/* Verify the settings we care about. */
if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
return -EINVAL;
- if (autoneg == AUTONEG_ENABLE && advertising == 0)
+ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
return -EINVAL;
if (autoneg == AUTONEG_DISABLE &&
@@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
phydev->speed = speed;
- phydev->advertising = advertising;
+ linkmode_copy(phydev->advertising, advertising);
if (autoneg == AUTONEG_ENABLE)
- phydev->advertising |= ADVERTISED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
else
- phydev->advertising &= ~ADVERTISED_Autoneg;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
phydev->duplex = duplex;
@@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- phydev->supported);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- phydev->advertising);
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
- phydev->lp_advertising);
+ linkmode_copy(cmd->link_modes.supported, phydev->supported);
+ linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
+ linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
cmd->base.speed = phydev->speed;
cmd->base.duplex = phydev->duplex;
@@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
break;
case MII_ADVERTISE:
- phydev->advertising = mii_adv_to_ethtool_adv_t(val);
+ mii_adv_to_linkmode_adv_t(phydev->advertising,
+ val);
change_autoneg = true;
break;
default:
@@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_mii_ioctl);
+static void phy_queue_state_machine(struct phy_device *phydev,
+ unsigned int secs)
+{
+ mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+ secs * HZ);
+}
+
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+ phy_queue_state_machine(phydev, 0);
+}
+
static int phy_config_aneg(struct phy_device *phydev)
{
if (phydev->drv->config_aneg)
@@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev)
}
/**
+ * phy_check_link_status - check link status and set state accordingly
+ * @phydev: the phy_device struct
+ *
+ * Description: Check for link and whether autoneg was triggered / is running
+ * and set state accordingly
+ */
+static int phy_check_link_status(struct phy_device *phydev)
+{
+ int err;
+
+ WARN_ON(!mutex_is_locked(&phydev->lock));
+
+ err = phy_read_status(phydev);
+ if (err)
+ return err;
+
+ if (phydev->link && phydev->state != PHY_RUNNING) {
+ phydev->state = PHY_RUNNING;
+ phy_link_up(phydev);
+ } else if (!phydev->link && phydev->state != PHY_NOLINK) {
+ phydev->state = PHY_NOLINK;
+ phy_link_down(phydev, true);
+ }
+
+ return 0;
+}
+
+/**
* phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
@@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev)
*/
int phy_start_aneg(struct phy_device *phydev)
{
- bool trigger = 0;
int err;
if (!phydev->drv)
@@ -504,7 +547,7 @@ int phy_start_aneg(struct phy_device *phydev)
phy_sanitize_settings(phydev);
/* Invalidate LP advertising flags */
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
err = phy_config_aneg(phydev);
if (err < 0)
@@ -512,32 +555,16 @@ int phy_start_aneg(struct phy_device *phydev)
if (phydev->state != PHY_HALTED) {
if (AUTONEG_ENABLE == phydev->autoneg) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
+ err = phy_check_link_status(phydev);
} else {
phydev->state = PHY_FORCING;
phydev->link_timeout = PHY_FORCE_TIMEOUT;
}
}
- /* Re-schedule a PHY state machine to check PHY status because
- * negotiation may already be done and aneg interrupt may not be
- * generated.
- */
- if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) {
- err = phy_aneg_done(phydev);
- if (err > 0) {
- trigger = true;
- err = 0;
- }
- }
-
out_unlock:
mutex_unlock(&phydev->lock);
- if (trigger)
- phy_trigger_machine(phydev);
-
return err;
}
EXPORT_SYMBOL(phy_start_aneg);
@@ -573,20 +600,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
*/
int phy_speed_down(struct phy_device *phydev, bool sync)
{
- u32 adv = phydev->lp_advertising & phydev->supported;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int ret;
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- if (adv & PHY_10BT_FEATURES)
- phydev->advertising &= ~(PHY_100BT_FEATURES |
- PHY_1000BT_FEATURES);
- else if (adv & PHY_100BT_FEATURES)
- phydev->advertising &= ~PHY_1000BT_FEATURES;
+ linkmode_copy(adv_old, phydev->advertising);
+ linkmode_copy(adv, phydev->lp_advertising);
+ linkmode_and(adv, adv, phydev->supported);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ adv) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ adv)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->advertising);
+ }
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
ret = phy_config_aneg(phydev);
@@ -605,28 +650,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down);
*/
int phy_speed_up(struct phy_device *phydev)
{
- u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES;
- u32 adv_old = phydev->advertising;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds);
+
+ linkmode_copy(adv_old, phydev->advertising);
if (phydev->autoneg != AUTONEG_ENABLE)
return 0;
- phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds);
+
+ linkmode_andnot(not_speeds, adv_old, all_speeds);
+ linkmode_copy(supported, phydev->supported);
+ linkmode_and(speeds, supported, all_speeds);
+ linkmode_or(phydev->advertising, not_speeds, speeds);
- if (phydev->advertising == adv_old)
+ if (linkmode_equal(phydev->advertising, adv_old))
return 0;
return phy_config_aneg(phydev);
}
EXPORT_SYMBOL_GPL(phy_speed_up);
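phy_speed_down()/phy_speed_up() now clear and rebuild the 10/100/1000BASE-T bits individually, since the old PHY_*BT_FEATURES u32 masks no longer apply. As a sketch, the same mask could also be built from the feature arrays this patch exports from phy_device.c (an alternative formulation, not what the committed code does):

#include <linux/linkmode.h>
#include <linux/phy.h>

static void build_all_speeds(unsigned long *all_speeds)
{
        linkmode_zero(all_speeds);
        /* 10/100 half+full */
        linkmode_set_bit_array(phy_10_100_features_array,
                               ARRAY_SIZE(phy_10_100_features_array),
                               all_speeds);
        /* 1000 half+full */
        linkmode_set_bit_array(phy_gbit_features_array,
                               ARRAY_SIZE(phy_gbit_features_array),
                               all_speeds);
}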
-static void phy_queue_state_machine(struct phy_device *phydev,
- unsigned int secs)
-{
- mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
- secs * HZ);
-}
-
/**
* phy_start_machine - start PHY state machine tracking
* @phydev: the phy_device struct
@@ -644,20 +697,6 @@ void phy_start_machine(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_start_machine);
/**
- * phy_trigger_machine - trigger the state machine to run
- *
- * @phydev: the phy_device struct
- *
- * Description: There has been a change in state which requires that the
- * state machine runs.
- */
-
-void phy_trigger_machine(struct phy_device *phydev)
-{
- phy_queue_state_machine(phydev, 0);
-}
-
-/**
* phy_stop_machine - stop the PHY state machine tracking
* @phydev: target phy_device struct
*
@@ -711,30 +750,26 @@ static int phy_disable_interrupts(struct phy_device *phydev)
}
/**
- * phy_change - Called by the phy_interrupt to handle PHY changes
- * @phydev: phy_device struct that interrupted
+ * phy_interrupt - PHY interrupt handler
+ * @irq: interrupt line
+ * @phy_dat: phy_device pointer
+ *
+ * Description: Handle PHY interrupt
*/
-static irqreturn_t phy_change(struct phy_device *phydev)
+static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
- if (phy_interrupt_is_valid(phydev)) {
- if (phydev->drv->did_interrupt &&
- !phydev->drv->did_interrupt(phydev))
- return IRQ_NONE;
-
- if (phydev->state == PHY_HALTED)
- if (phy_disable_interrupts(phydev))
- goto phy_err;
- }
+ struct phy_device *phydev = phy_dat;
- mutex_lock(&phydev->lock);
- if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
- phydev->state = PHY_CHANGELINK;
- mutex_unlock(&phydev->lock);
+ if (PHY_HALTED == phydev->state)
+ return IRQ_NONE; /* It can't be ours. */
+
+ if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev))
+ return IRQ_NONE;
/* reschedule state queue work to run as soon as possible */
phy_trigger_machine(phydev);
- if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
+ if (phy_clear_interrupt(phydev))
goto phy_err;
return IRQ_HANDLED;
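phy_change() and phy_change_work() are folded into phy_interrupt() itself: the handler acks the interrupt and kicks the state machine directly. This is only safe because phylib requests the PHY IRQ as a threaded, oneshot handler, so taking mutexes and doing MDIO I/O is allowed in this context. A sketch of the registration this relies on, assuming the phy_start_interrupts() path of this kernel generation (phy_interrupt() is static to phy.c, so the sketch would live there too):

#include <linux/interrupt.h>
#include <linux/phy.h>

static int example_start_interrupts(struct phy_device *phydev)
{
        /* thread_fn may sleep; the hard handler slot stays NULL */
        return request_threaded_irq(phydev->irq, NULL, phy_interrupt,
                                    IRQF_ONESHOT | IRQF_SHARED,
                                    phydev_name(phydev), phydev);
}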
@@ -744,36 +779,6 @@ phy_err:
}
/**
- * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
- * @work: work_struct that describes the work to be done
- */
-void phy_change_work(struct work_struct *work)
-{
- struct phy_device *phydev =
- container_of(work, struct phy_device, phy_queue);
-
- phy_change(phydev);
-}
-
-/**
- * phy_interrupt - PHY interrupt handler
- * @irq: interrupt line
- * @phy_dat: phy_device pointer
- *
- * Description: When a PHY interrupt occurs, the handler disables
- * interrupts, and uses phy_change to handle the interrupt.
- */
-static irqreturn_t phy_interrupt(int irq, void *phy_dat)
-{
- struct phy_device *phydev = phy_dat;
-
- if (PHY_HALTED == phydev->state)
- return IRQ_NONE; /* It can't be ours. */
-
- return phy_change(phydev);
-}
-
-/**
* phy_enable_interrupts - Enable the interrupts from the PHY side
* @phydev: target phy_device struct
*/
@@ -851,7 +856,7 @@ out_unlock:
phy_state_machine(&phydev->state_queue.work);
/* Cannot call flush_scheduled_work() here as desired because
- * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
+ * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler
* will not reenable interrupts.
*/
}
@@ -874,9 +879,6 @@ void phy_start(struct phy_device *phydev)
mutex_lock(&phydev->lock);
switch (phydev->state) {
- case PHY_STARTING:
- phydev->state = PHY_PENDING;
- break;
case PHY_READY:
phydev->state = PHY_UP;
break;
@@ -902,18 +904,6 @@ void phy_start(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_start);
-static void phy_link_up(struct phy_device *phydev)
-{
- phydev->phy_link_change(phydev, true, true);
- phy_led_trigger_change_speed(phydev);
-}
-
-static void phy_link_down(struct phy_device *phydev, bool do_carrier)
-{
- phydev->phy_link_change(phydev, false, do_carrier);
- phy_led_trigger_change_speed(phydev);
-}
-
/**
* phy_state_machine - Handle the state machine
* @work: work_struct that describes the work to be done
@@ -936,63 +926,17 @@ void phy_state_machine(struct work_struct *work)
switch (phydev->state) {
case PHY_DOWN:
- case PHY_STARTING:
case PHY_READY:
- case PHY_PENDING:
break;
case PHY_UP:
needs_aneg = true;
- phydev->link_timeout = PHY_AN_TIMEOUT;
-
- break;
- case PHY_AN:
- err = phy_read_status(phydev);
- if (err < 0)
- break;
-
- /* If the link is down, give up on negotiation for now */
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- break;
- }
-
- /* Check if negotiation is done. Break if there's an error */
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- /* If AN is done, we're running */
- if (err > 0) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else if (0 == phydev->link_timeout--)
- needs_aneg = true;
break;
case PHY_NOLINK:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0)
- break;
-
- if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- }
+ case PHY_RUNNING:
+ case PHY_CHANGELINK:
+ case PHY_RESUMING:
+ err = phy_check_link_status(phydev);
break;
case PHY_FORCING:
err = genphy_update_link(phydev);
@@ -1008,32 +952,6 @@ void phy_state_machine(struct work_struct *work)
phy_link_down(phydev, false);
}
break;
- case PHY_RUNNING:
- if (!phy_polling_mode(phydev))
- break;
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (!phydev->link) {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
- case PHY_CHANGELINK:
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, true);
- }
- break;
case PHY_HALTED:
if (phydev->link) {
phydev->link = 0;
@@ -1041,30 +959,6 @@ void phy_state_machine(struct work_struct *work)
do_suspend = true;
}
break;
- case PHY_RESUMING:
- if (AUTONEG_ENABLE == phydev->autoneg) {
- err = phy_aneg_done(phydev);
- if (err < 0) {
- break;
- } else if (!err) {
- phydev->state = PHY_AN;
- phydev->link_timeout = PHY_AN_TIMEOUT;
- break;
- }
- }
-
- err = phy_read_status(phydev);
- if (err)
- break;
-
- if (phydev->link) {
- phydev->state = PHY_RUNNING;
- phy_link_up(phydev);
- } else {
- phydev->state = PHY_NOLINK;
- phy_link_down(phydev, false);
- }
- break;
}
mutex_unlock(&phydev->lock);
@@ -1104,10 +998,34 @@ void phy_state_machine(struct work_struct *work)
void phy_mac_interrupt(struct phy_device *phydev)
{
/* Trigger a state machine change */
- queue_work(system_power_efficient_wq, &phydev->phy_queue);
+ phy_trigger_machine(phydev);
}
EXPORT_SYMBOL(phy_mac_interrupt);
+static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv)
+{
+ linkmode_zero(advertising);
+
+ if (eee_adv & MDIO_EEE_100TX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000T)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_1000KX)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKX4)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
+ advertising);
+ if (eee_adv & MDIO_EEE_10GKR)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ advertising);
+}
+
/**
* phy_init_eee - init and check the EEE feature
* @phydev: target phy_device struct
@@ -1126,9 +1044,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
/* According to 802.3az, EEE is supported only in full-duplex mode.
*/
if (phydev->duplex == DUPLEX_FULL) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(common);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
int eee_lp, eee_cap, eee_adv;
- u32 lp, cap, adv;
int status;
+ u32 cap;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
@@ -1155,9 +1076,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if (eee_adv <= 0)
goto eee_exit_err;
- adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
- lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
- if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+ mmd_eee_adv_to_linkmode(adv, eee_adv);
+ mmd_eee_adv_to_linkmode(lp, eee_lp);
+ linkmode_and(common, adv, lp);
+
+ if (!phy_check_valid(phydev->speed, phydev->duplex, common))
goto eee_exit_err;
if (clk_stop_enable) {
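The EEE path now builds linkmode masks from the MMD EEE advertisement registers and intersects them, so phy_check_valid() operates on real link-mode bits instead of a lossy u32. The decision step in isolation, as a sketch that assumes it sits in phy.c next to the static helpers this hunk adds (the register reads are omitted):

static bool example_eee_ok(struct phy_device *phydev,
                           u16 eee_adv, u16 eee_lp)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(adv);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(lp);
        __ETHTOOL_DECLARE_LINK_MODE_MASK(common);

        mmd_eee_adv_to_linkmode(adv, eee_adv);  /* our advertisement */
        mmd_eee_adv_to_linkmode(lp, eee_lp);    /* link partner's */
        linkmode_and(common, adv, lp);

        /* EEE is usable only if the negotiated speed/duplex is in the
         * common set, which is exactly what phy_init_eee() checks.
         */
        return phy_check_valid(phydev->speed, phydev->duplex, common);
}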
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ab33d1777132..e06613f2d431 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_MII_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_ports_array);
static const int phy_fibre_port_array[] = {
ETHTOOL_LINK_MODE_FIBRE_BIT,
};
+EXPORT_SYMBOL_GPL(phy_fibre_port_array);
static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_Autoneg_BIT,
@@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = {
ETHTOOL_LINK_MODE_BNC_BIT,
ETHTOOL_LINK_MODE_Backplane_BIT,
};
+EXPORT_SYMBOL_GPL(phy_all_ports_features_array);
-static const int phy_10_100_features_array[] = {
+const int phy_10_100_features_array[4] = {
ETHTOOL_LINK_MODE_10baseT_Half_BIT,
ETHTOOL_LINK_MODE_10baseT_Full_BIT,
ETHTOOL_LINK_MODE_100baseT_Half_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10_100_features_array);
-static const int phy_basic_t1_features_array[] = {
+const int phy_basic_t1_features_array[2] = {
ETHTOOL_LINK_MODE_TP_BIT,
ETHTOOL_LINK_MODE_100baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_basic_t1_features_array);
-static const int phy_gbit_features_array[] = {
+const int phy_gbit_features_array[2] = {
ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_gbit_features_array);
-static const int phy_10gbit_features_array[] = {
+const int phy_10gbit_features_array[1] = {
ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
};
+EXPORT_SYMBOL_GPL(phy_10gbit_features_array);
__ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init;
EXPORT_SYMBOL_GPL(phy_10gbit_full_features);
@@ -587,7 +594,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
mutex_init(&dev->lock);
INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
- INIT_WORK(&dev->phy_queue, phy_change_work);
/* Request the appropriate module unconditionally; don't
* bother trying to do so only if it isn't already loaded,
@@ -1442,8 +1448,13 @@ static int genphy_config_advert(struct phy_device *phydev)
int err, changed = 0;
/* Only allow advertising what this PHY supports */
- phydev->advertising &= phydev->supported;
- advertise = phydev->advertising;
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertise,
+ phydev->advertising))
+ phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS,
+ phydev->advertising);
/* Setup standard advertisement */
adv = phy_read(phydev, MII_ADVERTISE);
@@ -1482,10 +1493,11 @@ static int genphy_config_advert(struct phy_device *phydev)
oldadv = adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (phydev->supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported))
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
- }
if (adv != oldadv)
changed = 1;
@@ -1690,11 +1702,13 @@ int genphy_read_status(struct phy_device *phydev)
if (err)
return err;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
if (AUTONEG_ENABLE == phydev->autoneg) {
- if (phydev->supported & (SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full)) {
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported)) {
lpagb = phy_read(phydev, MII_STAT1000);
if (lpagb < 0)
return lpagb;
@@ -1711,8 +1725,8 @@ int genphy_read_status(struct phy_device *phydev)
return -ENOLINK;
}
- phydev->lp_advertising =
- mii_stat1000_to_ethtool_lpa_t(lpagb);
+ mii_stat1000_to_linkmode_lpa_t(phydev->lp_advertising,
+ lpagb);
common_adv_gb = lpagb & adv << 2;
}
@@ -1720,7 +1734,7 @@ int genphy_read_status(struct phy_device *phydev)
if (lpa < 0)
return lpa;
- phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa);
adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
@@ -1801,11 +1815,13 @@ EXPORT_SYMBOL(genphy_soft_reset);
int genphy_config_init(struct phy_device *phydev)
{
int val;
- u32 features;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
- features = (SUPPORTED_TP | SUPPORTED_MII
- | SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
@@ -1813,16 +1829,16 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & BMSR_ANEGCAPABLE)
- features |= SUPPORTED_Autoneg;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
if (val & BMSR_100FULL)
- features |= SUPPORTED_100baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features);
if (val & BMSR_100HALF)
- features |= SUPPORTED_100baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features);
if (val & BMSR_10FULL)
- features |= SUPPORTED_10baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features);
if (val & BMSR_10HALF)
- features |= SUPPORTED_10baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features);
if (val & BMSR_ESTATEN) {
val = phy_read(phydev, MII_ESTATUS);
@@ -1830,13 +1846,15 @@ int genphy_config_init(struct phy_device *phydev)
return val;
if (val & ESTATUS_1000_TFULL)
- features |= SUPPORTED_1000baseT_Full;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ features);
if (val & ESTATUS_1000_THALF)
- features |= SUPPORTED_1000baseT_Half;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ features);
}
- phydev->supported &= features;
- phydev->advertising &= features;
+ linkmode_and(phydev->supported, phydev->supported, features);
+ linkmode_and(phydev->advertising, phydev->advertising, features);
return 0;
}
@@ -1880,20 +1898,37 @@ EXPORT_SYMBOL(genphy_loopback);
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
- PHY_10BT_FEATURES);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds) = { 0, };
+
+ linkmode_set_bit_array(phy_10_100_features_array,
+ ARRAY_SIZE(phy_10_100_features_array),
+ speeds);
+ linkmode_set_bit_array(phy_gbit_features_array,
+ ARRAY_SIZE(phy_gbit_features_array),
+ speeds);
+
+ linkmode_andnot(phydev->supported, phydev->supported, speeds);
switch (max_speed) {
default:
return -ENOTSUPP;
case SPEED_1000:
- phydev->supported |= PHY_1000BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported);
/* fall through */
case SPEED_100:
- phydev->supported |= PHY_100BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ phydev->supported);
/* fall through */
case SPEED_10:
- phydev->supported |= PHY_10BT_FEATURES;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ phydev->supported);
}
return 0;
@@ -1907,7 +1942,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
if (err)
return err;
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
return 0;
}
@@ -1924,10 +1959,8 @@ EXPORT_SYMBOL(phy_set_max_speed);
*/
void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode)
{
- WARN_ON(link_mode > 31);
-
- phydev->supported &= ~BIT(link_mode);
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(link_mode, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_remove_link_mode);
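Dropping the WARN_ON(link_mode > 31) is the point of this hunk: with linkmode bitmaps, callers may legally pass bit numbers above 31. A hypothetical caller, say a MAC without half-duplex gigabit support:

static void example_trim_modes(struct phy_device *phydev)
{
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
        /* bit numbers above 31 are now representable too */
        phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_2500baseT_Full_BIT);
}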
@@ -1940,9 +1973,9 @@ EXPORT_SYMBOL(phy_remove_link_mode);
*/
void phy_support_sym_pause(struct phy_device *phydev)
{
- phydev->supported &= ~SUPPORTED_Asym_Pause;
- phydev->supported |= SUPPORTED_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_sym_pause);
@@ -1954,8 +1987,9 @@ EXPORT_SYMBOL(phy_support_sym_pause);
*/
void phy_support_asym_pause(struct phy_device *phydev)
{
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->advertising = phydev->supported;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported);
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_support_asym_pause);
@@ -1973,12 +2007,13 @@ EXPORT_SYMBOL(phy_support_asym_pause);
void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
bool autoneg)
{
- phydev->supported &= ~SUPPORTED_Pause;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported);
if (rx && tx && autoneg)
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
}
EXPORT_SYMBOL(phy_set_sym_pause);
@@ -1995,20 +2030,29 @@ EXPORT_SYMBOL(phy_set_sym_pause);
*/
void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx)
{
- u16 oldadv = phydev->advertising;
- u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv);
- if (rx)
- newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- if (tx)
- newadv ^= SUPPORTED_Asym_Pause;
+ linkmode_copy(oldadv, phydev->advertising);
- if (oldadv != newadv) {
- phydev->advertising = newadv;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
- if (phydev->autoneg)
- phy_start_aneg(phydev);
+ if (rx) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
}
+
+ if (tx)
+ linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ if (!linkmode_equal(oldadv, phydev->advertising) &&
+ phydev->autoneg)
+ phy_start_aneg(phydev);
}
EXPORT_SYMBOL(phy_set_asym_pause);
@@ -2024,8 +2068,10 @@ EXPORT_SYMBOL(phy_set_asym_pause);
bool phy_validate_pause(struct phy_device *phydev,
struct ethtool_pauseparam *pp)
{
- if (!(phydev->supported & SUPPORTED_Pause) ||
- (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported) ||
+ (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported) &&
pp->rx_pause != pp->tx_pause))
return false;
return true;
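phy_validate_pause() and phy_set_asym_pause() pair up naturally in a MAC driver's ethtool path: validate first, then update the advertisement, which restarts autoneg if anything changed. A sketch of such a .set_pauseparam, assuming the usual ndev->phydev wiring (example_set_pauseparam is hypothetical):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

static int example_set_pauseparam(struct net_device *ndev,
                                  struct ethtool_pauseparam *pp)
{
        struct phy_device *phydev = ndev->phydev;

        if (!phy_validate_pause(phydev, pp))
                return -EINVAL;

        phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause);
        return 0;
}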
@@ -2074,6 +2120,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+static bool phy_drv_supports_irq(struct phy_driver *phydrv)
+{
+ return phydrv->config_intr && phydrv->ack_interrupt;
+}
+
/**
* phy_probe - probe and init a PHY device
* @dev: device to probe and init
@@ -2095,8 +2146,7 @@ static int phy_probe(struct device *dev)
/* Disable the interrupt if the PHY doesn't support it
* but the interrupt is still a valid one
*/
- if (!(phydrv->flags & PHY_HAS_INTERRUPT) &&
- phy_interrupt_is_valid(phydev))
+ if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev))
phydev->irq = PHY_POLL;
if (phydrv->flags & PHY_IS_INTERNAL)
@@ -2109,9 +2159,9 @@ static int phy_probe(struct device *dev)
* or both of these values
*/
ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features);
- phydev->supported = features;
+ linkmode_copy(phydev->supported, phydrv->features);
of_set_phy_supported(phydev);
- phydev->advertising = phydev->supported;
+ linkmode_copy(phydev->advertising, phydev->supported);
/* Get the EEE modes we want to prohibit. We will ask
* the PHY stop advertising these mode later on
@@ -2131,14 +2181,22 @@ static int phy_probe(struct device *dev)
*/
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) ||
test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) {
- phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features))
- phydev->supported |= SUPPORTED_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydrv->features))
- phydev->supported |= SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
} else {
- phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->supported);
}
/* Set the state to READY by default */
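After this patch a driver never sets PHY_HAS_INTERRUPT: providing ack_interrupt and config_intr is what signals interrupt support, and .features feeds phydev->supported via linkmode_copy(). A hypothetical minimal driver skeleton under these rules (the ID and both callbacks are made up for illustration):

#include <linux/phy.h>

static int example_ack_interrupt(struct phy_device *phydev)
{
        return 0;       /* read/clear the device's IRQ status here */
}

static int example_config_intr(struct phy_device *phydev)
{
        return 0;       /* honour phydev->interrupts: enable/disable */
}

static struct phy_driver example_driver[] = { {
        PHY_ID_MATCH_EXACT(0x00112233),         /* hypothetical ID */
        .name           = "Example GbE PHY",
        .features       = PHY_GBIT_FEATURES,    /* fills phydev->supported */
        /* no .flags needed: these two callbacks imply IRQ support */
        .ack_interrupt  = example_ack_interrupt,
        .config_intr    = example_config_intr,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
} };
module_phy_driver(example_driver);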
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1bf5c4..263385b75bba 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed);
static void phy_led_trigger_format_name(struct phy_device *phy, char *buf,
- size_t size, char *suffix)
+ size_t size, const char *suffix)
{
snprintf(buf, size, PHY_ID_FMT ":%s",
phy->mdio.bus->id, phy->mdio.addr, suffix);
@@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy,
struct phy_led_trigger *plt,
unsigned int speed)
{
- char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE];
-
plt->speed = speed;
-
- if (speed < SPEED_1000)
- snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed);
- else if (speed == SPEED_2500)
- snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps");
- else
- snprintf(name_suffix, sizeof(name_suffix), "%dGbps",
- DIV_ROUND_CLOSEST(speed, 1000));
-
phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name),
- name_suffix);
+ phy_speed_to_str(speed));
plt->trigger.name = plt->name;
return led_trigger_register(&plt->trigger);
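The LED trigger suffix now reuses phy_speed_to_str(), so trigger names stay consistent with the rest of phylib and new speeds get names without touching this file. A sketch of the resulting name, following phy_led_trigger_format_name() (the bus id is hypothetical):

#include <linux/phy.h>

static void example_format(struct phy_device *phy, char *buf, size_t size)
{
        /* same construction as phy_led_trigger_format_name() */
        snprintf(buf, size, PHY_ID_FMT ":%s",
                 phy->mdio.bus->id, phy->mdio.addr,
                 phy_speed_to_str(SPEED_1000));
        /* yields e.g. "stmmac-0:01:1Gbps" */
}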
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9b8dd0d0ee42..e7becc7379d7 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl,
phylink_validate(pl, pl->supported, &pl->link_config);
s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, true);
+ pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
if (s) {
@@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
{
struct phylink_link_state config;
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
- u32 advertising;
int ret;
memset(&config, 0, sizeof(config));
- ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported);
- ethtool_convert_legacy_u32_to_link_mode(config.advertising,
- phy->advertising);
+ linkmode_copy(supported, phy->supported);
+ linkmode_copy(config.advertising, phy->advertising);
config.interface = pl->link_config.interface;
/*
@@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
linkmode_copy(pl->link_config.advertising, config.advertising);
/* Restrict the phy advertisement according to the MAC support. */
- ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising);
- phy->advertising = advertising;
+ linkmode_copy(phy->advertising, config.advertising);
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
netdev_dbg(pl->netdev,
- "phy: setting supported %*pb advertising 0x%08x\n",
+ "phy: setting supported %*pb advertising %*pb\n",
__ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported,
- phy->advertising);
+ __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising);
phy_start_machine(phy);
if (phy->irq > 0)
@@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS, false);
+ pl->supported, false);
if (!s)
return -EINVAL;
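The phylink changes remove the round trip through the legacy u32 representation, which silently dropped every link mode above bit 31 (for example, ETHTOOL_LINK_MODE_2500baseT_Full_BIT is bit 47). A side-by-side sketch of the old lossy path and the new exact one:

#include <linux/ethtool.h>
#include <linux/linkmode.h>
#include <linux/phy.h>

static void example_copy(struct phy_device *phy, unsigned long *supported)
{
        u32 legacy;

        /* old, lossy path: a u32 cannot represent bits above 31 */
        ethtool_convert_link_mode_to_legacy_u32(&legacy, phy->supported);
        ethtool_convert_legacy_u32_to_link_mode(supported, legacy);

        /* new, exact path */
        linkmode_copy(supported, phy->supported);
}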
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index 889a4dce1648..cfe2313dbefd 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { {
.name = "QS6612",
.phy_id_mask = 0xfffffff0,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = qs6612_config_init,
.ack_interrupt = qs6612_ack_interrupt,
.config_intr = qs6612_config_intr,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 271e8adc39f1..c6010fb1aa0f 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
static struct phy_driver realtek_drvs[] = {
{
- .phy_id = 0x00008201,
+ PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .phy_id_mask = 0x0000ffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
}, {
- .phy_id = 0x001cc816,
+ PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc910,
+ PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc912,
+ PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
@@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = {
.suspend = rtl8211b_suspend,
.resume = rtl8211b_resume,
}, {
- .phy_id = 0x001cc913,
+ PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
- .phy_id = 0x001cc914,
+ PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc915,
+ PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
- .phy_id = 0x001cc916,
+ PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- .phy_id = 0x001cc961,
+ PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .phy_id_mask = 0x001fffff,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &rtl8366rb_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = {
module_phy_driver(realtek_drvs);
-static struct mdio_device_id __maybe_unused realtek_tbl[] = {
- { 0x001cc816, 0x001fffff },
- { 0x001cc910, 0x001fffff },
- { 0x001cc912, 0x001fffff },
- { 0x001cc913, 0x001fffff },
- { 0x001cc914, 0x001fffff },
- { 0x001cc915, 0x001fffff },
- { 0x001cc916, 0x001fffff },
- { 0x001cc961, 0x001fffff },
+static const struct mdio_device_id __maybe_unused realtek_tbl[] = {
+ { PHY_ID_MATCH_VENDOR(0x001cc800) },
{ }
};
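The hand-written id/mask pairs give way to the PHY_ID_MATCH_* helpers, and the MDIO device table collapses to a single vendor match covering all the 0x001cc8xx/0x001cc9xx parts listed above. Assuming the definitions this series adds to include/linux/phy.h, the two helpers used here expand roughly to:

#define PHY_ID_MATCH_EXACT(id)  .phy_id = (id), .phy_id_mask = GENMASK(31, 0)
#define PHY_ID_MATCH_VENDOR(id) .phy_id = (id), .phy_id_mask = GENMASK(31, 10)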
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index c328208388da..f9477ff55545 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN83C185",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8187",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8700",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN911x Internal PHY",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
@@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8710/LAN8720",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN,
+ .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
@@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = {
.name = "SMSC LAN8740",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.probe = smsc_phy_probe,
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 2fe9a87b55b5..33d733684f5b 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xfffffff0,
.name = "STe101p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
@@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = {
.phy_id_mask = 0xffffffff,
.name = "STe100p",
.features = PHY_BASIC_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = ste10Xp_config_init,
.ack_interrupt = ste10Xp_ack_interrupt,
.config_intr = ste10Xp_config_intr,
diff --git a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c
index 55f48ee3595a..1e4fc42e4629 100644
--- a/drivers/net/phy/uPD60620.c
+++ b/drivers/net/phy/uPD60620.c
@@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev)
return phy_state;
phydev->link = 0;
- phydev->lp_advertising = 0;
+ linkmode_zero(phydev->lp_advertising);
phydev->pause = 0;
phydev->asym_pause = 0;
@@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev)
if (phy_state < 0)
return phy_state;
- phydev->lp_advertising
- = mii_lpa_to_ethtool_lpa_t(phy_state);
+ mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising,
+ phy_state);
if (phydev->duplex == DUPLEX_FULL) {
if (phy_state & LPA_PAUSE_CAP)
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index fbf9ad429593..4ca513feba0e 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -399,7 +399,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8234",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -409,7 +408,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -419,7 +417,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8514",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -429,7 +426,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8572",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -439,7 +435,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8574",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -449,7 +444,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8601",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8601_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -494,7 +488,6 @@ static struct phy_driver vsc82xx_driver[] = {
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc824x_config_init,
.config_aneg = &vsc82x4_config_aneg,
.ack_interrupt = &vsc824x_ack_interrupt,
@@ -505,7 +498,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8221",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
@@ -515,7 +507,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
.name = "Vitesse VSC8211",
.features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
.config_init = &vsc8221_config_init,
.ack_interrupt = &vsc824x_ack_interrupt,
.config_intr = &vsc82xx_config_intr,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e244f5d7512a..56575f88d1fd 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -188,6 +188,11 @@ struct tun_file {
struct xdp_rxq_info xdp_rxq;
};
+struct tun_page {
+ struct page *page;
+ int count;
+};
+
struct tun_flow_entry {
struct hlist_node hash_link;
struct rcu_head rcu;
@@ -1473,23 +1478,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
- struct page_frag *pfrag = &current->task_frag;
size_t fragsz = it->iov[i].iov_len;
+ struct page *page;
+ void *frag;
if (fragsz == 0 || fragsz > PAGE_SIZE) {
err = -EINVAL;
goto free;
}
-
- if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
+ frag = netdev_alloc_frag(fragsz);
+ if (!frag) {
err = -ENOMEM;
goto free;
}
-
- skb_fill_page_desc(skb, i - 1, pfrag->page,
- pfrag->offset, fragsz);
- page_ref_inc(pfrag->page);
- pfrag->offset += fragsz;
+ page = virt_to_head_page(frag);
+ skb_fill_page_desc(skb, i - 1, page,
+ frag - page_address(page), fragsz);
}
return skb;
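
This hunk drops the per-task page_frag refill, which needed an explicit page_ref_inc() and manual offset bookkeeping, in favour of netdev_alloc_frag(), whose per-CPU fragment cache hands back a virtual address that already owns a page reference. A reduced sketch of the allocate-and-attach step (length/truesize accounting elided; the helper name is illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int attach_frag(struct sk_buff *skb, int slot, size_t fragsz)
{
	void *frag = netdev_alloc_frag(fragsz);
	struct page *page;

	if (!frag)
		return -ENOMEM;

	/* Recover the compound head page and the offset within it from
	 * the returned virtual address; the reference is already held.
	 */
	page = virt_to_head_page(frag);
	skb_fill_page_desc(skb, slot, page, frag - page_address(page), fragsz);
	return 0;
}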
@@ -2381,9 +2385,16 @@ static void tun_sock_write_space(struct sock *sk)
kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
}
+static void tun_put_page(struct tun_page *tpage)
+{
+ if (tpage->page)
+ __page_frag_cache_drain(tpage->page, tpage->count);
+}
+
static int tun_xdp_one(struct tun_struct *tun,
struct tun_file *tfile,
- struct xdp_buff *xdp, int *flush)
+ struct xdp_buff *xdp, int *flush,
+ struct tun_page *tpage)
{
struct tun_xdp_hdr *hdr = xdp->data_hard_start;
struct virtio_net_hdr *gso = &hdr->gso;
@@ -2394,6 +2405,7 @@ static int tun_xdp_one(struct tun_struct *tun,
int buflen = hdr->buflen;
int err = 0;
bool skb_xdp = false;
+ struct page *page;
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
@@ -2420,7 +2432,14 @@ static int tun_xdp_one(struct tun_struct *tun,
case XDP_PASS:
break;
default:
- put_page(virt_to_head_page(xdp->data));
+ page = virt_to_head_page(xdp->data);
+ if (tpage->page == page) {
+ ++tpage->count;
+ } else {
+ tun_put_page(tpage);
+ tpage->page = page;
+ tpage->count = 1;
+ }
return 0;
}
}
@@ -2452,7 +2471,8 @@ build:
goto out;
}
- if (!rcu_dereference(tun->steering_prog))
+ if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
+ !tfile->detached)
rxhash = __skb_get_hash_symmetric(skb);
skb_record_rx_queue(skb, tfile->queue_index);
@@ -2484,15 +2504,18 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -EBADFD;
if (ctl && (ctl->type == TUN_MSG_PTR)) {
+ struct tun_page tpage;
int n = ctl->num;
int flush = 0;
+ memset(&tpage, 0, sizeof(tpage));
+
local_bh_disable();
rcu_read_lock();
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
- tun_xdp_one(tun, tfile, xdp, &flush);
+ tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
}
if (flush)
@@ -2501,6 +2524,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
rcu_read_unlock();
local_bh_enable();
+ tun_put_page(&tpage);
+
ret = total_len;
goto out;
}
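
The tun_page plumbing above batches page releases on the XDP drop path: consecutive buffers carved from the same head page are merely counted, and the accumulated references are dropped in one __page_frag_cache_drain() call when a different page shows up or the batch ends. The coalescing logic, reduced to its core (struct and function names here are illustrative):

#include <linux/mm.h>

struct frag_batch {
	struct page *page;	/* current run's head page */
	int count;		/* references owed on it */
};

static void frag_batch_flush(struct frag_batch *b)
{
	if (b->page)
		__page_frag_cache_drain(b->page, b->count);
}

static void frag_batch_drop(struct frag_batch *b, void *buf)
{
	struct page *page = virt_to_head_page(buf);

	if (b->page == page) {
		b->count++;		/* same page: defer the release */
	} else {
		frag_batch_flush(b);	/* settle the previous run */
		b->page = page;
		b->count = 1;
	}
}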
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index be1917be28f2..3c8bdac78866 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -13,6 +13,7 @@
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net,
dev->fc_request_control |= FLOW_CTRL_TX;
if (ecmd.base.autoneg) {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
u32 mii_adv;
- u32 advertising;
- ethtool_convert_link_mode_to_legacy_u32(
- &advertising, ecmd.link_modes.advertising);
-
- advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ecmd.link_modes.advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ecmd.link_modes.advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
-
- ethtool_convert_legacy_u32_to_link_mode(
- ecmd.link_modes.advertising, advertising);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(ecmd.link_modes.advertising, fc,
+ ecmd.link_modes.advertising);
phy_ethtool_ksettings_set(phydev, &ecmd);
}
@@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev)
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
int ret;
u32 mii_adv;
struct phy_device *phydev;
@@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
/* support both flow controls */
dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
- phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
- phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
+ mii_adv_to_linkmode_adv_t(fc, mii_adv);
+ linkmode_or(phydev->advertising, fc, phydev->advertising);
if (phydev->mdio.dev.of_node) {
u32 reg;
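
Both lan78xx hunks stop converting the advertising mask to the legacy u32 encoding just to toggle pause bits: the pause link modes are cleared in place and a scratch bitmap built by mii_adv_to_linkmode_adv_t() is ORed back in. Condensed into one helper (a sketch; fc_flags carries FLOW_CTRL_RX/FLOW_CTRL_TX):

#include <linux/linkmode.h>
#include <linux/mii.h>
#include <linux/phy.h>

static void set_pause_advertising(struct phy_device *phydev, u8 fc_flags)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };

	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);

	/* FLOW_CTRL_* -> ADVERTISE_PAUSE_* -> linkmode bits, then merge. */
	mii_adv_to_linkmode_adv_t(fc, mii_advertise_flowctrl(fc_flags));
	linkmode_or(phydev->advertising, fc, phydev->advertising);
}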
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb6f958..e3d08626828e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -618,9 +618,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
return;
}
- memcpy(&intdata, urb->transfer_buffer, 4);
- le32_to_cpus(&intdata);
-
+ intdata = get_unaligned_le32(urb->transfer_buffer);
netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
@@ -1295,6 +1293,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
smsc95xx_init_mac_address(dev);
@@ -1933,8 +1932,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
unsigned char *packet;
u16 size;
- memcpy(&header, skb->data, sizeof(header));
- le32_to_cpus(&header);
+ header = get_unaligned_le32(skb->data);
skb_pull(skb, 4 + NET_IP_ALIGN);
packet = skb->data;
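
Both smsc95xx RX hunks collapse a memcpy-plus-le32_to_cpus() pair into a single get_unaligned_le32(), which reads correctly on architectures that fault on unaligned loads and drops the intermediate copy. The paired accessors, for reference (a trivial sketch):

#include <asm/unaligned.h>

static u32 read_le32_field(const void *buf)
{
	return get_unaligned_le32(buf);	/* any alignment, little-endian */
}

static void write_le32_field(void *buf, u32 val)
{
	put_unaligned_le32(val, buf);	/* used by the TX path below */
}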
@@ -2011,12 +2009,30 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
return (high_16 << 16) | low_16;
}
+/* The TX CSUM won't work if the checksum lies in the last 4 bytes of the
+ * transmission. This is fairly unlikely; it only seems to trigger with
+ * some short TCP ACK packets.
+ *
+ * Note, this calculation should probably check for the alignment of the
+ * data as well, but a straight check for csum being in the last four bytes
+ * of the packet should be ok for now.
+ */
+static bool smsc95xx_can_tx_checksum(struct sk_buff *skb)
+{
+ unsigned int len = skb->len - skb_checksum_start_offset(skb);
+
+ if (skb->len <= 45)
+ return false;
+ return skb->csum_offset < (len - (4 + 1));
+}
+
static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
u32 tx_cmd_a, tx_cmd_b;
+ void *ptr;
/* We do not advertise SG, so skbs should be already linearized */
BUG_ON(skb_shinfo(skb)->nr_frags);
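
To make the new guard concrete, assume a typical 60-byte Ethernet/IPv4 TCP ACK (layout assumed for illustration):

/* skb->len                    = 60
 * skb_checksum_start_offset() = 34   (14 MAC + 20 IP headers)
 * len                         = 60 - 34 = 26
 * skb->csum_offset (TCP)      = 16
 *
 * 60 > 45 and 16 < 26 - (4 + 1) = 21, so hardware checksum offload is
 * allowed. If the checksum field fell within the last four bytes of
 * the frame, smsc95xx_can_tx_checksum() would return false and the
 * driver would fall back to a software checksum instead.
 */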
@@ -2030,8 +2046,11 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
return NULL;
}
+ tx_cmd_b = (u32)skb->len;
+ tx_cmd_a = tx_cmd_b | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_;
+
if (csum) {
- if (skb->len <= 45) {
+ if (!smsc95xx_can_tx_checksum(skb)) {
/* workaround - hardware tx checksum does not work
* properly with extremely small packets */
long csstart = skb_checksum_start_offset(skb);
@@ -2043,24 +2062,18 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
csum = false;
} else {
u32 csum_preamble = smsc95xx_calc_csum_preamble(skb);
- skb_push(skb, 4);
- cpu_to_le32s(&csum_preamble);
- memcpy(skb->data, &csum_preamble, 4);
+ ptr = skb_push(skb, 4);
+ put_unaligned_le32(csum_preamble, ptr);
+
+ tx_cmd_a += 4;
+ tx_cmd_b += 4;
+ tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
}
}
- skb_push(skb, 4);
- tx_cmd_b = (u32)(skb->len - 4);
- if (csum)
- tx_cmd_b |= TX_CMD_B_CSUM_ENABLE;
- cpu_to_le32s(&tx_cmd_b);
- memcpy(skb->data, &tx_cmd_b, 4);
-
- skb_push(skb, 4);
- tx_cmd_a = (u32)(skb->len - 8) | TX_CMD_A_FIRST_SEG_ |
- TX_CMD_A_LAST_SEG_;
- cpu_to_le32s(&tx_cmd_a);
- memcpy(skb->data, &tx_cmd_a, 4);
+ ptr = skb_push(skb, 8);
+ put_unaligned_le32(tx_cmd_a, ptr);
+ put_unaligned_le32(tx_cmd_b, ptr+4);
return skb;
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 890fa5b905e2..f412ea1cef18 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return PTR_ERR(net);
peer = rtnl_create_link(net, ifname, name_assign_type,
- &veth_link_ops, tbp);
+ &veth_link_ops, tbp, extack);
if (IS_ERR(peer)) {
put_net(net);
return PTR_ERR(peer);
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 69b7227c637e..21ad4b1d7f03 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -981,24 +981,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
struct sk_buff *skb)
{
int orig_iif = skb->skb_iif;
- bool need_strict;
+ bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
+ bool is_ndisc = ipv6_ndisc_frame(skb);
- /* loopback traffic; do not push through packet taps again.
- * Reset pkt_type for upper layers to process skb
+ /* loopback, multicast & non-ND link-local traffic; do not push through
+ * packet taps again. Reset pkt_type for upper layers to process skb
*/
- if (skb->pkt_type == PACKET_LOOPBACK) {
+ if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
- skb->pkt_type = PACKET_HOST;
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->pkt_type = PACKET_HOST;
goto out;
}
- /* if packet is NDISC or addressed to multicast or link-local
- * then keep the ingress interface
- */
- need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
- if (!ipv6_ndisc_frame(skb) && !need_strict) {
+ /* if packet is NDISC then keep the ingress interface */
+ if (!is_ndisc) {
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 297cdeaef479..c3e65e78f015 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1552,6 +1552,34 @@ drop:
return 0;
}
+/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */
+static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan;
+ struct vxlan_sock *vs;
+ struct vxlanhdr *hdr;
+ __be32 vni;
+
+ if (skb->len < VXLAN_HLEN)
+ return -EINVAL;
+
+ hdr = vxlan_hdr(skb);
+
+ if (!(hdr->vx_flags & VXLAN_HF_VNI))
+ return -EINVAL;
+
+ vs = rcu_dereference_sk_user_data(sk);
+ if (!vs)
+ return -ENOENT;
+
+ vni = vxlan_vni(hdr->vx_vni);
+ vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni);
+ if (!vxlan)
+ return -ENOENT;
+
+ return 0;
+}
+
static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
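
vxlan_err_lookup() is registered further down in this diff via tunnel_cfg.encap_err_lookup, letting the UDP layer's ICMP error path confirm that an embedded packet really targets a known VNI before charging the error to the tunnel socket. The registration, in sketch form:

#include <net/udp_tunnel.h>

static void example_open_vxlan_sock(struct net *net, struct socket *sock)
{
	struct udp_tunnel_sock_cfg tunnel_cfg = {
		.encap_type       = 1,		/* VXLAN */
		.encap_rcv        = vxlan_rcv,
		.encap_err_lookup = vxlan_err_lookup,
	};

	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
}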
@@ -2250,13 +2278,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- /* Bypass encapsulation if the destination is local */
if (!info) {
+ /* Bypass encapsulation if the destination is local */
err = encap_bypass_if_local(skb, dev, vxlan, dst,
dst_port, ifindex, vni,
&rt->dst, rt->rt_flags);
if (err)
goto out_unlock;
+
+ if (vxlan->cfg.df == VXLAN_DF_SET) {
+ df = htons(IP_DF);
+ } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) {
+ struct ethhdr *eth = eth_hdr(skb);
+
+ if (ntohs(eth->h_proto) == ETH_P_IPV6 ||
+ (ntohs(eth->h_proto) == ETH_P_IP &&
+ old_iph->frag_off & htons(IP_DF)))
+ df = htons(IP_DF);
+ }
} else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
df = htons(IP_DF);
}
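
The outer-DF decision added above supports three policies: VXLAN_DF_UNSET leaves the bit clear, VXLAN_DF_SET forces it, and VXLAN_DF_INHERIT copies the inner IPv4 DF bit (IPv6 is treated as DF, since it is never fragmented in flight). Restated as a standalone helper (a sketch; old_iph is the inner IPv4 header):

#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/ip.h>

static __be16 vxlan_outer_df(enum ifla_vxlan_df mode,
			     const struct ethhdr *eth,
			     const struct iphdr *old_iph)
{
	switch (mode) {
	case VXLAN_DF_SET:
		return htons(IP_DF);
	case VXLAN_DF_INHERIT:
		if (eth->h_proto == htons(ETH_P_IPV6) ||
		    (eth->h_proto == htons(ETH_P_IP) &&
		     (old_iph->frag_off & htons(IP_DF))))
			return htons(IP_DF);
		return 0;
	default:	/* VXLAN_DF_UNSET */
		return 0;
	}
}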
@@ -2809,6 +2848,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
[IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG },
+ [IFLA_VXLAN_DF] = { .type = NLA_U8 },
};
static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -2865,6 +2905,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[],
}
}
+ if (data[IFLA_VXLAN_DF]) {
+ enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
+ if (df < 0 || df > VXLAN_DF_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF],
+ "Invalid DF attribute");
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -2948,6 +2998,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
+ tunnel_cfg.encap_err_lookup = vxlan_err_lookup;
tunnel_cfg.encap_destroy = NULL;
tunnel_cfg.gro_receive = vxlan_gro_receive;
tunnel_cfg.gro_complete = vxlan_gro_complete;
@@ -3509,6 +3560,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
conf->mtu = nla_get_u32(tb[IFLA_MTU]);
}
+ if (data[IFLA_VXLAN_DF])
+ conf->df = nla_get_u8(data[IFLA_VXLAN_DF]);
+
return 0;
}
@@ -3601,6 +3655,7 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */
nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */
@@ -3667,6 +3722,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_VXLAN_TTL_INHERIT,
!!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) ||
+ nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) ||
nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) ||
nla_put_u8(skb, IFLA_VXLAN_LEARNING,
!!(vxlan->cfg.flags & VXLAN_F_LEARN)) ||
@@ -3749,7 +3805,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
memset(&tb, 0, sizeof(tb));
dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
+ &vxlan_link_ops, tb, NULL);
if (IS_ERR(dev))
return dev;
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 4d6409605207..7a42336c8af8 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return -ENOMEM;
}
+ netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
/* Start from the next BD that should be filled */
@@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
+ unsigned int bytes_sent = 0;
+ int howmany = 0;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
int tx_restart = 0;
@@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
+ howmany++;
+ bytes_sent += skb->len;
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
@@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
if (tx_restart)
hdlc_tx_restart(priv);
+ netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
@@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev)
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
napi_enable(&priv->napi);
+ netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
}
@@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev)
free_irq(priv->ut_info->uf_info.irq, priv);
netif_stop_queue(dev);
+ netdev_reset_queue(dev);
priv->hdlc_busy = 0;
return 0;
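
The fsl_ucc_hdlc changes wire the driver up for Byte Queue Limits: bytes are reported when a frame is queued, completions are reported from the tx-done path, and the counters are reset whenever the queue is (re)started so the two sides cannot drift. The three touch points, in generic form (a sketch with the hardware interaction elided):

#include <linux/netdevice.h>

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	/* ... hand the frame to hardware ... */
	netdev_sent_queue(dev, len);		/* after a successful enqueue */
	return NETDEV_TX_OK;
}

static void example_tx_done(struct net_device *dev)
{
	unsigned int pkts = 0, bytes = 0;

	/* ... walk completed descriptors, accumulating pkts and bytes ... */
	netdev_completed_queue(dev, pkts, bytes);
}

static void example_queue_restart(struct net_device *dev)
{
	netdev_reset_queue(dev);	/* on open and on close */
}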
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 3e37c8cf82c6..b2ad2122c8c4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev,
return err;
}
+static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr,
+ struct mmc_command *mc, int sg_cnt, int req_sz,
+ int func_blk_sz, u32 *addr,
+ struct brcmf_sdio_dev *sdiodev,
+ struct sdio_func *func, int write)
+{
+ int ret;
+
+ md->sg_len = sg_cnt;
+ md->blocks = req_sz / func_blk_sz;
+ mc->arg |= (*addr & 0x1FFFF) << 9; /* address */
+ mc->arg |= md->blocks & 0x1FF; /* block count */
+ /* incrementing addr for function 1 */
+ if (func->num == 1)
+ *addr += req_sz;
+
+ mmc_set_data_timeout(md, func->card);
+ mmc_wait_for_req(func->card->host, mr);
+
+ ret = mc->error ? mc->error : md->error;
+ if (ret == -ENOMEDIUM) {
+ brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
+ } else if (ret != 0) {
+ brcmf_err("CMD53 sg block %s failed %d\n",
+ write ? "write" : "read", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
/**
* brcmf_sdiod_sglist_rw - SDIO interface function for block data access
* @sdiodev: brcmfmac sdio device
@@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
struct sk_buff_head *pktlist)
{
unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset;
- unsigned int max_req_sz, orig_offset, dst_offset;
- unsigned short max_seg_cnt, seg_sz;
+ unsigned int max_req_sz, src_offset, dst_offset;
unsigned char *pkt_data, *orig_data, *dst_data;
- struct sk_buff *pkt_next = NULL, *local_pkt_next;
struct sk_buff_head local_list, *target_list;
+ struct sk_buff *pkt_next = NULL, *src;
+ unsigned short max_seg_cnt;
struct mmc_request mmc_req;
struct mmc_command mmc_cmd;
struct mmc_data mmc_dat;
@@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
max_req_sz = sdiodev->max_request_size;
max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count,
target_list->qlen);
- seg_sz = target_list->qlen;
- pkt_offset = 0;
- pkt_next = target_list->next;
memset(&mmc_req, 0, sizeof(struct mmc_request));
memset(&mmc_cmd, 0, sizeof(struct mmc_command));
@@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
mmc_req.cmd = &mmc_cmd;
mmc_req.data = &mmc_dat;
- while (seg_sz) {
- req_sz = 0;
- sg_cnt = 0;
- sgl = sdiodev->sgtable.sgl;
- /* prep sg table */
- while (pkt_next != (struct sk_buff *)target_list) {
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
+ skb_queue_walk(target_list, pkt_next) {
+ pkt_offset = 0;
+ while (pkt_offset < pkt_next->len) {
pkt_data = pkt_next->data + pkt_offset;
sg_data_sz = pkt_next->len - pkt_offset;
if (sg_data_sz > sdiodev->max_segment_size)
@@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev,
sg_data_sz = max_req_sz - req_sz;
sg_set_buf(sgl, pkt_data, sg_data_sz);
-
sg_cnt++;
+
sgl = sg_next(sgl);
req_sz += sg_data_sz;
pkt_offset += sg_data_sz;
- if (pkt_offset == pkt_next->len) {
- pkt_offset = 0;
- pkt_next = pkt_next->next;
+ if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) {
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+ if (ret)
+ goto exit_queue_walk;
+ req_sz = 0;
+ sg_cnt = 0;
+ sgl = sdiodev->sgtable.sgl;
}
-
- if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt)
- break;
- }
- seg_sz -= sg_cnt;
-
- if (req_sz % func_blk_sz != 0) {
- brcmf_err("sg request length %u is not %u aligned\n",
- req_sz, func_blk_sz);
- ret = -ENOTBLK;
- goto exit;
- }
-
- mmc_dat.sg_len = sg_cnt;
- mmc_dat.blocks = req_sz / func_blk_sz;
- mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */
- mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */
- /* incrementing addr for function 1 */
- if (func->num == 1)
- addr += req_sz;
-
- mmc_set_data_timeout(&mmc_dat, func->card);
- mmc_wait_for_req(func->card->host, &mmc_req);
-
- ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error;
- if (ret == -ENOMEDIUM) {
- brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM);
- break;
- } else if (ret != 0) {
- brcmf_err("CMD53 sg block %s failed %d\n",
- write ? "write" : "read", ret);
- ret = -EIO;
- break;
}
}
-
+ if (sg_cnt)
+ ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd,
+ sg_cnt, req_sz, func_blk_sz,
+ &addr, sdiodev, func, write);
+exit_queue_walk:
if (!write && sdiodev->settings->bus.sdio.broken_sg_support) {
- local_pkt_next = local_list.next;
- orig_offset = 0;
+ src = __skb_peek(&local_list);
+ src_offset = 0;
skb_queue_walk(pktlist, pkt_next) {
dst_offset = 0;
- do {
- req_sz = local_pkt_next->len - orig_offset;
- req_sz = min_t(uint, pkt_next->len - dst_offset,
- req_sz);
- orig_data = local_pkt_next->data + orig_offset;
+
+ /* This is safe because we must have enough SKB data
+ * in the local list to cover everything in pktlist.
+ */
+ while (1) {
+ req_sz = pkt_next->len - dst_offset;
+ if (req_sz > src->len - src_offset)
+ req_sz = src->len - src_offset;
+
+ orig_data = src->data + src_offset;
dst_data = pkt_next->data + dst_offset;
memcpy(dst_data, orig_data, req_sz);
- orig_offset += req_sz;
- dst_offset += req_sz;
- if (orig_offset == local_pkt_next->len) {
- orig_offset = 0;
- local_pkt_next = local_pkt_next->next;
+
+ src_offset += req_sz;
+ if (src_offset == src->len) {
+ src_offset = 0;
+ src = skb_peek_next(src, &local_list);
}
+ dst_offset += req_sz;
if (dst_offset == pkt_next->len)
break;
- } while (!skb_queue_empty(&local_list));
+ }
}
}
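
The brcmfmac rewrite replaces hand-rolled ->next chasing with skb_queue_walk() and factors the MMC submission into mmc_submit_one(), flushing whenever the scatter list hits the size or segment budget and once more for the remainder. The overall shape, stripped of the SDIO specifics and the per-buffer chunking (names are illustrative):

#include <linux/scatterlist.h>
#include <linux/skbuff.h>

static int walk_and_submit(struct sk_buff_head *list, struct scatterlist *sgt,
			   unsigned int max_sz, unsigned short max_segs,
			   int (*submit)(struct scatterlist *sg, int cnt,
					 unsigned int sz))
{
	struct scatterlist *sgl = sgt;
	unsigned int req_sz = 0;
	int sg_cnt = 0, ret = 0;
	struct sk_buff *skb;

	skb_queue_walk(list, skb) {
		sg_set_buf(sgl, skb->data, skb->len);
		sgl = sg_next(sgl);
		req_sz += skb->len;

		if (++sg_cnt >= max_segs || req_sz >= max_sz) {
			ret = submit(sgt, sg_cnt, req_sz);
			if (ret)
				return ret;
			sgl = sgt;
			req_sz = 0;
			sg_cnt = 0;
		}
	}

	return sg_cnt ? submit(sgt, sg_cnt, req_sz) : ret;	/* flush the tail */
}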
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index f17f602e6171..a8303afa15f1 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
return;
}
- wmb(); /* barrier so backend seens requests */
-
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
if (notify)
notify_remote_via_irq(queue->rx_irq);
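
The deleted wmb() duplicated ordering the ring macro already provides: RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() issues its own write barrier between filling in requests and publishing the new producer index. A simplified paraphrase of the macro's shape from Xen's ring.h:

/* virt_wmb() makes the requests visible before the producer index moves;
 * virt_mb() orders the index update against the req_event check.
 */
#define EXAMPLE_PUSH_AND_CHECK_NOTIFY(_r, _notify) do {			\
	RING_IDX __old = (_r)->sring->req_prod;				\
	RING_IDX __new = (_r)->req_prod_pvt;				\
	virt_wmb();							\
	(_r)->sring->req_prod = __new;					\
	virt_mb();							\
	(_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <	\
		     (RING_IDX)(__new - __old));			\
} while (0)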