Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/Kconfig | 1
-rw-r--r-- drivers/net/bonding/bond_main.c | 3
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 9
-rw-r--r-- drivers/net/dsa/microchip/ksz9477.c | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 11
-rw-r--r-- drivers/net/dsa/mv88e6xxx/port.h | 1
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 7
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 10
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 187
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 34
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 4
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 16
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c | 15
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h | 6
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | 13
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 8
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 13
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 21
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 56
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c | 25
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 5
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 6
-rw-r--r-- drivers/net/ethernet/cavium/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.c | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/l2t.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 107
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 10
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 8
-rw-r--r-- drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.c | 40
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 45
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 15
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 30
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 35
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 25
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 1
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_tx.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 95
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 18
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 6
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 59
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 117
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 28
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 12
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 46
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 180
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 450
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 339
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.h | 25
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 696
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 612
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h | 20
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 7
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 68
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_base.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_defines.h | 17
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 839
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 449
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_regs.h | 16
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 9
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 3
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 10
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c | 212
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h | 92
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c | 253
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 8
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 52
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 66
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 51
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/minimal.c | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 36
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 60
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchib.c | 23
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 29
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 541
-rw-r--r-- drivers/net/ethernet/netronome/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/action.c | 203
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/main.h | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/flower/offload.c | 4
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_app.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 26
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 13
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 131
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 29
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 3
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.c | 16
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_port.h | 2
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c | 62
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h | 8
-rw-r--r-- drivers/net/ethernet/pasemi/pasemi_mac.c | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 24
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_fp.c | 9
-rw-r--r-- drivers/net/ethernet/rdc/r6040.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 3
-rw-r--r-- drivers/net/ethernet/rocker/rocker_ofdpa.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/falcon/tx.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 12
-rw-r--r-- drivers/net/ethernet/sun/ldmvsw.c | 3
-rw-r--r-- drivers/net/ethernet/sun/sunvnet.c | 3
-rw-r--r-- drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c | 2
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 22
-rw-r--r-- drivers/net/geneve.c | 3
-rw-r--r-- drivers/net/gtp.c | 4
-rw-r--r-- drivers/net/hyperv/netvsc.c | 2
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 10
-rw-r--r-- drivers/net/ieee802154/mac802154_hwsim.c | 7
-rw-r--r-- drivers/net/macsec.c | 33
-rw-r--r-- drivers/net/macvlan.c | 48
-rw-r--r-- drivers/net/net_failover.c | 8
-rw-r--r-- drivers/net/phy/Kconfig | 6
-rw-r--r-- drivers/net/phy/aquantia_main.c | 512
-rw-r--r-- drivers/net/phy/at803x.c | 26
-rw-r--r-- drivers/net/phy/bcm-cygnus.c | 147
-rw-r--r-- drivers/net/phy/bcm-phy-lib.c | 52
-rw-r--r-- drivers/net/phy/bcm-phy-lib.h | 20
-rw-r--r-- drivers/net/phy/bcm7xxx.c | 76
-rw-r--r-- drivers/net/phy/marvell.c | 108
-rw-r--r-- drivers/net/phy/marvell10g.c | 14
-rw-r--r-- drivers/net/phy/mdio-bcm-unimac.c | 7
-rw-r--r-- drivers/net/phy/phy-c45.c | 20
-rw-r--r-- drivers/net/phy/phy.c | 8
-rw-r--r-- drivers/net/phy/phy_device.c | 99
-rw-r--r-- drivers/net/phy/realtek.c | 26
-rw-r--r-- drivers/net/phy/rockchip.c | 31
-rw-r--r-- drivers/net/team/team.c | 14
-rw-r--r-- drivers/net/tun.c | 20
-rw-r--r-- drivers/net/usb/cdc_mbim.c | 1
-rw-r--r-- drivers/net/virtio_net.c | 10
-rw-r--r-- drivers/net/vxlan.c | 1
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 3
-rw-r--r-- drivers/net/xen-netback/interface.c | 6
-rw-r--r-- drivers/net/xen-netfront.c | 3
201 files changed, 6553 insertions(+), 2374 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7a96d168efc4..bc42f131f47c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -505,6 +505,7 @@ source "drivers/net/hyperv/Kconfig"
config NETDEVSIM
tristate "Simulated networking device"
depends on DEBUG_FS
+ select NET_DEVLINK
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b59708c35faf..8ddbada9e281 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4114,8 +4114,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* This helper function exists to help dev_pick_tx get the correct
* destination queue. Using a helper function skips a call to
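A recurring change throughout this series: ndo_select_queue implementations drop the select_queue_fallback_t parameter and call the now-exported netdev_pick_tx() directly. A minimal sketch of a callback under the new prototype (driver name hypothetical):

static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	/* Prefer a queue recorded on RX; otherwise take the core's pick. */
	if (skb_rx_queue_recorded(skb))
		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;

	return netdev_pick_tx(dev, skb, sb_dev);
}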
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index c8e3f05e1d72..4ccb3239f5f7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1188,10 +1188,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (ret)
goto out_mdio;
- pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n",
- priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
- priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
- priv->core, priv->irq0, priv->irq1);
+ dev_info(&pdev->dev,
+ "Starfighter 2 top: %x.%02x, core: %x.%02x, IRQs: %d, %d\n",
+ priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff,
+ priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff,
+ priv->irq0, priv->irq1);
return 0;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index f16e1d7d8615..c026d15721f6 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -1144,6 +1144,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
interface = PHY_INTERFACE_MODE_GMII;
if (gbit)
break;
+ /* fall through */
case 0:
interface = PHY_INTERFACE_MODE_MII;
break;
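The added comment is the annotation that -Wimplicit-fallthrough recognizes: it tells the compiler (and readers) that the missing break before case 0 is deliberate, so the gigabit case can degrade to MII. The general shape of the idiom, as a sketch with hypothetical names:

switch (state) {
case STATE_FAST:
	setup_fast();
	if (ok)
		break;
	/* fall through */	/* deliberate: fall back to the slow path */
case STATE_SLOW:
	setup_slow();
	break;
}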
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f4e2db44ad91..65da6709a173 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -4631,14 +4631,6 @@ static int mv88e6xxx_smi_init(struct mv88e6xxx_chip *chip,
return 0;
}
-static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
-{
- int i;
-
- for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
- chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
-}
-
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4675,8 +4667,6 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
if (err)
goto free;
- mv88e6xxx_ports_cmode_init(chip);
-
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
mutex_unlock(&chip->reg_lock);
@@ -4915,7 +4905,6 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
if (err)
goto out;
- mv88e6xxx_ports_cmode_init(chip);
mv88e6xxx_phy_init(chip);
if (chip->info->ops->get_eeprom) {
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index c7bed263a0f4..39c85e98fb92 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,7 +52,6 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
-#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a6eacf2099c3..7e40d14682f7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2236,7 +2236,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- if (netif_xmit_stopped(txq) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more()) {
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
@@ -2258,8 +2258,7 @@ error_drop_packet:
}
static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
u16 qid;
/* we suspect that this is good for in-kernel network services that
@@ -2269,7 +2268,7 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
if (skb_rx_queue_recorded(skb))
qid = skb_get_rx_queue(skb);
else
- qid = fallback(dev, skb, NULL);
+ qid = netdev_pick_tx(dev, skb, NULL);
return qid;
}
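This is the other recurring conversion: the xmit_more hint moves out of struct sk_buff into a per-CPU flag read via netdev_xmit_more(). The usual doorbell-batching shape after the change is roughly the following (sketch; the doorbell helper is hypothetical):

netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb->queue_mapping);

	/* ... post TX descriptors for skb ... */

	/* Ring the doorbell only on the last packet of a batch, or when
	 * the stack has stopped the queue and will not send more.
	 */
	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		foo_ring_doorbell(dev);

	return NETDEV_TX_OK;
}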
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 4666084eda16..d5fd49dd25f3 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1887,7 +1887,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xgbe_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index 7d623e90dc19..12472c5bb34d 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
- depends on PCI && X86_64
+ depends on PCI
+ depends on X86_64 || ARM64 || COMPILE_TEST
---help---
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 3944ce7f0870..8f35c3f883f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -16,7 +16,7 @@
#define AQ_CFG_TCS_DEF 1U
#define AQ_CFG_TXDS_DEF 4096U
-#define AQ_CFG_RXDS_DEF 1024U
+#define AQ_CFG_RXDS_DEF 2048U
#define AQ_CFG_IS_POLLING_DEF 0U
@@ -34,10 +34,16 @@
#define AQ_CFG_TCS_MAX 8U
#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
-#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (2U * 1024U)
#define AQ_CFG_TX_CLEAN_BUDGET 256U
+#define AQ_CFG_RX_REFILL_THRES 32U
+
+#define AQ_CFG_RX_HDR_SIZE 256U
+
+#define AQ_CFG_RX_PAGEORDER 0U
+
/* LRO */
#define AQ_CFG_IS_LRO_DEF 1U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ff83667410bd..059df86e8e37 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -73,6 +73,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->tx_itr = aq_itr_tx;
cfg->rx_itr = aq_itr_rx;
+ cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
cfg->is_rss = AQ_CFG_IS_RSS_DEF;
cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 8e34c1e49bf2..b1372430f62f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -31,6 +31,7 @@ struct aq_nic_cfg_s {
u32 itr;
u16 rx_itr;
u16 tx_itr;
+ u32 rxpageorder;
u32 num_rss_queues;
u32 mtu;
u32 flow_control;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index e2ffb159cbe2..c64e2fb5a4f1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -12,10 +12,89 @@
#include "aq_ring.h"
#include "aq_nic.h"
#include "aq_hw.h"
+#include "aq_hw_utils.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+static inline void aq_free_rxpage(struct aq_rxpage *rxpage, struct device *dev)
+{
+ unsigned int len = PAGE_SIZE << rxpage->order;
+
+ dma_unmap_page(dev, rxpage->daddr, len, DMA_FROM_DEVICE);
+
+ /* Drop the ref for being in the ring. */
+ __free_pages(rxpage->page, rxpage->order);
+ rxpage->page = NULL;
+}
+
+static int aq_get_rxpage(struct aq_rxpage *rxpage, unsigned int order,
+ struct device *dev)
+{
+ struct page *page;
+ dma_addr_t daddr;
+ int ret = -ENOMEM;
+
+ page = dev_alloc_pages(order);
+ if (unlikely(!page))
+ goto err_exit;
+
+ daddr = dma_map_page(dev, page, 0, PAGE_SIZE << order,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(dev, daddr)))
+ goto free_page;
+
+ rxpage->page = page;
+ rxpage->daddr = daddr;
+ rxpage->order = order;
+ rxpage->pg_off = 0;
+
+ return 0;
+
+free_page:
+ __free_pages(page, order);
+
+err_exit:
+ return ret;
+}
+
+static int aq_get_rxpages(struct aq_ring_s *self, struct aq_ring_buff_s *rxbuf,
+ int order)
+{
+ int ret;
+
+ if (rxbuf->rxdata.page) {
+ /* One means ring is the only user and can reuse */
+ if (page_ref_count(rxbuf->rxdata.page) > 1) {
+ /* Try reuse buffer */
+ rxbuf->rxdata.pg_off += AQ_CFG_RX_FRAME_MAX;
+ if (rxbuf->rxdata.pg_off + AQ_CFG_RX_FRAME_MAX <=
+ (PAGE_SIZE << order)) {
+ self->stats.rx.pg_flips++;
+ } else {
+ /* Buffer exhausted. We have other users and
+ * should release this page and realloc
+ */
+ aq_free_rxpage(&rxbuf->rxdata,
+ aq_nic_get_dev(self->aq_nic));
+ self->stats.rx.pg_losts++;
+ }
+ } else {
+ rxbuf->rxdata.pg_off = 0;
+ self->stats.rx.pg_reuses++;
+ }
+ }
+
+ if (!rxbuf->rxdata.page) {
+ ret = aq_get_rxpage(&rxbuf->rxdata, order,
+ aq_nic_get_dev(self->aq_nic));
+ return ret;
+ }
+
+ return 0;
+}
+
static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
struct aq_nic_s *aq_nic)
{
@@ -81,6 +160,11 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
self->idx = idx;
self->size = aq_nic_cfg->rxds;
self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+ self->page_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
+ (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+
+ if (aq_nic_cfg->rxpageorder > self->page_order)
+ self->page_order = aq_nic_cfg->rxpageorder;
self = aq_ring_alloc(self, aq_nic);
if (!self) {
@@ -201,22 +285,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
int budget)
{
struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
- int err = 0;
bool is_rsc_completed = true;
+ int err = 0;
for (; (self->sw_head != self->hw_head) && budget;
self->sw_head = aq_ring_next_dx(self, self->sw_head),
--budget, ++(*work_done)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct aq_ring_buff_s *buff_ = NULL;
struct sk_buff *skb = NULL;
unsigned int next_ = 0U;
unsigned int i = 0U;
- struct aq_ring_buff_s *buff_ = NULL;
+ u16 hdr_len;
- if (buff->is_error) {
- __free_pages(buff->page, 0);
+ if (buff->is_error)
continue;
- }
if (buff->is_cleaned)
continue;
@@ -246,45 +329,66 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
}
}
+ dma_sync_single_range_for_cpu(aq_nic_get_dev(self->aq_nic),
+ buff->rxdata.daddr,
+ buff->rxdata.pg_off,
+ buff->len, DMA_FROM_DEVICE);
+
/* for single fragment packets use build_skb() */
if (buff->is_eop &&
buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
- skb = build_skb(page_address(buff->page),
+ skb = build_skb(aq_buf_vaddr(&buff->rxdata),
AQ_CFG_RX_FRAME_MAX);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
-
skb_put(skb, buff->len);
+ page_ref_inc(buff->rxdata.page);
} else {
- skb = netdev_alloc_skb(ndev, ETH_HLEN);
+ skb = napi_alloc_skb(napi, AQ_CFG_RX_HDR_SIZE);
if (unlikely(!skb)) {
err = -ENOMEM;
goto err_exit;
}
- skb_put(skb, ETH_HLEN);
- memcpy(skb->data, page_address(buff->page), ETH_HLEN);
- skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
- buff->len - ETH_HLEN,
- SKB_TRUESIZE(buff->len - ETH_HLEN));
+ hdr_len = buff->len;
+ if (hdr_len > AQ_CFG_RX_HDR_SIZE)
+ hdr_len = eth_get_headlen(aq_buf_vaddr(&buff->rxdata),
+ AQ_CFG_RX_HDR_SIZE);
+
+ memcpy(__skb_put(skb, hdr_len), aq_buf_vaddr(&buff->rxdata),
+ ALIGN(hdr_len, sizeof(long)));
+
+ if (buff->len - hdr_len > 0) {
+ skb_add_rx_frag(skb, 0, buff->rxdata.page,
+ buff->rxdata.pg_off + hdr_len,
+ buff->len - hdr_len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff->rxdata.page);
+ }
if (!buff->is_eop) {
- for (i = 1U, next_ = buff->next,
- buff_ = &self->buff_ring[next_];
- true; next_ = buff_->next,
- buff_ = &self->buff_ring[next_], ++i) {
- skb_add_rx_frag(skb, i,
- buff_->page, 0,
+ buff_ = buff;
+ i = 1U;
+ do {
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_];
+
+ dma_sync_single_range_for_cpu(
+ aq_nic_get_dev(self->aq_nic),
+ buff_->rxdata.daddr,
+ buff_->rxdata.pg_off,
buff_->len,
- SKB_TRUESIZE(buff->len -
- ETH_HLEN));
+ DMA_FROM_DEVICE);
+ skb_add_rx_frag(skb, i++,
+ buff_->rxdata.page,
+ buff_->rxdata.pg_off,
+ buff_->len,
+ AQ_CFG_RX_FRAME_MAX);
+ page_ref_inc(buff_->rxdata.page);
buff_->is_cleaned = 1;
-
- if (buff_->is_eop)
- break;
- }
+ } while (!buff_->is_eop);
}
}
@@ -310,12 +414,15 @@ err_exit:
int aq_ring_rx_fill(struct aq_ring_s *self)
{
- unsigned int pages_order = fls(AQ_CFG_RX_FRAME_MAX / PAGE_SIZE +
- (AQ_CFG_RX_FRAME_MAX % PAGE_SIZE ? 1 : 0)) - 1;
+ unsigned int page_order = self->page_order;
struct aq_ring_buff_s *buff = NULL;
int err = 0;
int i = 0;
+ if (aq_ring_avail_dx(self) < min_t(unsigned int, AQ_CFG_RX_REFILL_THRES,
+ self->size / 2))
+ return err;
+
for (i = aq_ring_avail_dx(self); i--;
self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
buff = &self->buff_ring[self->sw_tail];
@@ -323,30 +430,15 @@ int aq_ring_rx_fill(struct aq_ring_s *self)
buff->flags = 0U;
buff->len = AQ_CFG_RX_FRAME_MAX;
- buff->page = alloc_pages(GFP_ATOMIC | __GFP_COMP, pages_order);
- if (!buff->page) {
- err = -ENOMEM;
+ err = aq_get_rxpages(self, buff, page_order);
+ if (err)
goto err_exit;
- }
-
- buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
- buff->page, 0,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
- err = -ENOMEM;
- goto err_exit;
- }
+ buff->pa = aq_buf_daddr(&buff->rxdata);
buff = NULL;
}
err_exit:
- if (err < 0) {
- if (buff && buff->page)
- __free_pages(buff->page, 0);
- }
-
return err;
}
@@ -359,10 +451,7 @@ void aq_ring_rx_deinit(struct aq_ring_s *self)
self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
- dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
- AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
-
- __free_pages(buff->page, 0);
+ aq_free_rxpage(&buff->rxdata, aq_nic_get_dev(self->aq_nic));
}
err_exit:;
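The refill path above now recycles RX pages instead of freeing them on every pass. A condensed sketch of the reuse decision in aq_get_rxpages(), annotated with the three new counters it feeds (the real code frees and reallocates on the exhausted path rather than returning an error):

static int rxpage_recycle(struct aq_rxpage *rxdata, unsigned int order)
{
	if (page_ref_count(rxdata->page) == 1) {
		rxdata->pg_off = 0;	/* ring is sole owner: reuse (pg_reuses) */
		return 0;
	}

	/* Stack still holds part of the page: try the next frame slice. */
	rxdata->pg_off += AQ_CFG_RX_FRAME_MAX;
	if (rxdata->pg_off + AQ_CFG_RX_FRAME_MAX <= (PAGE_SIZE << order))
		return 0;		/* partial reuse (pg_flips) */

	return -EBUSY;			/* exhausted: realloc a page (pg_losts) */
}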
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index ac1329f4051d..cfffc301e746 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -17,6 +17,13 @@
struct page;
struct aq_nic_cfg_s;
+struct aq_rxpage {
+ struct page *page;
+ dma_addr_t daddr;
+ unsigned int order;
+ unsigned int pg_off;
+};
+
/* TxC SOP DX EOP
* +----------+----------+----------+-----------
* 8bytes|len l3,l4 | pa | pa | pa
@@ -31,28 +38,21 @@ struct aq_nic_cfg_s;
*/
struct __packed aq_ring_buff_s {
union {
+ /* RX/TX */
+ dma_addr_t pa;
/* RX */
struct {
u32 rss_hash;
u16 next;
u8 is_hash_l4;
u8 rsvd1;
- struct page *page;
+ struct aq_rxpage rxdata;
};
/* EOP */
struct {
dma_addr_t pa_eop;
struct sk_buff *skb;
};
- /* DX */
- struct {
- dma_addr_t pa;
- };
- /* SOP */
- struct {
- dma_addr_t pa_sop;
- u32 len_pkt_sop;
- };
/* TxC */
struct {
u32 mss;
@@ -91,6 +91,9 @@ struct aq_ring_stats_rx_s {
u64 bytes;
u64 lro_packets;
u64 jumbo_packets;
+ u64 pg_losts;
+ u64 pg_flips;
+ u64 pg_reuses;
};
struct aq_ring_stats_tx_s {
@@ -116,6 +119,7 @@ struct aq_ring_s {
unsigned int size; /* descriptors number */
unsigned int dx_size; /* TX or RX descriptor size, */
/* stored here for faster math */
+ unsigned int page_order;
union aq_ring_stats_s stats;
dma_addr_t dx_ring_pa;
};
@@ -126,6 +130,16 @@ struct aq_ring_param_s {
cpumask_t affinity_mask;
};
+static inline void *aq_buf_vaddr(struct aq_rxpage *rxpage)
+{
+ return page_to_virt(rxpage->page) + rxpage->pg_off;
+}
+
+static inline dma_addr_t aq_buf_daddr(struct aq_rxpage *rxpage)
+{
+ return rxpage->daddr + rxpage->pg_off;
+}
+
static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
unsigned int dx)
{
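For reference, the fls()-based computation that aq_ring_rx_alloc() stores in the new page_order field works out as follows (worked example; 4 KiB PAGE_SIZE assumed):

/* AQ_CFG_RX_FRAME_MAX = 2048, PAGE_SIZE = 4096:
 *   2048 / 4096 = 0, remainder != 0  ->  0 + 1 = 1
 *   fls(1) - 1 = 0                   ->  order-0 pages are enough
 * A 16 KiB frame would instead give 16384 / 4096 = 4, fls(4) - 1 = 2,
 * i.e. order-2 (16 KiB) pages; cfg->rxpageorder can only raise this floor.
 */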
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index d335c334fa56..a2e4ca1782ae 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -353,6 +353,9 @@ void aq_vec_add_stats(struct aq_vec_s *self,
stats_rx->errors += rx->errors;
stats_rx->jumbo_packets += rx->jumbo_packets;
stats_rx->lro_packets += rx->lro_packets;
+ stats_rx->pg_losts += rx->pg_losts;
+ stats_rx->pg_flips += rx->pg_flips;
+ stats_rx->pg_reuses += rx->pg_reuses;
stats_tx->packets += tx->packets;
stats_tx->bytes += tx->bytes;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index f6f8338153a2..65ffaa7ad69e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -619,8 +619,6 @@ err_exit:
static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -687,8 +685,6 @@ static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
is_err &= ~0x18U;
is_err &= ~0x04U;
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if (is_err || rxd_wb->type & 0x1000U) {
/* status error or DMA error */
buff->is_error = 1U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index b31dba1b1a55..7e95804e2180 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -259,7 +259,13 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_time_base_divider_set(self, 0x61AU);
hw_atl_rpo_lro_inactive_interval_set(self, 0);
- hw_atl_rpo_lro_max_coalescing_interval_set(self, 2);
+ /* The LRO time base divider is 5 us (0x61a), which is multiplied
+ * by 50 (0x32) to get the default maximum coalescing interval of
+ * 250 us.
+ */
+ hw_atl_rpo_lro_max_coalescing_interval_set(self, 50);
+
hw_atl_rpo_lro_qsessions_lim_set(self, 1U);
@@ -273,6 +279,10 @@ static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
hw_atl_rpo_lro_en_set(self,
aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ hw_atl_itr_rsc_en_set(self,
+ aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+
+ hw_atl_itr_rsc_delay_set(self, 1U);
}
return aq_hw_err_from_flags(self);
}
@@ -654,8 +664,6 @@ err_exit:
static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
struct aq_ring_s *ring)
{
- struct device *ndev = aq_nic_get_dev(ring->aq_nic);
-
for (; ring->hw_head != ring->sw_tail;
ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
struct aq_ring_buff_s *buff = NULL;
@@ -697,8 +705,6 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
buff->is_cso_err = 0U;
}
- dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
-
if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
/* MAC error or DMA error */
buff->is_error = 1U;
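The arithmetic behind the new coalescing comment, using the values from the code above: one LRO tick is 5 us with the 0x61A time base divider, so

/* max coalescing interval = limit * time base
 *   new: 50 (0x32) * 5 us = 250 us   (the documented default)
 *   old:  2        * 5 us =  10 us
 */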
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index b318eefd36ae..ea98a08d7820 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -78,7 +78,7 @@
#define HW_ATL_B0_TC_MAX 1U
#define HW_ATL_B0_RSS_MAX 8U
-#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_LRO_RXD_MAX 16U
#define HW_ATL_B0_RS_SLIP_ENABLED 0U
/* (256k -1(max pay_len) - 54(header)) */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 0722b8e01964..9442deff98a8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -315,6 +315,21 @@ void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
HW_ATL_ITR_RES_SHIFT, res_irq);
}
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable)
+{
+ aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_EN_ADR, enable);
+}
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay)
+{
+ aq_hw_write_reg_bit(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR,
+ HW_ATL_ITR_RSC_DELAY_MSK,
+ HW_ATL_ITR_RSC_DELAY_SHIFT,
+ delay);
+}
+
/* rdm */
void hw_atl_rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
index d46351890b16..4cfa4bd80ad3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -152,6 +152,12 @@ u32 hw_atl_itr_res_irq_get(struct aq_hw_s *aq_hw);
/* set reset interrupt */
void hw_atl_itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+/* set RSC interrupt */
+void hw_atl_itr_rsc_en_set(struct aq_hw_s *aq_hw, u32 enable);
+
+/* set RSC delay */
+void hw_atl_itr_rsc_delay_set(struct aq_hw_s *aq_hw, u32 delay);
+
/* rdm */
/* set cpu id */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index fb45bc2d99cf..430bbd45b2f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -95,6 +95,19 @@
#define HW_ATL_ITR_RES_MSK 0x80000000
/* lower bit position of bitfield itr_reset */
#define HW_ATL_ITR_RES_SHIFT 31
+
+/* register address for bitfield rsc_en */
+#define HW_ATL_ITR_RSC_EN_ADR 0x00002200
+
+/* register address for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_ADR 0x00002204
+/* bitmask for bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_MSK 0x0000000f
+/* width of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_WIDTH 4
+/* lower bit position of bitfield rsc_delay */
+#define HW_ATL_ITR_RSC_DELAY_SHIFT 0
+
/* register address for bitfield dca{d}_cpuid[7:0] */
#define HW_ATL_RDM_DCADCPUID_ADR(dca) (0x00006100 + (dca) * 0x4)
/* bitmask for bitfield dca{d}_cpuid[7:0] */
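The new RSC_DELAY definitions follow the driver's ADR/MSK/SHIFT convention, which aq_hw_write_reg_bit() turns into a read-modify-write. An open-coded equivalent of hw_atl_itr_rsc_delay_set(), as a sketch using the aq_hw_utils.h register helpers:

u32 val = aq_hw_read_reg(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR);

val &= ~HW_ATL_ITR_RSC_DELAY_MSK;		/* clear bits 3:0 */
val |= (delay << HW_ATL_ITR_RSC_DELAY_SHIFT) &	/* insert the new value */
	HW_ATL_ITR_RSC_DELAY_MSK;
aq_hw_write_reg(aq_hw, HW_ATL_ITR_RSC_DELAY_ADR, val);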
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 6f56276015a4..f62deeb6e941 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -404,6 +404,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int dma_len;
unsigned int align;
unsigned int next;
+ bool xmit_more;
if (atomic_read(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
@@ -423,9 +424,10 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+ xmit_more = netdev_xmit_more();
if (atomic_dec_return(&priv->tx_free) <= NB8800_DESC_LOW) {
netif_stop_queue(dev);
- skb->xmit_more = 0;
+ xmit_more = false;
}
next = priv->tx_next;
@@ -450,7 +452,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
desc->n_addr = priv->tx_bufs[next].dma_desc;
desc->config = DESC_BTS(2) | DESC_DS | DESC_EOF | dma_len;
- if (!skb->xmit_more)
+ if (!xmit_more)
desc->config |= DESC_EOC;
txb->skb = skb;
@@ -468,7 +470,7 @@ static int nb8800_xmit(struct sk_buff *skb, struct net_device *dev)
priv->tx_next = next;
- if (!skb->xmit_more) {
+ if (!xmit_more) {
smp_wmb();
priv->tx_chain->ready = true;
priv->tx_chain = NULL;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 716bfbba59cf..461b2c0b2ed6 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -196,6 +196,7 @@ config BNXT
depends on PCI
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
Ethernet cards. To compile this driver as a module, choose M here:
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index bc3ac369cbe3..dfe46dacf5cf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2274,8 +2274,7 @@ static const struct ethtool_ops bcm_sysport_ethtool_ops = {
};
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u16 queue = skb_get_queue_mapping(skb);
@@ -2283,7 +2282,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
unsigned int q, port;
if (!netdev_uses_dsa(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
/* DSA tagging layer will have configured the correct queue */
q = BRCM_TAG_GET_QUEUE(queue);
@@ -2291,7 +2290,7 @@ static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
if (unlikely(!tx_ring))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
return tx_ring->index;
}
@@ -2599,11 +2598,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT%s" REV_FMT
- " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ "Broadcom SYSTEMPORT%s " REV_FMT
+ " (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
- priv->base, priv->irq0, priv->irq1, txq, rxq);
+ priv->irq0, priv->irq1, txq, rxq);
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ecb1bd7eb508..6012fe61735e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1909,8 +1909,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1932,7 +1931,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb, NULL) %
+ return netdev_pick_tx(dev, skb, NULL) %
(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 2462e7aa0c5d..7f8df08a7a4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -498,8 +498,7 @@ int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index d9057c8bbeef..78326a6c0aba 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -3024,7 +3024,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 13
-#define BCM_5710_FW_REVISION_VERSION 1
+#define BCM_5710_FW_REVISION_VERSION 11
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3639,8 +3639,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE (0x1<<3)
+#define CLIENT_INIT_RX_DATA_TPA_OVER_VLAN_DISABLE_SHIFT 3
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0xF<<4)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 4
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3831,7 +3833,7 @@ struct eth_classify_cmd_header {
*/
struct eth_classify_header {
u8 rule_cnt;
- u8 reserved0;
+ u8 warning_on_error;
__le16 reserved1;
__le32 echo;
};
@@ -4752,6 +4754,8 @@ struct tpa_update_ramrod_data {
__le32 sge_page_base_hi;
__le16 sge_pause_thr_low;
__le16 sge_pause_thr_high;
+ u8 tpa_over_vlan_disable;
+ u8 reserved[7];
};
@@ -4946,7 +4950,7 @@ struct fairness_vars_per_port {
u32 upper_bound;
u32 fair_threshold;
u32 fairness_timeout;
- u32 reserved0;
+ u32 size_thr;
};
/*
@@ -5415,7 +5419,9 @@ struct function_start_data {
u8 sd_vlan_force_pri_val;
u8 c2s_pri_tt_valid;
u8 c2s_pri_default;
- u8 reserved2[6];
+ u8 tx_vlan_filtering_enable;
+ u8 tx_vlan_filtering_use_pvid;
+ u8 reserved2[4];
struct c2s_pri_trans_table_entry c2s_pri_trans_table;
};
@@ -5448,7 +5454,8 @@ struct function_update_data {
u8 reserved1;
__le16 sd_vlan_tag;
__le16 sd_vlan_eth_type;
- __le16 reserved0;
+ u8 tx_vlan_filtering_pvid_change_flg;
+ u8 reserved0;
__le32 reserved2;
};
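For orientation, the client_init_rx_data flags change above carves one bit out of the old 5-bit reserved field; the relevant bit positions, per the masks shown:

/* bit 1    TPA_EN_IPV6            (0x1 << 1)
 * bit 2    TPA_MODE               (0x1 << 2)
 * bit 3    TPA_OVER_VLAN_DISABLE  (0x1 << 3)  <- taken from RESERVED5
 * bits 4-7 RESERVED5              (0xF << 4)  <- shrunk from 0x1F << 3
 */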
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0bb9d7b3a2b6..4feac114b779 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -551,7 +551,7 @@ normal_tx:
prod = NEXT_TX(prod);
txr->tx_prod = prod;
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
bnxt_db_write(bp, &txr->tx_db, prod);
tx_done:
@@ -559,7 +559,7 @@ tx_done:
mmiowb();
if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
- if (skb->xmit_more && !tx_buf->is_push)
+ if (netdev_xmit_more() && !tx_buf->is_push)
bnxt_db_write(bp, &txr->tx_db, prod);
netif_tx_stop_queue(txq);
@@ -10048,23 +10048,6 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
return rc;
}
-static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
- size_t len)
-{
- struct bnxt *bp = netdev_priv(dev);
- int rc;
-
- /* The PF and it's VF-reps only support the switchdev framework */
- if (!BNXT_PF(bp))
- return -EOPNOTSUPP;
-
- rc = snprintf(buf, len, "p%d", bp->pf.port_id);
-
- if (rc >= len)
- return -EOPNOTSUPP;
- return 0;
-}
-
int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
@@ -10083,6 +10066,13 @@ int bnxt_get_port_parent_id(struct net_device *dev,
return 0;
}
+static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ return &bp->dl_port;
+}
+
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
@@ -10114,8 +10104,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_bpf = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
- .ndo_get_port_parent_id = bnxt_get_port_parent_id,
- .ndo_get_phys_port_name = bnxt_get_phys_port_name
+ .ndo_get_devlink_port = bnxt_get_devlink_port,
};
static void bnxt_remove_one(struct pci_dev *pdev)
@@ -10439,6 +10428,26 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
return rc;
}
+static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
+{
+ struct pci_dev *pdev = bp->pdev;
+ int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ u32 dw;
+
+ if (!pos) {
+ netdev_info(bp->dev, "Unable to read adapter's DSN");
+ return -EOPNOTSUPP;
+ }
+
+ /* DSN (two dw) is at an offset of 4 from the cap pos */
+ pos += 4;
+ pci_read_config_dword(pdev, pos, &dw);
+ put_unaligned_le32(dw, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 4, &dw);
+ put_unaligned_le32(dw, &dsn[4]);
+ return 0;
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -10579,6 +10588,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err_pci_clean;
}
+ /* Read the adapter's DSN to use as the eswitch switch_id */
+ rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_vnic_qcaps(bp);
bnxt_hwrm_port_led_qcaps(bp);
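bnxt_pcie_dsn_get() (moved here from bnxt_vfr.c so the switch_id is populated for the PF itself, not only when VF representors are created) reads the two data dwords of the PCIe Device Serial Number capability into an 8-byte buffer, least-significant dword first. A hedged usage sketch:

u8 dsn[8];

if (!bnxt_pcie_dsn_get(bp, dsn))		/* 0 on success */
	memcpy(bp->switch_id, dsn, sizeof(dsn));	/* eswitch switch_id */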
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index e1feb97bcd81..549c90d3e465 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <net/devlink.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
@@ -228,6 +229,9 @@ int bnxt_dl_register(struct bnxt *bp)
goto err_dl_unreg;
}
+ devlink_port_attrs_set(&bp->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ bp->pf.port_id, false, 0,
+ bp->switch_id, sizeof(bp->switch_id));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 2bdd2da9aac7..f760921389a3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -406,26 +406,6 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->min_mtu = ETH_ZLEN;
}
-static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
-{
- struct pci_dev *pdev = bp->pdev;
- int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
- u32 dw;
-
- if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN");
- return -EOPNOTSUPP;
- }
-
- /* DSN (two dw) is at an offset of 4 from the cap pos */
- pos += 4;
- pci_read_config_dword(pdev, pos, &dw);
- put_unaligned_le32(dw, &dsn[0]);
- pci_read_config_dword(pdev, pos + 4, &dw);
- put_unaligned_le32(dw, &dsn[4]);
- return 0;
-}
-
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
@@ -490,11 +470,6 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
}
}
- /* Read the adapter's DSN to use as the eswitch switch_id */
- rc = bnxt_pcie_dsn_get(bp, bp->switch_id);
- if (rc)
- goto err;
-
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 983245c0867c..4fd973571e4c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1665,7 +1665,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
/* Packets are ready, update producer index */
bcmgenet_tdma_ring_writel(priv, ring->index,
ring->prod_index, TDMA_PROD_INDEX);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..d029468738ed 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8156,7 +8156,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_wake_queue(txq);
}
- if (!skb->xmit_more || netif_xmit_stopped(txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
/* Packets are ready, update Tx producer idx on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
mmiowb();
@@ -12763,9 +12763,6 @@ static int tg3_set_phys_id(struct net_device *dev,
{
struct tg3 *tp = netdev_priv(dev);
- if (!netif_running(tp->dev))
- return -EAGAIN;
-
switch (state) {
case ETHTOOL_ID_ACTIVE:
return 1; /* cycle on/off once per second */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1522aee81884..a44171fddf47 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4360,8 +4360,7 @@ static int __maybe_unused macb_resume(struct device *dev)
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
@@ -4377,8 +4376,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
static int __maybe_unused macb_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *netdev = platform_get_drvdata(pdev);
+ struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
if (!(device_may_wakeup(&bp->dev->dev))) {
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 6650e2a5f171..7612ab6b286d 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -68,6 +68,7 @@ config LIQUIDIO
imply PTP_1588_CLOCK
select FW_LOADER
select LIBCRC32C
+ select NET_DEVLINK
---help---
This driver supports Cavium LiquidIO Intelligent Server Adapters
based on CN66XX, CN68XX and CN23XX chips.
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index fb6f813cff65..eab805579f96 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2522,7 +2522,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 54b245797d2e..db0b90555acb 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1585,7 +1585,7 @@ static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
}
- xmit_more = skb->xmit_more;
+ xmit_more = netdev_xmit_more();
if (unlikely(cmdsetup.s.timestamp))
status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 0e9182d3f02c..b3e4118a15e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -443,9 +443,9 @@ found:
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
- int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
+ int i;
- d = kvzalloc(size, GFP_KERNEL);
+ d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
if (!d)
return NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c2fd323c4078..ea75f275023f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -75,8 +75,8 @@ struct l2t_data {
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
- struct l2t_entry l2tab[0];
struct rcu_head rcu_head; /* to handle rcu cleanup */
+ struct l2t_entry l2tab[];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
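Two idioms meet in the l2t change above: struct_size(d, l2tab, l2t_capacity) computes sizeof(*d) plus l2t_capacity trailing array elements with overflow checking, and the array itself becomes a C99 flexible array member, which must be the last field (hence it also moves below rcu_head). A generic sketch of the pairing, with hypothetical names:

#include <linux/overflow.h>

struct entry {
	u32 key;		/* payload is illustrative */
};

struct table {
	unsigned int nent;
	struct entry ent[];	/* flexible array member: must come last */
};

/* Overflow-checked: sizeof(struct table) + n * sizeof(struct entry). */
struct table *t = kvzalloc(struct_size(t, ent, n), GFP_KERNEL);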
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 956219c178e1..a8fe0808823d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1575,9 +1575,11 @@ int t4_slow_intr_handler(struct adapter *adapter);
int t4_wait_dev_ready(void __iomem *regs);
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc);
int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout);
+ u8 sleep_ok, int timeout);
static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
unsigned int port, struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index bec4711005cc..9e589302af90 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -442,7 +442,7 @@ static unsigned int speed_to_fw_caps(int speed)
* Link Mode Mask.
*/
static void fw_caps_to_lmm(enum fw_port_type port_type,
- unsigned int fw_caps,
+ fw_port_cap32_t fw_caps,
unsigned long *link_mode_mask)
{
#define SET_LMM(__lmm_name) \
@@ -632,7 +632,10 @@ static int get_link_ksettings(struct net_device *dev,
fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
link_ksettings->link_modes.supported);
- fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
+ fw_caps_to_lmm(pi->port_type,
+ t4_link_acaps(pi->adapter,
+ pi->lport,
+ &pi->link_cfg),
link_ksettings->link_modes.advertising);
fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
link_ksettings->link_modes.lp_advertising);
@@ -642,22 +645,6 @@ static int get_link_ksettings(struct net_device *dev,
: SPEED_UNKNOWN);
base->duplex = DUPLEX_FULL;
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 89179e316687..3339f1f4bcdd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -979,8 +979,7 @@ freeout:
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
int txq;
@@ -1022,7 +1021,7 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq;
}
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
}
static int closest_timer(const struct sge *s, int time)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a3544041ad32..f9b70be59792 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3964,6 +3964,14 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
}
+/* The ADVERT_MASK is used to mask out all of the Advertised Firmware Port
+ * Capabilities which we control with separate controls -- see, for instance,
+ * Pause Frames and Forward Error Correction. In order to determine what the
+ * full set of Advertised Port Capabilities are, the base Advertised Port
+ * Capabilities (masked by ADVERT_MASK) must be combined with the Advertised
+ * Port Capabilities associated with those other controls. See
+ * t4_link_acaps() for how this is done.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
FW_PORT_CAP32_ANEG)
@@ -4061,6 +4069,9 @@ static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
/* Translate Common Code Pause specification into Firmware Port Capabilities */
static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
{
+ /* Translate orthogonal RX/TX Pause Controls for L1 Configure
+ * commands, etc.
+ */
fw_port_cap32_t fw_pause = 0;
if (cc_pause & PAUSE_RX)
@@ -4070,6 +4081,19 @@ static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
if (!(cc_pause & PAUSE_AUTONEG))
fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
+ /* Translate orthogonal Pause controls into IEEE 802.3 Pause,
+ * Asymmetrical Pause for use in reporting to upper layer OS code, etc.
+ * Note that these bits are ignored in L1 Configure commands.
+ */
+ if (cc_pause & PAUSE_RX) {
+ if (cc_pause & PAUSE_TX)
+ fw_pause |= FW_PORT_CAP32_802_3_PAUSE;
+ else
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ } else if (cc_pause & PAUSE_TX) {
+ fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR;
+ }
+
return fw_pause;
}
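The new translation maps the driver's orthogonal RX/TX pause controls onto the IEEE 802.3 advertisement bits; the resulting table, per the code above:

/* cc_pause               ->  802.3 bits added to fw_pause
 *   PAUSE_RX | PAUSE_TX  ->  FW_PORT_CAP32_802_3_PAUSE
 *   PAUSE_RX only        ->  FW_PORT_CAP32_802_3_ASM_DIR
 *   PAUSE_TX only        ->  FW_PORT_CAP32_802_3_ASM_DIR
 *   neither              ->  none
 * FW_PORT_CAP32_FORCE_PAUSE is still ORed in whenever PAUSE_AUTONEG is
 * clear; the 802.3 bits are reporting-only and ignored by L1 Configure.
 */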
@@ -4100,31 +4124,22 @@ static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
}
/**
- * t4_link_l1cfg - apply link configuration to MAC/PHY
+ * t4_link_acaps - compute Link Advertised Port Capabilities
* @adapter: the adapter
- * @mbox: the Firmware Mailbox to use
* @port: the Port ID
* @lc: the Port's Link Configuration
- * @sleep_ok: if true we may sleep while awaiting command completion
- * @timeout: time to wait for command to finish before timing out
- * (negative implies @sleep_ok=false)
*
- * Set up a port's MAC and PHY according to a desired link configuration.
- * - If the PHY can auto-negotiate first decide what to advertise, then
- * enable/disable auto-negotiation as desired, and reset.
- * - If the PHY does not auto-negotiate just reset it.
- * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
- * otherwise do it later based on the outcome of auto-negotiation.
+ * Synthesize the Advertised Port Capabilities we'll be using based on
+ * the base Advertised Port Capabilities (which have been filtered by
+ * ADVERT_MASK) plus the individual controls for things like Pause
+ * Frames, Forward Error Correction, MDI, etc.
*/
-int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
- unsigned int port, struct link_config *lc,
- bool sleep_ok, int timeout)
+fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
+ struct link_config *lc)
{
- unsigned int fw_caps = adapter->params.fw_caps_support;
- fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
- struct fw_port_cmd cmd;
+ fw_port_cap32_t fw_fc, fw_fec, acaps;
unsigned int fw_mdi;
- int ret;
+ char cc_fec;
fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
@@ -4151,18 +4166,15 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* init_link_config().
*/
if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
- if (lc->autoneg == AUTONEG_ENABLE)
- return -EINVAL;
-
- rcap = lc->acaps | fw_fc | fw_fec;
+ acaps = lc->acaps | fw_fc | fw_fec;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else if (lc->autoneg == AUTONEG_DISABLE) {
- rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
lc->fec = cc_fec;
} else {
- rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
+ acaps = lc->acaps | fw_fc | fw_fec | fw_mdi;
}
/* Some Requested Port Capabilities are trivially wrong if they exceed
@@ -4173,15 +4185,50 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
* we need to exclude this from this check in order to maintain
* compatibility ...
*/
- if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
- dev_err(adapter->pdev_dev,
- "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
- rcap, lc->pcaps);
+ if ((acaps & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
+ dev_err(adapter->pdev_dev, "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
+ acaps, lc->pcaps);
+ return -EINVAL;
+ }
+
+ return acaps;
+}
+
+/**
+ * t4_link_l1cfg_core - apply link configuration to MAC/PHY
+ * @adapter: the adapter
+ * @mbox: the Firmware Mailbox to use
+ * @port: the Port ID
+ * @lc: the Port's Link Configuration
+ * @sleep_ok: if true we may sleep while awaiting command completion
+ * @timeout: time to wait for command to finish before timing out
+ * (negative implies @sleep_ok=false)
+ *
+ * Set up a port's MAC and PHY according to a desired link configuration.
+ * - If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired, and reset.
+ * - If the PHY does not auto-negotiate just reset it.
+ * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
+ unsigned int port, struct link_config *lc,
+ u8 sleep_ok, int timeout)
+{
+ unsigned int fw_caps = adapter->params.fw_caps_support;
+ struct fw_port_cmd cmd;
+ fw_port_cap32_t rcap;
+ int ret;
+
+ if (!(lc->pcaps & FW_PORT_CAP32_ANEG) &&
+ lc->autoneg == AUTONEG_ENABLE) {
return -EINVAL;
}
- /* And send that on to the Firmware ...
+ /* Compute our Requested Port Capabilities and send that on to the
+ * Firmware.
*/
+ rcap = t4_link_acaps(adapter, port, lc);
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
@@ -4211,7 +4258,7 @@ int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
rcap, -ret);
return ret;
}
- return ret;
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 9125ddd89dd1..a02b1dff403e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x16
-#define T4FW_VERSION_MICRO 0x09
+#define T4FW_VERSION_MINOR 0x17
+#define T4FW_VERSION_MICRO 0x03
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x16
-#define T5FW_VERSION_MICRO 0x09
+#define T5FW_VERSION_MINOR 0x17
+#define T5FW_VERSION_MICRO 0x03
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x16
-#define T6FW_VERSION_MICRO 0x09
+#define T6FW_VERSION_MINOR 0x17
+#define T6FW_VERSION_MICRO 0x03
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index adc4d481815b..a8c4e0c851e7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1479,22 +1479,6 @@ static int cxgb4vf_get_link_ksettings(struct net_device *dev,
base->duplex = DUPLEX_UNKNOWN;
}
- if (pi->link_cfg.fc & PAUSE_RX) {
- if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Pause);
- } else {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
- } else if (pi->link_cfg.fc & PAUSE_TX) {
- ethtool_link_ksettings_add_link_mode(link_ksettings,
- advertising,
- Asym_Pause);
- }
-
base->autoneg = pi->link_cfg.autoneg;
if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
ethtool_link_ksettings_add_link_mode(link_ksettings,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 84dff74ca9cd..8a389d617a23 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -313,7 +313,17 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
return ret;
}
+/* In the Physical Function Driver Common Code, the ADVERT_MASK is used to
+ * mask out bits in the Advertised Port Capabilities which are managed via
+ * separate controls, like Pause Frames and Forward Error Correction. In the
+ * Virtual Function Common Code, since we never perform L1 Configuration on
+ * the Link, the only things we really need to filter out are things which
+ * we decode and report separately like Speed.
+ */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
+ FW_PORT_CAP32_802_3_PAUSE | \
+ FW_PORT_CAP32_802_3_ASM_DIR | \
+ FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M) | \
FW_PORT_CAP32_ANEG)
/**
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 733d9172425b..acb2856936d2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -897,7 +897,7 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
netif_tx_stop_queue(txq);
skb_tx_timestamp(skb);
- if (!skb->xmit_more || netif_xmit_stopped(txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(txq))
vnic_wq_doorbell(wq);
spin_unlock(&enic->wq_lock[txq_map]);
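[This and the matching conversions below (be2net, hinic, e1000, e1000e, fm10k, i40e, iavf) follow one idiom: the per-skb xmit_more bit became the core helper netdev_xmit_more(), which is only meaningful inside ndo_start_xmit. The doorbell-batching shape, as a sketch rather than any one driver's code:]

    /* Inside ndo_start_xmit, after queuing descriptors for skb: only
     * ring the (expensive) hardware doorbell when the stack has no
     * further packets pending for us, or the queue has been stopped.
     */
    if (!netdev_xmit_more() || netif_xmit_stopped(txq))
        ring_hw_doorbell(wq); /* ring_hw_doorbell() is hypothetical */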
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 3c7c04406a2b..e2f9fbced174 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1376,7 +1376,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
u16 q_idx = skb_get_queue_mapping(skb);
struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
struct be_wrb_params wrb_params = { 0 };
- bool flush = !skb->xmit_more;
+ bool flush = !netdev_xmit_more();
u16 wrb_cnt;
skb = be_xmit_workarounds(adapter, skb, &wrb_params);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index dc339dc1adb2..2055c97dc22b 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -435,7 +435,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
- napi_gro_receive(&ch->napi, skb);
+ list_add_tail(&skb->list, ch->rx_list);
return;
@@ -1113,12 +1113,16 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
struct dpaa2_eth_fq *fq, *txc_fq = NULL;
struct netdev_queue *nq;
int store_cleaned, work_done;
+ struct list_head rx_list;
int err;
ch = container_of(napi, struct dpaa2_eth_channel, napi);
ch->xdp.res = 0;
priv = ch->priv;
+ INIT_LIST_HEAD(&rx_list);
+ ch->rx_list = &rx_list;
+
do {
err = pull_channel(ch);
if (unlikely(err))
@@ -1162,6 +1166,8 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
work_done = max(rx_cleaned, 1);
out:
+ netif_receive_skb_list(ch->rx_list);
+
if (txc_fq && txc_fq->dq_frames) {
nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
netdev_tx_completed_queue(nq, txc_fq->dq_frames,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7879622aa3e6..a11ebfdc4a23 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -334,6 +334,7 @@ struct dpaa2_eth_channel {
struct dpaa2_eth_ch_stats stats;
struct dpaa2_eth_ch_xdp xdp;
struct xdp_rxq_info xdp_rxq;
+ struct list_head *rx_list;
};
struct dpaa2_eth_dist_fields {
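[The dpaa2 conversion above replaces per-skb napi_gro_receive() with batched delivery: received skbs accumulate on a list for the duration of the poll and are handed to the stack in a single netif_receive_skb_list() call, letting the core amortize per-packet costs. The bare pattern, as a sketch with a hypothetical poll function:]

    static int foo_poll(struct napi_struct *napi, int budget)
    {
        LIST_HEAD(rx_list);
        int work_done = 0;

        /* for each received frame, build the skb and defer delivery:
         *     list_add_tail(&skb->list, &rx_list); work_done++;
         */

        netif_receive_skb_list(&rx_list); /* one batched hand-off */

        if (work_done < budget)
            napi_complete_done(napi, work_done);
        return work_done;
    }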
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 4cd86ba1f050..297b95c1b3c1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1962,8 +1962,7 @@ static void hns_nic_get_stats64(struct net_device *ndev,
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
struct hns_nic_priv *priv = netdev_priv(ndev);
@@ -1973,7 +1972,7 @@ hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
is_multicast_ether_addr(eth_hdr->h_dest))
return 0;
else
- return fallback(ndev, skb, NULL);
+ return netdev_pick_tx(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 299b277bc7ae..9d3d45f02be1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -43,6 +43,7 @@ enum HCLGE_MBX_OPCODE {
HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf reset status */
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 17ab4f4af6ad..fa8b8506b120 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -76,8 +76,8 @@ static int hnae3_get_client_init_flag(struct hnae3_client *client,
return inited;
}
-static int hnae3_match_n_instantiate(struct hnae3_client *client,
- struct hnae3_ae_dev *ae_dev, bool is_reg)
+static int hnae3_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
{
int ret;
@@ -87,23 +87,27 @@ static int hnae3_match_n_instantiate(struct hnae3_client *client,
return 0;
}
- /* now, (un-)instantiate client by calling lower layer */
- if (is_reg) {
- ret = ae_dev->ops->init_client_instance(client, ae_dev);
- if (ret)
- dev_err(&ae_dev->pdev->dev,
- "fail to instantiate client, ret = %d\n", ret);
+ ret = ae_dev->ops->init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail to instantiate client, ret = %d\n", ret);
- return ret;
- }
+ return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+ return;
if (hnae3_get_client_init_flag(client, ae_dev)) {
ae_dev->ops->uninit_client_instance(client, ae_dev);
hnae3_set_client_init_flag(client, ae_dev, 0);
}
-
- return 0;
}
int hnae3_register_client(struct hnae3_client *client)
@@ -129,7 +133,7 @@ int hnae3_register_client(struct hnae3_client *client)
/* if the client could not be initialized on the current port, for
* any reason, move on to the next available port
*/
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed for port, ret = %d\n",
@@ -153,7 +157,7 @@ void hnae3_unregister_client(struct hnae3_client *client)
mutex_lock(&hnae3_common_lock);
/* un-initialize the client on every matched port */
list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
}
list_del(&client->node);
@@ -205,7 +209,7 @@ void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
* initialize the matching client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -243,7 +247,7 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
* un-initialize the matching client instance
*/
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -301,7 +305,7 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
* initialize the matching client instance
*/
list_for_each_entry(client, &hnae3_client_list, node) {
- ret = hnae3_match_n_instantiate(client, ae_dev, true);
+ ret = hnae3_init_client_instance(client, ae_dev);
if (ret)
dev_err(&ae_dev->pdev->dev,
"match and instantiation failed, ret = %d\n",
@@ -343,7 +347,7 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
continue;
list_for_each_entry(client, &hnae3_client_list, node)
- hnae3_match_n_instantiate(client, ae_dev, false);
+ hnae3_uninit_client_instance(client, ae_dev);
ae_algo->ops->uninit_ae_dev(ae_dev);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 162cb9afa0e7..0f389a43ff8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -827,12 +827,12 @@ static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
*/
static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
{
-#define IANA_VXLAN_PORT 4789
union l4_hdr_info l4;
l4.hdr = skb_transport_header(skb);
- if (!(!skb->encapsulation && l4.udp->dest == htons(IANA_VXLAN_PORT)))
+ if (!(!skb->encapsulation &&
+ l4.udp->dest == htons(IANA_VXLAN_UDP_PORT)))
return false;
skb_checksum_help(skb);
@@ -1012,7 +1012,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
struct hns3_desc *desc = &ring->desc[ring->next_to_use];
struct device *dev = ring_to_dev(ring);
- u16 bdtp_fe_sc_vld_ra_ri = 0;
struct skb_frag_struct *frag;
unsigned int frag_buf_num;
int k, sizeoflast;
@@ -1080,12 +1079,30 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
desc_cb->length = size;
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+ ring_ptr_move_fw(ring, next_to_use);
+ return 0;
+ }
+
frag_buf_num = hns3_tx_bd_count(size);
sizeoflast = size & HNS3_TX_LAST_SIZE_M;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
+ u16 bdtp_fe_sc_vld_ra_ri = 0;
+
/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
desc_cb->priv = priv;
desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
@@ -1574,6 +1591,9 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
struct hnae3_handle *h = hns3_get_handle(netdev);
int ret;
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
if (!h->ae_algo->ops->set_mtu)
return -EOPNOTSUPP;
@@ -2891,7 +2911,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
struct hns3_enet_tqp_vector *tqp_vector =
container_of(napi, struct hns3_enet_tqp_vector, napi);
bool clean_complete = true;
- int rx_budget;
+ int rx_budget = budget;
if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
napi_complete(napi);
@@ -2905,7 +2925,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
hns3_clean_tx_ring(ring);
/* make sure the rx ring budget is not smaller than 1 */
- rx_budget = max(budget / tqp_vector->num_tqps, 1);
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
hns3_for_each_ring(ring, tqp_vector->rx_group) {
int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
@@ -3773,12 +3794,13 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
struct netdev_hw_addr *ha, *tmp;
int ret = 0;
+ netif_addr_lock_bh(ndev);
/* go through and sync uc_addr entries to the device */
list = &ndev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_uc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
/* go through and sync mc_addr entries to the device */
@@ -3786,9 +3808,11 @@ static int hns3_recover_hw_addr(struct net_device *ndev)
list_for_each_entry_safe(ha, tmp, &list->list, list) {
ret = hns3_nic_mc_sync(ndev, ha->addr);
if (ret)
- return ret;
+ goto out;
}
+out:
+ netif_addr_unlock_bh(ndev);
return ret;
}
@@ -3799,6 +3823,7 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
hns3_nic_uc_unsync(netdev, netdev->dev_addr);
+ netif_addr_lock_bh(netdev);
/* go through and unsync uc_addr entries to the device */
list = &netdev->uc;
list_for_each_entry_safe(ha, tmp, &list->list, list)
@@ -3809,6 +3834,8 @@ static void hns3_remove_hw_addr(struct net_device *netdev)
list_for_each_entry_safe(ha, tmp, &list->list, list)
if (ha->refcount > 1)
hns3_nic_mc_unsync(netdev, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
}
static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
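[The netif_addr_lock_bh()/netif_addr_unlock_bh() pairs added above close a race: the netdev's uc/mc address lists may be rewritten by the core while the driver iterates them during recovery. The protected walk in outline, with the ha/tmp declarations omitted:]

    netif_addr_lock_bh(ndev);
    list_for_each_entry_safe(ha, tmp, &ndev->uc.list, list) {
        /* sync or unsync ha->addr with the hardware table; on error,
         * jump to a label that drops the lock before returning
         */
    }
    netif_addr_unlock_bh(ndev);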
@@ -4101,7 +4128,7 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct hns3_nic_priv *priv = netdev_priv(netdev);
int ret;
- if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
}
@@ -4123,8 +4150,6 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
hns3_put_ring_config(priv);
priv->ring_data = NULL;
- clear_bit(HNS3_NIC_STATE_INITED, &priv->state);
-
return ret;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 75669cd0c311..025d0f7f860d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -577,18 +577,13 @@ union l4_hdr_info {
unsigned char *hdr;
};
-/* the distance between [begin, end) in a ring buffer
- * note: there is a unuse slot between the begin and the end
- */
-static inline int ring_dist(struct hns3_enet_ring *ring, int begin, int end)
-{
- return (end - begin + ring->desc_num) % ring->desc_num;
-}
-
static inline int ring_space(struct hns3_enet_ring *ring)
{
- return ring->desc_num -
- ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+ int begin = ring->next_to_clean;
+ int end = ring->next_to_use;
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
}
static inline int is_ring_empty(struct hns3_enet_ring *ring)
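[The rewrite drops the modulo of the old ring_dist() helper in favor of an explicit wrap test; one descriptor always stays unused so a full ring can be told apart from an empty one. The arithmetic can be sanity-checked standalone:]

    #include <assert.h>

    static int ring_space_model(int desc_num, int begin, int end)
    {
        return ((end >= begin) ? (desc_num - end + begin)
                               : (begin - end)) - 1;
    }

    int main(void)
    {
        assert(ring_space_model(8, 2, 6) == 3); /* no wrap: (8 - 6 + 2) - 1 */
        assert(ring_space_model(8, 6, 2) == 3); /* wrapped: (6 - 2) - 1 */
        assert(ring_space_model(8, 0, 0) == 7); /* empty ring keeps one slot */
        return 0;
    }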
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 359d4731fb2d..59ef272297ab 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -483,6 +483,11 @@ static void hns3_get_stats(struct net_device *netdev,
struct hnae3_handle *h = hns3_get_handle(netdev);
u64 *p = data;
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
netdev_err(netdev, "could not get any statistics\n");
return;
@@ -648,6 +653,10 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
+ /* Chip doesn't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev)
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 3a093a92eac5..722bb3124bb6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -411,7 +411,7 @@ static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
spin_unlock(&ring->lock);
}
-void hclge_destroy_cmd_queue(struct hclge_hw *hw)
+static void hclge_destroy_cmd_queue(struct hclge_hw *hw)
{
hclge_destroy_queue(&hw->cmq.csq);
hclge_destroy_queue(&hw->cmq.crq);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index deda606c51e7..f51e4c01b670 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
#include <net/rtnetlink.h>
#include "hclge_cmd.h"
#include "hclge_dcb.h"
@@ -1015,6 +1016,23 @@ static int hclge_get_cap(struct hclge_dev *hdev)
return ret;
}
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC 64
+#define HCLGE_MIN_RX_DESC 64
+
+ if (!is_kdump_kernel())
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "Running kdump kernel. Using minimal resources\n");
+
+ /* the minimal number of queue pairs equals the number of vports */
+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
static int hclge_configure(struct hclge_dev *hdev)
{
struct hclge_cfg cfg;
@@ -1074,6 +1092,8 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+ hclge_init_kdump_kernel_config(hdev);
+
return ret;
}
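[hclge_init_kdump_kernel_config() is an instance of a common idiom: a kdump (crash) kernel boots with very little memory, so drivers that detect it via is_kdump_kernel() clamp their resource demands before allocating queues and rings. The shape of the idiom, reduced to its essentials with field names as in the hunk above:]

    #include <linux/crash_dump.h>

    if (is_kdump_kernel()) {
        /* one queue pair per vport, minimal descriptor rings */
        hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
        hdev->num_tx_desc = HCLGE_MIN_TX_DESC; /* 64, per the defines above */
        hdev->num_rx_desc = HCLGE_MIN_RX_DESC; /* 64 */
    }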
@@ -5942,8 +5962,11 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
/* check if we just hit the duplicate */
- if (!ret)
- ret = -EINVAL;
+ if (!ret) {
+ dev_warn(&hdev->pdev->dev, "VF %d mac(%pM) exists\n",
+ vport->vport_id, addr);
+ return 0;
+ }
dev_err(&hdev->pdev->dev,
"PF failed to add unicast entry(%pM) in the MAC table\n",
@@ -6293,7 +6316,8 @@ static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
return -EINVAL;
}
- if (!is_first && hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
+ if ((!is_first || is_kdump_kernel()) &&
+ hclge_rm_uc_addr(handle, hdev->hw.mac.mac_addr))
dev_warn(&hdev->pdev->dev,
"remove old uc mac address fail.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 306a23e486de..9aa9c643afcd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -385,24 +385,32 @@ static int hclge_get_vf_queue_depth(struct hclge_vport *vport,
HCLGE_TQPS_DEPTH_INFO_LEN);
}
+static int hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ u8 resp_data;
+
+ resp_data = hdev->hw.mac.media_type;
+ return hclge_gen_resp_to_vf(vport, mbx_req, 0, &resp_data,
+ sizeof(resp_data));
+}
+
static int hclge_get_link_info(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req)
{
struct hclge_dev *hdev = vport->back;
u16 link_status;
- u8 msg_data[10];
- u16 media_type;
+ u8 msg_data[8];
u8 dest_vfid;
u16 duplex;
/* mac.link can only be 0 or 1 */
link_status = (u16)hdev->hw.mac.link;
duplex = hdev->hw.mac.duplex;
- media_type = hdev->hw.mac.media_type;
memcpy(&msg_data[0], &link_status, sizeof(u16));
memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
memcpy(&msg_data[6], &duplex, sizeof(u16));
- memcpy(&msg_data[8], &media_type, sizeof(u16));
dest_vfid = mbx_req->mbx_src_vfid;
/* send this requested info to VF */
@@ -662,6 +670,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
hclge_rm_vport_all_vlan_table(vport, true);
mutex_unlock(&hdev->vport_cfg_mutex);
break;
+ case HCLGE_MBX_GET_MEDIA_TYPE:
+ ret = hclge_get_vf_media_type(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to media type for VF\n",
+ ret);
+ break;
default:
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %d\n",
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index 9441b453d38d..930b175a7090 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -27,26 +27,39 @@ static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
return ring->desc_num - used - 1;
}
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+ int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
+ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
- u16 ntc = csq->next_to_clean;
- struct hclgevf_desc *desc;
int clean = 0;
u32 head;
- desc = &csq->desc[ntc];
head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
- while (head != ntc) {
- memset(desc, 0, sizeof(*desc));
- ntc++;
- if (ntc == csq->desc_num)
- ntc = 0;
- desc = &csq->desc[ntc];
- clean++;
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%d, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EIO;
}
- csq->next_to_clean = ntc;
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
return clean;
}
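[The new validity check accepts a hardware head pointer only when it falls inside the outstanding-command window, wrapped or not; anything else is treated as firmware corruption and the command queue is disabled. The predicate, modeled standalone:]

    #include <assert.h>

    /* head is valid if it lies in [ntc, ntu] treating the ring as circular */
    static int valid_head_model(int ntu, int ntc, int head)
    {
        if (ntu > ntc)
            return head >= ntc && head <= ntu;
        return head >= ntc || head <= ntu;
    }

    int main(void)
    {
        assert(valid_head_model(6, 2, 4));  /* inside the linear window */
        assert(!valid_head_model(6, 2, 7)); /* past next_to_use */
        assert(valid_head_model(2, 6, 0));  /* wrapped window: 6..max, 0..2 */
        assert(!valid_head_model(2, 6, 4)); /* hole in the wrapped window */
        return 0;
    }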
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 8bc28e6f465f..509ff93f8cf5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -307,6 +307,25 @@ static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
return qid_in_pf;
}
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ u8 resp_msg;
+ int ret;
+
+ ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_MEDIA_TYPE, 0, NULL, 0,
+ true, &resp_msg, sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg;
+
+ return 0;
+}
+
static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
{
struct hclgevf_tqp *tqp;
@@ -404,7 +423,7 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
}
}
-void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
{
#define HCLGEVF_ADVERTISING 0
#define HCLGEVF_SUPPORTED 1
@@ -1824,6 +1843,10 @@ static int hclgevf_configure(struct hclgevf_dev *hdev)
if (ret)
return ret;
+ ret = hclgevf_get_pf_media_type(hdev);
+ if (ret)
+ return ret;
+
/* get tc configuration from PF */
return hclgevf_get_tc_info(hdev);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
index 7dc3c9f79169..3bcf49bde11a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -272,7 +272,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
link_status = le16_to_cpu(msg_q[1]);
memcpy(&speed, &msg_q[2], sizeof(speed));
duplex = (u8)le16_to_cpu(msg_q[4]);
- hdev->hw.mac.media_type = (u8)le16_to_cpu(msg_q[5]);
/* update upper layer with new link status */
hclgevf_update_link_status(hdev, link_status);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index e17bf33eba0c..0fbe8046824b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -518,7 +518,7 @@ process_sq_wqe:
flush_skbs:
netdev_txq = netdev_get_tx_queue(netdev, q_id);
- if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
+ if ((!netdev_xmit_more()) || (netif_xmit_stopped(netdev_txq)))
hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
return err;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 90b62c1412c8..707c8ba120c2 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1463,7 +1463,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
memset(pr, 0, sizeof(struct ehea_port_res));
- pr->tx_bytes = rx_bytes;
+ pr->tx_bytes = tx_bytes;
pr->tx_packets = tx_packets;
pr->rx_bytes = rx_bytes;
pr->rx_packets = rx_packets;
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 5e4e37132bf2..77ce17383aba 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -123,8 +123,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
struct ehea_cq *cq;
- struct h_epa epa;
- u64 *cq_handle_ref, hret, rpage;
+ u64 hret, rpage;
u32 counter;
int ret;
void *vpage;
@@ -139,8 +138,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
cq->adapter = adapter;
- cq_handle_ref = &cq->fw_handle;
-
hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
&cq->fw_handle, &cq->epas);
if (hret != H_SUCCESS) {
@@ -188,7 +185,6 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
}
hw_qeit_reset(&cq->hw_queue);
- epa = cq->epas.kernel;
ehea_reset_cq_ep(cq);
ehea_reset_cq_n1(cq);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index dd71d5db7274..d86b0e5895a6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -93,7 +93,7 @@ struct ibmveth_stat {
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
-struct ibmveth_stat ibmveth_stats[] = {
+static struct ibmveth_stat ibmveth_stats[] = {
{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
{ "replenish_add_buff_failure",
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 51cfe95f3e24..1de691e76b86 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -120,6 +120,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1968,13 +1969,11 @@ static void __ibmvnic_reset(struct work_struct *work)
{
struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
- struct net_device *netdev;
bool we_lock_rtnl = false;
u32 reset_state;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
- netdev = adapter->netdev;
/* netif_set_real_num_xx_queues needs to take rtnl lock here
* unless wait_for_reset is set, in which case the rtnl lock
@@ -2279,23 +2278,20 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
static int ibmvnic_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
- u32 supported, advertising;
+ struct ibmvnic_adapter *adapter = netdev_priv(netdev);
+ int rc;
- supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
- SUPPORTED_FIBRE);
- advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
- ADVERTISED_FIBRE);
- cmd->base.speed = SPEED_1000;
- cmd->base.duplex = DUPLEX_FULL;
+ rc = send_query_phys_parms(adapter);
+ if (rc) {
+ adapter->speed = SPEED_UNKNOWN;
+ adapter->duplex = DUPLEX_UNKNOWN;
+ }
+ cmd->base.speed = adapter->speed;
+ cmd->base.duplex = adapter->duplex;
cmd->base.port = PORT_FIBRE;
cmd->base.phy_address = 0;
cmd->base.autoneg = AUTONEG_ENABLE;
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
- advertising);
-
return 0;
}
@@ -4279,6 +4275,73 @@ out:
}
}
+static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
+{
+ union ibmvnic_crq crq;
+ int rc;
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_phys_parms.first = IBMVNIC_CRQ_CMD;
+ crq.query_phys_parms.cmd = QUERY_PHYS_PARMS;
+ init_completion(&adapter->fw_done);
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc)
+ return rc;
+ wait_for_completion(&adapter->fw_done);
+ return adapter->fw_done_rc ? -EIO : 0;
+}
+
+static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
+ struct ibmvnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int rc;
+
+ rc = crq->query_phys_parms_rsp.rc.code;
+ if (rc) {
+ netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
+ return rc;
+ }
+ switch (be32_to_cpu(crq->query_phys_parms_rsp.speed)) {
+ case IBMVNIC_10MBPS:
+ adapter->speed = SPEED_10;
+ break;
+ case IBMVNIC_100MBPS:
+ adapter->speed = SPEED_100;
+ break;
+ case IBMVNIC_1GBPS:
+ adapter->speed = SPEED_1000;
+ break;
+ case IBMVNIC_10GBPS:
+ adapter->speed = SPEED_10000;
+ break;
+ case IBMVNIC_25GBPS:
+ adapter->speed = SPEED_25000;
+ break;
+ case IBMVNIC_40GBPS:
+ adapter->speed = SPEED_40000;
+ break;
+ case IBMVNIC_50GBPS:
+ adapter->speed = SPEED_50000;
+ break;
+ case IBMVNIC_100GBPS:
+ adapter->speed = SPEED_100000;
+ break;
+ default:
+ netdev_warn(netdev, "Unknown speed 0x%08x\n",
+ be32_to_cpu(crq->query_phys_parms_rsp.speed));
+ adapter->speed = SPEED_UNKNOWN;
+ }
+ if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
+ adapter->duplex = DUPLEX_FULL;
+ else if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_HALF_DUPLEX)
+ adapter->duplex = DUPLEX_HALF;
+ else
+ adapter->duplex = DUPLEX_UNKNOWN;
+
+ return rc;
+}
+
static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4427,6 +4490,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
case GET_VPD_RSP:
handle_vpd_rsp(crq, adapter);
break;
+ case QUERY_PHYS_PARMS_RSP:
+ adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
+ complete(&adapter->fw_done);
+ break;
default:
netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
gen_crq->cmd);
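[send_query_phys_parms() and the QUERY_PHYS_PARMS_RSP arm above form a classic synchronous request over an asynchronous channel: the caller re-initializes a completion, sends the CRQ command, and sleeps; the interrupt path records the outcome in fw_done_rc and completes. In outline:]

    /* requester (process context) */
    init_completion(&adapter->fw_done);
    rc = ibmvnic_send_crq(adapter, &crq);
    if (!rc)
        wait_for_completion(&adapter->fw_done);

    /* responder (CRQ handler context) */
    adapter->fw_done_rc = handle_query_phys_parms_rsp(crq, adapter);
    complete(&adapter->fw_done);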
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f2018dbebfa5..d5260a206708 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -377,11 +377,16 @@ struct ibmvnic_phys_parms {
u8 flags2;
#define IBMVNIC_LOGICAL_LNK_ACTIVE 0x80
__be32 speed;
-#define IBMVNIC_AUTONEG 0x80
-#define IBMVNIC_10MBPS 0x40
-#define IBMVNIC_100MBPS 0x20
-#define IBMVNIC_1GBPS 0x10
-#define IBMVNIC_10GBPS 0x08
+#define IBMVNIC_AUTONEG 0x80000000
+#define IBMVNIC_10MBPS 0x40000000
+#define IBMVNIC_100MBPS 0x20000000
+#define IBMVNIC_1GBPS 0x10000000
+#define IBMVNIC_10GBPS 0x08000000
+#define IBMVNIC_40GBPS 0x04000000
+#define IBMVNIC_100GBPS 0x02000000
+#define IBMVNIC_25GBPS 0x01000000
+#define IBMVNIC_50GBPS 0x00800000
+#define IBMVNIC_200GBPS 0x00400000
__be32 mtu;
struct ibmvnic_rc rc;
} __packed __aligned(8);
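[The widened constants track the fact that speed is a __be32: the old single-byte values could only ever describe the low byte of the field. A hedged usage sketch -- the field is converted before any bit test, and the response handler above expects exactly one speed bit set:]

    /* speed arrives big-endian on the wire; convert before testing bits */
    u32 speed = be32_to_cpu(crq->query_phys_parms_rsp.speed);

    if (speed & IBMVNIC_AUTONEG)
        autoneg = AUTONEG_ENABLE; /* 'autoneg' is illustrative */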
@@ -999,6 +1004,9 @@ struct ibmvnic_adapter {
int phys_link_state;
int logical_link_state;
+ u32 speed;
+ u8 duplex;
+
/* login data */
struct ibmvnic_login_buffer *login_buf;
dma_addr_t login_buf_token;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 0fd268070fb4..a65d5a9ba7db 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2797,7 +2797,7 @@ static int e100_set_features(struct net_device *netdev,
netdev->features = features;
e100_exec_cb(nic, NULL, e100_configure);
- return 0;
+ return 1;
}
static const struct net_device_ops e100_netdev_ops = {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8fe9af0e2ab7..6f72ab139fd9 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -820,7 +820,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000_netdev_ops = {
@@ -3267,7 +3267,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, tx_ring, desc_needed);
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
/* we need this if more than one processor can write to
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7acc61e4f645..a8fa4a1628f5 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5897,7 +5897,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
DIV_ROUND_UP(PAGE_SIZE,
adapter->tx_fifo_limit) + 2));
- if (!skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_tdt_wa(tx_ring,
@@ -7003,7 +7003,7 @@ static int e1000_set_features(struct net_device *netdev,
else
e1000e_reset(adapter);
- return 0;
+ return 1;
}
static const struct net_device_ops e1000e_netdev_ops = {
@@ -7350,7 +7350,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
- if (pci_dev_run_wake(pdev))
+ if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
pm_runtime_put_noidle(&pdev->dev);
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index ecef949f3baa..2325cee76211 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1037,7 +1037,7 @@ static void fm10k_tx_map(struct fm10k_ring *tx_ring,
fm10k_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6c97667d20ef..1a95223c9f99 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3469,7 +3469,7 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index 9b4d7cec2e18..b64187753ad6 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2358,7 +2358,7 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb,
first->next_to_watch = tx_desc;
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 89440775aea1..b819689da7e2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -42,10 +42,21 @@
extern const char ice_drv_ver[];
#define ICE_BAR0 0
-#define ICE_DFLT_NUM_DESC 128
#define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE
#define ICE_MAX_NUM_DESC 8160
+/* set default number of Rx/Tx descriptors to the minimum between
+ * ICE_MAX_NUM_DESC and the number of descriptors to fill up an entire page
+ */
+#define ICE_DFLT_NUM_RX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(union ice_32byte_rx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+#define ICE_DFLT_NUM_TX_DESC min_t(u16, ICE_MAX_NUM_DESC, \
+ ALIGN(PAGE_SIZE / \
+ sizeof(struct ice_tx_desc), \
+ ICE_REQ_DESC_MULTIPLE))
+
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
#define ICE_ETHTOOL_FWVER_LEN 32
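[On 4 KiB pages these defaults come out to one page of descriptors each (128 Rx at 32 bytes apiece, 256 Tx at 16), already multiples of ICE_REQ_DESC_MULTIPLE, so the ALIGN and min_t only bite on larger page sizes. The arithmetic, checked standalone under those descriptor-size assumptions:]

    #include <assert.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))
    #define MIN(a, b)      ((a) < (b) ? (a) : (b))

    int main(void)
    {
        int max_desc = 8160, mult = 32;

        /* Rx: 4096 / 32 = 128 descriptors, aligned to 32 -> 128 */
        assert(MIN(max_desc, ALIGN_UP(4096 / 32, mult)) == 128);
        /* Tx: 4096 / 16 = 256 descriptors, aligned to 32 -> 256 */
        assert(MIN(max_desc, ALIGN_UP(4096 / 16, mult)) == 256);
        /* 64 KiB pages: 65536 / 16 = 4096, still under the 8160 cap */
        assert(MIN(max_desc, ALIGN_UP(65536 / 16, mult)) == 4096);
        return 0;
    }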
@@ -114,6 +125,23 @@ extern const char ice_drv_ver[];
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
+ ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_UCAST_RX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
+#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)
+
+#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
+ ICE_PROMISC_MCAST_RX | \
+ ICE_PROMISC_VLAN_TX | \
+ ICE_PROMISC_VLAN_RX)
+
struct ice_tc_info {
u16 qoffset;
u16 qcount_tx;
@@ -247,6 +275,7 @@ struct ice_vsi {
u8 irqs_ready;
u8 current_isup; /* Sync 'link up' logging */
u8 stat_offsets_loaded;
+ u8 vlan_ena;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -257,26 +286,33 @@ struct ice_vsi {
u16 num_txq; /* Used Tx queues */
u16 alloc_rxq; /* Allocated Rx queues */
u16 num_rxq; /* Used Rx queues */
- u16 num_desc;
+ u16 num_rx_desc;
+ u16 num_tx_desc;
struct ice_tc_cfg tc_cfg;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
struct ice_q_vector {
struct ice_vsi *vsi;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- struct ice_ring_container rx;
- struct ice_ring_container tx;
- struct irq_affinity_notify affinity_notify;
+
u16 v_idx; /* index in the vsi->q_vector array. */
- u8 num_ring_tx; /* total number of Tx rings in vector */
u8 num_ring_rx; /* total number of Rx rings in vector */
- char name[ICE_INT_NAME_STR_LEN];
+ u8 num_ring_tx; /* total number of Tx rings in vector */
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
*/
u8 intrl;
+
+ struct napi_struct napi;
+
+ struct ice_ring_container rx;
+ struct ice_ring_container tx;
+
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+
+ char name[ICE_INT_NAME_STR_LEN];
} ____cacheline_internodealigned_in_smp;
enum ice_pf_flags {
@@ -355,8 +391,9 @@ struct ice_netdev_priv {
* @vsi: pointer to vsi struct, can be NULL
* @q_vector: pointer to q_vector, can be NULL
*/
-static inline void ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
- struct ice_q_vector *q_vector)
+static inline void
+ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
+ struct ice_q_vector *q_vector)
{
u32 vector = (vsi && q_vector) ? vsi->hw_base_vector + q_vector->v_idx :
((struct ice_pf *)hw->back)->hw_oicr_idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 242c78469181..8ff438968199 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -953,8 +953,9 @@ struct ice_aqc_set_phy_cfg_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
u8 caps;
-#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
-#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define ICE_AQ_PHY_ENA_VALID_MASK ICE_M(0xef, 0)
+#define ICE_AQ_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define ICE_AQ_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
#define ICE_AQ_PHY_ENA_LOW_POWER BIT(2)
#define ICE_AQ_PHY_ENA_LINK BIT(3)
#define ICE_AQ_PHY_ENA_AUTO_LINK_UPDT BIT(5)
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 63f003441300..5e7a31421c0d 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -262,7 +262,7 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-static enum ice_status
+enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -331,7 +331,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return status;
+ return 0;
}
/**
@@ -358,22 +358,22 @@ static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_UDP_GRE, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_FIN, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
+ ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
+ ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_SYN, ICE_RXFLG_RST,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_PKT_DSI,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_EVLAN_x8100,
- ICE_RXFLG_EVLAN_x9100, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_VLAN, ICE_RXFLG_TNL_MAC,
- ICE_RXFLG_TNL0, idx++);
- ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_RXFLG_TNL1, ICE_RXFLG_TNL2,
- ICE_RXFLG_PKT_DSI, ICE_RXFLG_PKT_DSI, idx);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
+ ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
+ ICE_FLG_EVLAN_x9100, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
+ ICE_FLG_TNL0, idx++);
+ ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
+ ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
@@ -1100,8 +1100,9 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Dumps debug log about control command with descriptor contents.
*/
-void ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc,
- void *buf, u16 buf_len)
+void
+ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
+ u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
@@ -1415,13 +1416,15 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
}
/**
- * ice_get_guar_num_vsi - determine number of guar VSI for a PF
+ * ice_get_num_per_func - determine number of resources per PF
* @hw: pointer to the hw structure
+ * @max: value to be evenly split between each PF
*
* Determine the number of valid functions by going through the bitmap returned
- * from parsing capabilities and use this to calculate the number of VSI per PF.
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
*/
-static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
+static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
u8 funcs;
@@ -1432,7 +1435,7 @@ static u32 ice_get_guar_num_vsi(struct ice_hw *hw)
if (!funcs)
return 0;
- return ICE_MAX_VSI / funcs;
+ return max / funcs;
}
/**
@@ -1512,7 +1515,8 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
"HW caps: Dev.VSI cnt = %d\n",
dev_p->num_vsi_allocd_to_host);
} else if (func_p) {
- func_p->guar_num_vsi = ice_get_guar_num_vsi(hw);
+ func_p->guar_num_vsi =
+ ice_get_num_per_func(hw, ICE_MAX_VSI);
ice_debug(hw, ICE_DBG_INIT,
"HW caps: Func.VSI cnt = %d\n",
number);
@@ -1617,8 +1621,8 @@ ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* @hw: pointer to the hardware structure
* @opc: capabilities type to discover - pass in the command opcode
*/
-static enum ice_status ice_discover_caps(struct ice_hw *hw,
- enum ice_adminq_opc opc)
+static enum ice_status
+ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
enum ice_status status;
u32 cap_count;
@@ -1929,6 +1933,15 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
if (!cfg)
return ICE_ERR_PARAM;
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
+ ice_debug(hw, ICE_DBG_PHY,
+ "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
+ cfg->caps);
+
+ cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
+ }
+
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
desc.params.set_phy.lport_num = lport;
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -2027,8 +2040,10 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
/* clear the old pause settings */
cfg.caps = pcaps->caps & ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
ICE_AQC_PHY_EN_RX_LINK_PAUSE);
+
/* set the new capabilities */
cfg.caps |= pause_mask;
+
/* If the capabilities have changed, then set the new config */
if (cfg.caps != pcaps->caps) {
int retry_count, retry_max = 10;
@@ -2136,6 +2151,32 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
}
/**
+ * ice_aq_set_event_mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ * @cd: pointer to command details structure or NULL
+ *
+ * Set event mask (0x0613)
+ */
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_event_mask *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
+
+ cmd->lport_num = port_num;
+
+ cmd->event_mask = cpu_to_le16(mask);
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
* ice_aq_set_port_id_led
* @pi: pointer to the port information
* @is_orig_mode: is this LED set to original mode (by the net-list)
@@ -2534,8 +2575,8 @@ do_aq:
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u8 src_byte, dest_byte, mask;
u8 *from, *dest;
@@ -2573,8 +2614,8 @@ static void ice_write_byte(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u16 src_word, mask;
__le16 dest_word;
@@ -2616,8 +2657,8 @@ static void ice_write_word(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u32 src_dword, mask;
__le32 dest_dword;
@@ -2667,8 +2708,8 @@ static void ice_write_dword(u8 *src_ctx, u8 *dest_ctx,
* @dest_ctx: the context to be written to
* @ce_info: a description of the struct to be filled
*/
-static void ice_write_qword(u8 *src_ctx, u8 *dest_ctx,
- const struct ice_ctx_ele *ce_info)
+static void
+ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
u64 src_qword, mask;
__le64 dest_qword;
@@ -2908,7 +2949,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u8 tc_bitmap,
mutex_lock(&pi->sched_lock);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
/* configuration is possible only if TC node is present */
if (!ice_sched_get_tc_node(pi, i))
continue;
@@ -3012,8 +3053,9 @@ void ice_replay_post(struct ice_hw *hw)
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat)
{
u64 new_data;
@@ -3043,8 +3085,9 @@ void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
* @prev_stat: ptr to previous loaded stat value
* @cur_stat: ptr to current stat value
*/
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat)
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat)
{
u32 new_data;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index d7c7c2ed8823..fbdfdee353bc 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -9,8 +9,8 @@
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
-void ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf,
- u16 buf_len);
+void
+ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw);
@@ -28,8 +28,8 @@ ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words,
- u16 *data);
+enum ice_status
+ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
@@ -89,6 +89,12 @@ enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
enum ice_status
+ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
+ struct ice_link_status *link, struct ice_sq_cd *cd);
+enum ice_status
+ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
@@ -106,8 +112,10 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_qgrps,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
-void ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
- bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
-void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
- u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
+ bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat);
+void
+ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
+ u64 *prev_stat, u64 *cur_stat);
#endif /* _ICE_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index eb8d149e317c..4a1920e8f168 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -1156,8 +1156,9 @@ ice_get_settings_link_down(struct ethtool_link_ksettings *ks,
*
* Reports speed/duplex settings based on media_type
*/
-static int ice_get_link_ksettings(struct net_device *netdev,
- struct ethtool_link_ksettings *ks)
+static int
+ice_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
@@ -1400,13 +1401,12 @@ ice_set_link_ksettings(struct net_device *netdev,
return -EOPNOTSUPP;
/* Check if this is lan vsi */
- for (idx = 0 ; idx < pf->num_alloc_vsi ; idx++) {
+ ice_for_each_vsi(pf, idx)
if (pf->vsi[idx]->type == ICE_VSI_PF) {
if (np->vsi != pf->vsi[idx])
return -EOPNOTSUPP;
break;
}
- }
if (p->phy.media_type != ICE_MEDIA_BASET &&
p->phy.media_type != ICE_MEDIA_FIBER &&
@@ -1566,8 +1566,9 @@ done:
*
* Returns Success if the command is supported.
*/
-static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 __always_unused *rule_locs)
+static int
+ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+ u32 __always_unused *rule_locs)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2024,8 +2025,9 @@ out:
* Returns -EINVAL if the table specifies an invalid queue id, otherwise
* returns 0 after programming the table.
*/
-static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
+static int
+ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2180,8 +2182,9 @@ ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_get_coalesce(netdev, ec, -1);
}
-static int ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_get_coalesce(netdev, ec, q_num);
}
@@ -2325,8 +2328,9 @@ ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec)
return __ice_set_coalesce(netdev, ec, -1);
}
-static int ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
- struct ethtool_coalesce *ec)
+static int
+ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
+ struct ethtool_coalesce *ec)
{
return __ice_set_coalesce(netdev, ec, q_num);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6bf5cc064270..af6f32358363 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -106,6 +106,16 @@
#define VPGEN_VFRTRIG_VFSWR_M BIT(0)
#define PFHMC_ERRORDATA 0x00520500
#define PFHMC_ERRORINFO 0x00520400
+#define GLINT_CTL 0x0016CC54
+#define GLINT_CTL_DIS_AUTOMASK_M BIT(0)
+#define GLINT_CTL_ITR_GRAN_200_S 16
+#define GLINT_CTL_ITR_GRAN_200_M ICE_M(0xF, 16)
+#define GLINT_CTL_ITR_GRAN_100_S 20
+#define GLINT_CTL_ITR_GRAN_100_M ICE_M(0xF, 20)
+#define GLINT_CTL_ITR_GRAN_50_S 24
+#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
+#define GLINT_CTL_ITR_GRAN_25_S 28
+#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
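
The new GLINT_CTL granularity fields pair a shift (_S) with a mask (_M) built by ICE_M(). ICE_M() is defined elsewhere; it presumably just shifts the mask into position, which makes field extraction a mask-then-shift. A compilable sketch (the ICE_M() body here is an assumption):

#include <stdint.h>
#include <stdio.h>

#define ICE_M(m, s)			((m) << (s))
#define GLINT_CTL_ITR_GRAN_200_S	16
#define GLINT_CTL_ITR_GRAN_200_M	ICE_M(0xF, 16)

int main(void)
{
	uint32_t regval = 0x2u << GLINT_CTL_ITR_GRAN_200_S; /* 2 us gran */
	uint32_t gran = (regval & GLINT_CTL_ITR_GRAN_200_M) >>
			GLINT_CTL_ITR_GRAN_200_S;

	printf("ITR granularity (200 level): %u us\n", gran); /* prints 2 */
	return 0;
}
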
@@ -168,6 +178,8 @@
#define VPINT_ALLOC_PCI_LAST_S 12
#define VPINT_ALLOC_PCI_LAST_M ICE_M(0x7FF, 12)
#define VPINT_ALLOC_PCI_VALID_M BIT(31)
+#define VPINT_MBX_CTL(_VSI) (0x0016A000 + ((_VSI) * 4))
+#define VPINT_MBX_CTL_CAUSE_ENA_M BIT(30)
#define GLLAN_RCTL_0 0x002941F8
#define QRX_CONTEXT(_i, _QRX) (0x00280000 + ((_i) * 8192 + (_QRX) * 4))
#define QRX_CTRL(_QRX) (0x00120000 + ((_QRX) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index ef4c79b5aa32..a8c3fe87d7aa 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -208,23 +208,23 @@ enum ice_flex_rx_mdid {
ICE_RX_MDID_HASH_HIGH,
};
-/* Rx Flag64 packet flag bits */
-enum ice_rx_flg64_bits {
- ICE_RXFLG_PKT_DSI = 0,
- ICE_RXFLG_EVLAN_x8100 = 15,
- ICE_RXFLG_EVLAN_x9100,
- ICE_RXFLG_VLAN_x8100,
- ICE_RXFLG_TNL_MAC = 22,
- ICE_RXFLG_TNL_VLAN,
- ICE_RXFLG_PKT_FRG,
- ICE_RXFLG_FIN = 32,
- ICE_RXFLG_SYN,
- ICE_RXFLG_RST,
- ICE_RXFLG_TNL0 = 38,
- ICE_RXFLG_TNL1,
- ICE_RXFLG_TNL2,
- ICE_RXFLG_UDP_GRE,
- ICE_RXFLG_RSVD = 63
+/* RX/TX Flag64 packet flag bits */
+enum ice_flg64_bits {
+ ICE_FLG_PKT_DSI = 0,
+ ICE_FLG_EVLAN_x8100 = 15,
+ ICE_FLG_EVLAN_x9100,
+ ICE_FLG_VLAN_x8100,
+ ICE_FLG_TNL_MAC = 22,
+ ICE_FLG_TNL_VLAN,
+ ICE_FLG_PKT_FRG,
+ ICE_FLG_FIN = 32,
+ ICE_FLG_SYN,
+ ICE_FLG_RST,
+ ICE_FLG_TNL0 = 38,
+ ICE_FLG_TNL1,
+ ICE_FLG_TNL2,
+ ICE_FLG_UDP_GRE,
+ ICE_FLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
@@ -342,12 +342,12 @@ enum ice_tx_desc_cmd_bits {
ICE_TX_DESC_CMD_EOP = 0x0001,
ICE_TX_DESC_CMD_RS = 0x0002,
ICE_TX_DESC_CMD_IL2TAG1 = 0x0008,
- ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */
- ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */
- ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */
+ ICE_TX_DESC_CMD_IIPT_IPV6 = 0x0020,
+ ICE_TX_DESC_CMD_IIPT_IPV4 = 0x0040,
+ ICE_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060,
+ ICE_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100,
+ ICE_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200,
+ ICE_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300,
};
#define ICE_TXD_QW1_OFFSET_S 16
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index fa61203bee26..45e361f72057 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -175,17 +175,14 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
int i;
for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
- u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q));
-
- if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
- break;
+ if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
+ QRX_CTRL_QENA_STAT_M))
+ return 0;
usleep_range(20, 40);
}
- if (i >= ICE_Q_WAIT_MAX_RETRY)
- return -ETIMEDOUT;
- return 0;
+ return -ETIMEDOUT;
}
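
The rewrite of ice_pf_rxq_wait() above returns success from inside the loop and lets the timeout fall out of the bottom, dropping the post-loop retry check. The same poll-until-state-or-timeout shape as a standalone sketch (the register read is stubbed and the names are illustrative):

#include <stdbool.h>
#include <stdint.h>

#define Q_WAIT_MAX_RETRY	5
#define QENA_STAT_BIT		(1u << 2)

/* Stub standing in for a hardware register read */
static uint32_t read_queue_ctrl(int q) { (void)q; return QENA_STAT_BIT; }

/* Poll until the queue-enable status matches the requested state.
 * Returns 0 on success, -1 once the retries are exhausted.
 */
static int rxq_wait(int q, bool ena)
{
	for (int i = 0; i < Q_WAIT_MAX_RETRY; i++) {
		if (ena == !!(read_queue_ctrl(q) & QENA_STAT_BIT))
			return 0;
		/* the driver sleeps here with usleep_range(20, 40) */
	}
	return -1;	/* stands in for -ETIMEDOUT */
}
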
/**
@@ -279,25 +276,50 @@ err_txrings:
}
/**
- * ice_vsi_set_num_qs - Set num queues, descriptors and vectors for a VSI
+ * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
* @vsi: the VSI being configured
+ */
+static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
+{
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
+ vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
+ break;
+ default:
+ dev_dbg(&vsi->back->pdev->dev,
+ "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ vsi->type);
+ break;
+ }
+}
+
+/**
+ * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
+ * @vsi: the VSI being configured
+ * @vf_id: ID of the VF being configured
*
 * Sets the queue, descriptor and vector counts on the VSI; returns nothing
*/
-static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
struct ice_pf *pf = vsi->back;
+ struct ice_vf *vf = NULL;
+
+ if (vsi->type == ICE_VSI_VF)
+ vsi->vf_id = vf_id;
+
switch (vsi->type) {
case ICE_VSI_PF:
vsi->alloc_txq = pf->num_lan_tx;
vsi->alloc_rxq = pf->num_lan_rx;
- vsi->num_desc = ALIGN(ICE_DFLT_NUM_DESC, ICE_REQ_DESC_MULTIPLE);
vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
break;
case ICE_VSI_VF:
- vsi->alloc_txq = pf->num_vf_qps;
- vsi->alloc_rxq = pf->num_vf_qps;
+ vf = &pf->vf[vsi->vf_id];
+ vsi->alloc_txq = vf->num_vf_qs;
+ vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector
@@ -310,6 +332,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
vsi->type);
break;
}
+
+ ice_vsi_set_num_desc(vsi);
}
/**
@@ -455,10 +479,12 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @type: type of VSI
+ * @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
-static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
+static struct ice_vsi *
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
struct ice_vsi *vsi = NULL;
@@ -484,7 +510,10 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
vsi->idx = pf->next_vsi;
vsi->work_lmt = ICE_DFLT_IRQ_WORK;
- ice_vsi_set_num_qs(vsi);
+ if (type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
case ICE_VSI_PF:
@@ -579,11 +608,10 @@ err_scatter:
/**
* __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
- * @qs_cfg: gathered variables needed for PF->VSI queues assignment
+ * @qs_cfg: gathered variables needed for pf->vsi queues assignment
*
- * This is an internal function for assigning queues from the PF to VSI and
- * initially tries to find contiguous space. If it is not successful to find
- * contiguous space, then it tries with the scatter approach.
+ * This function first tries to find contiguous space. If it is not successful,
+ * it tries with the scatter approach.
*
 * Return 0 on success and -ENOMEM when no space is left in the PF queue bitmap
*/
@@ -827,7 +855,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
/* find the (rounded up) power-of-2 of qcount */
pow = order_base_2(qcount_rx);
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
/* TC is not enabled */
vsi->tc_cfg.tc_info[i].qoffset = 0;
@@ -852,7 +880,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
tx_count += tx_numq_tc;
ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
}
- vsi->num_rxq = offset;
+
+ /* If offset is non-zero, it was computed from the TCs enabled on
+ * this VSI. Otherwise fall back to qcount_rx, which is always
+ * non-zero because it is derived from the VSI's allocated Rx
+ * queues, of which there is at least one.
+ */
+ if (offset)
+ vsi->num_rxq = offset;
+ else
+ vsi->num_rxq = qcount_rx;
+
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
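
For context on the offset/qcount_rx fallback above: ice_vsi_setup_q_map() gives each enabled TC a power-of-two queue region (pow = order_base_2(qcount_rx)) and advances offset by qcount_rx per enabled TC, so offset ends up as the total Rx queues in use. A standalone sketch of that accumulation, assuming order_base_2() returns the exponent of the next power of two at or above its argument:

#include <stdint.h>
#include <stdio.h>

/* Assumed semantics: smallest e with 2^e >= n */
static unsigned int order_base_2(unsigned int n)
{
	unsigned int e = 0;

	while ((1u << e) < n)
		e++;
	return e;
}

int main(void)
{
	uint8_t ena_tc = 0x5;		/* TCs 0 and 2 enabled */
	unsigned int qcount_rx = 6;	/* Rx queues per enabled TC */
	unsigned int pow = order_base_2(qcount_rx);	/* 3: region of 8 */
	unsigned int offset = 0;

	for (int tc = 0; tc < 8; tc++) {
		if (!(ena_tc & (1u << tc)))
			continue;
		printf("TC%d: qoffset=%u region=%u\n", tc, offset, 1u << pow);
		offset += qcount_rx;
	}
	printf("num_rxq=%u\n", offset);	/* 12 */
	return 0;
}
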
@@ -923,6 +962,7 @@ static int ice_vsi_init(struct ice_vsi *vsi)
if (!ctxt)
return -ENOMEM;
+ ctxt->info = vsi->info;
switch (vsi->type) {
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
@@ -948,6 +988,14 @@ static int ice_vsi_init(struct ice_vsi *vsi)
ctxt->info.sw_id = vsi->port_info->sw_id;
ice_vsi_setup_q_map(vsi, ctxt);
+ /* Enable MAC antispoofing when a VF VSI is initialized or updated */
+ if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt->info.sec_flags |=
+ ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
+ }
+
ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
if (ret) {
dev_err(&pf->pdev->dev,
@@ -1215,7 +1263,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->ring_active = false;
ring->vsi = vsi;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_tx_desc;
vsi->tx_rings[i] = ring;
}
@@ -1234,7 +1282,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = &pf->pdev->dev;
- ring->count = vsi->num_desc;
+ ring->count = vsi->num_rx_desc;
vsi->rx_rings[i] = ring;
}
@@ -1640,7 +1688,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
num_q_grps = 1;
/* set up and configure the Tx queues for each enabled TC */
- for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) {
+ ice_for_each_traffic_class(tc) {
if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
break;
@@ -1717,6 +1765,37 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
}
/**
+ * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
+ * @hw: board specific structure
+ */
+static void ice_cfg_itr_gran(struct ice_hw *hw)
+{
+ u32 regval = rd32(hw, GLINT_CTL);
+
+ /* no need to update global register if ITR gran is already set */
+ if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
+ (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
+ GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
+ GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
+ GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
+ (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
+ GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
+ return;
+
+ regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
+ GLINT_CTL_ITR_GRAN_200_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
+ GLINT_CTL_ITR_GRAN_100_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
+ GLINT_CTL_ITR_GRAN_50_M) |
+ ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
+ GLINT_CTL_ITR_GRAN_25_M);
+ wr32(hw, GLINT_CTL, regval);
+}
+
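
ice_cfg_itr_gran() spells out the four shift/mask pairs twice, once for the check and once for the write. A table-driven equivalent of the same pattern, sketched with the raw field values (the DIS_AUTOMASK check is omitted for brevity; ICE_ITR_GRAN_US is assumed to be 2, per the kernel-doc above):

#include <stdbool.h>
#include <stdint.h>

#define ITR_GRAN_US	2u

struct gran_field {
	uint32_t mask;
	unsigned int shift;
};

/* One entry per GLINT_CTL_ITR_GRAN_{200,100,50,25} field */
static const struct gran_field fields[] = {
	{ 0xFu << 16, 16 }, { 0xFu << 20, 20 },
	{ 0xFu << 24, 24 }, { 0xFu << 28, 28 },
};

static bool gran_already_set(uint32_t regval)
{
	for (unsigned int i = 0; i < 4; i++)
		if (((regval & fields[i].mask) >> fields[i].shift) !=
		    ITR_GRAN_US)
			return false;
	return true;
}

static uint32_t gran_build_regval(void)
{
	uint32_t regval = 0;

	for (unsigned int i = 0; i < 4; i++)
		regval |= (ITR_GRAN_US << fields[i].shift) & fields[i].mask;
	return regval;
}
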
+/**
* ice_cfg_itr - configure the initial interrupt throttle values
* @hw: pointer to the HW structure
* @q_vector: interrupt vector that's being configured
@@ -1728,6 +1807,8 @@ static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
static void
ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
{
+ ice_cfg_itr_gran(hw);
+
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
@@ -1738,7 +1819,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
wr32(hw, GLINT_ITR(rc->itr_idx, vector),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
@@ -1753,7 +1833,6 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector, u16 vector)
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
- rc->latency_range = ICE_LOW_LATENCY;
wr32(hw, GLINT_ITR(rc->itr_idx, vector),
ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
}
@@ -2025,8 +2104,9 @@ err_alloc_q_ids:
* @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
*/
-int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
- enum ice_disq_rst_src rst_src, u16 rel_vmvf_num)
+int
+ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
{
return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
0);
@@ -2036,10 +2116,11 @@ int ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi,
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
+ * @vlan_promisc: when true, skip marking the security/switch sections valid
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
struct ice_vsi_ctx *ctxt;
struct device *dev;
@@ -2067,8 +2148,10 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
- ICE_AQ_VSI_PROP_SW_VALID);
+ if (!vlan_promisc)
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&vsi->back->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -2112,7 +2195,11 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- vsi = ice_vsi_alloc(pf, type);
+ if (type == ICE_VSI_VF)
+ vsi = ice_vsi_alloc(pf, type, vf_id);
+ else
+ vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
+
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
return NULL;
@@ -2596,6 +2683,7 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
+ struct ice_vf *vf = NULL;
struct ice_pf *pf;
int ret, i;
@@ -2603,16 +2691,38 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
return -EINVAL;
pf = vsi->back;
+ if (vsi->type == ICE_VSI_VF)
+ vf = &pf->vf[vsi->vf_id];
+
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
- ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
- ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx);
- vsi->sw_base_vector = 0;
+
+ if (vsi->type != ICE_VSI_VF) {
+ /* reclaim SW interrupts back to the common pool */
+ ice_free_res(pf->sw_irq_tracker, vsi->sw_base_vector, vsi->idx);
+ pf->num_avail_sw_msix += vsi->num_q_vectors;
+ vsi->sw_base_vector = 0;
+ /* reclaim HW interrupts back to the common pool */
+ ice_free_res(pf->hw_irq_tracker, vsi->hw_base_vector,
+ vsi->idx);
+ pf->num_avail_hw_msix += vsi->num_q_vectors;
+ } else {
+ /* Reclaim VF resources back to the common pool for reset and
+ * rebuild, with vector reassignment
+ */
+ ice_free_res(pf->hw_irq_tracker, vf->first_vector_idx,
+ vsi->idx);
+ pf->num_avail_hw_msix += pf->num_vf_msix;
+ }
vsi->hw_base_vector = 0;
+
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi, false);
ice_dev_onetime_setup(&vsi->back->hw);
- ice_vsi_set_num_qs(vsi);
+ if (vsi->type == ICE_VSI_VF)
+ ice_vsi_set_num_qs(vsi, vf->vf_id);
+ else
+ ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
ice_vsi_set_tc_cfg(vsi);
/* Initialize VSI struct elements and create VSI in FW */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7988a53729a9..519ef59e9e43 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -35,7 +35,7 @@ int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
u16 rel_vmvf_num);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_vsi_delete(struct ice_vsi *vsi);
@@ -70,8 +70,6 @@ void ice_vsi_free_rx_rings(struct ice_vsi *vsi);
void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
-int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 47cc3f905b7f..f7073e046979 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -168,6 +168,39 @@ static bool ice_vsi_fltr_changed(struct ice_vsi *vsi)
}
/**
+ * ice_cfg_promisc - Enable or disable promiscuous mode for a given VSI
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @set_promisc: enable or disable promisc flag request
+ */
+static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ enum ice_status status = 0;
+
+ if (vsi->type != ICE_VSI_PF)
+ return 0;
+
+ if (vsi->vlan_ena) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ set_promisc);
+ } else {
+ if (set_promisc)
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ if (status)
+ return -EIO;
+
+ return 0;
+}
+
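
ice_vsi_sync_fltr() below picks between ICE_MCAST_VLAN_PROMISC_BITS and ICE_MCAST_PROMISC_BITS; their definitions are outside this diff. They presumably OR together bits from the ice_promisc_flags enum that this patch adds to ice_switch.h, roughly:

/* Hypothetical reconstruction of the bit groups used below */
#define ICE_MCAST_PROMISC_BITS		(ICE_PROMISC_MCAST_TX | \
					 ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS	(ICE_PROMISC_MCAST_TX | \
					 ICE_PROMISC_MCAST_RX | \
					 ICE_PROMISC_VLAN_TX  | \
					 ICE_PROMISC_VLAN_RX)
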
+/**
* ice_vsi_sync_fltr - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
*
@@ -182,6 +215,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
struct ice_hw *hw = &pf->hw;
enum ice_status status = 0;
u32 changed_flags = 0;
+ u8 promisc_m;
int err = 0;
if (!vsi->netdev)
@@ -226,7 +260,11 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
/* Add mac addresses in the sync list */
status = ice_add_mac(hw, &vsi->tmp_sync_list);
ice_free_fltr_list(dev, &vsi->tmp_sync_list);
- if (status) {
+ /* If the filter was added successfully, or it already exists, do not
+ * treat it as an error here; continue processing the rest of the
+ * function.
+ */
+ if (status && status != ICE_ERR_ALREADY_EXISTS) {
netdev_err(netdev, "Failed to add MAC filters\n");
/* If there is no more space for new umac filters, vsi
* should go into promiscuous mode. There should be some
@@ -245,8 +283,35 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
}
}
/* check for changes in promiscuous modes */
- if (changed_flags & IFF_ALLMULTI)
- netdev_warn(netdev, "Unsupported configuration\n");
+ if (changed_flags & IFF_ALLMULTI) {
+ if (vsi->current_netdev_flags & IFF_ALLMULTI) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, true);
+ if (err) {
+ netdev_err(netdev, "Error setting Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags &= ~IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ } else if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {
+ if (vsi->vlan_ena)
+ promisc_m = ICE_MCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_MCAST_PROMISC_BITS;
+
+ err = ice_cfg_promisc(vsi, promisc_m, false);
+ if (err) {
+ netdev_err(netdev, "Error clearing Multicast promiscuous mode on VSI %i\n",
+ vsi->vsi_num);
+ vsi->current_netdev_flags |= IFF_ALLMULTI;
+ goto out_promisc;
+ }
+ }
+ }
if (((changed_flags & IFF_PROMISC) || promisc_forced_on) ||
test_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags)) {
@@ -322,7 +387,7 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);
- for (v = 0; v < pf->num_alloc_vsi; v++)
+ ice_for_each_vsi(pf, v)
if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
ice_vsi_sync_fltr(pf->vsi[v])) {
/* come back and try again later */
@@ -342,6 +407,10 @@ ice_prepare_for_reset(struct ice_pf *pf)
{
struct ice_hw *hw = &pf->hw;
+ /* already prepared for reset */
+ if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
+ return;
+
/* Notify VFs of impending reset */
if (ice_check_sq_alive(hw, &hw->mailboxq))
ice_vc_notify_reset(pf);
@@ -394,6 +463,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
ice_rebuild(pf);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
}
@@ -416,10 +486,15 @@ static void ice_reset_subtask(struct ice_pf *pf)
* for the reset now), poll for reset done, rebuild and return.
*/
if (test_bit(__ICE_RESET_OICR_RECV, pf->state)) {
- clear_bit(__ICE_GLOBR_RECV, pf->state);
- clear_bit(__ICE_CORER_RECV, pf->state);
- if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
- ice_prepare_for_reset(pf);
+ /* Perform the largest reset requested */
+ if (test_and_clear_bit(__ICE_CORER_RECV, pf->state))
+ reset_type = ICE_RESET_CORER;
+ if (test_and_clear_bit(__ICE_GLOBR_RECV, pf->state))
+ reset_type = ICE_RESET_GLOBR;
+ /* return if no valid reset type requested */
+ if (reset_type == ICE_RESET_INVAL)
+ return;
+ ice_prepare_for_reset(pf);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -436,6 +511,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
clear_bit(__ICE_PFR_REQ, pf->state);
clear_bit(__ICE_CORER_REQ, pf->state);
clear_bit(__ICE_GLOBR_REQ, pf->state);
+ ice_reset_all_vfs(pf, true);
}
return;
@@ -519,6 +595,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
case ICE_FC_RX_PAUSE:
fc = "RX";
break;
+ case ICE_FC_NONE:
+ fc = "None";
+ break;
default:
fc = "Unknown";
break;
@@ -635,19 +714,70 @@ static void ice_watchdog_subtask(struct ice_pf *pf)
pf->serv_tmr_prev = jiffies;
- if (ice_link_event(pf, pf->hw.port_info))
- dev_dbg(&pf->pdev->dev, "ice_link_event failed\n");
-
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
ice_update_pf_stats(pf);
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->netdev)
ice_update_vsi_stats(pf->vsi[i]);
}
/**
+ * ice_init_link_events - enable/initialize link events
+ * @pi: pointer to the port_info instance
+ *
+ * Returns -EIO on failure, 0 on success
+ */
+static int ice_init_link_events(struct ice_port_info *pi)
+{
+ u16 mask;
+
+ mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+
+ if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to set link event mask for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
+ dev_dbg(ice_hw_to_dev(pi->hw),
+ "Failed to enable link events for port %d\n",
+ pi->lport);
+ return -EIO;
+ }
+
+ return 0;
+}
+
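
ice_init_link_events() builds the event mask by inverting the events it cares about, which implies the firmware treats a set bit as "suppress this event". The arithmetic as a standalone sketch (the event bit values are placeholders, not the real adminq encoding):

#include <stdint.h>
#include <stdio.h>

#define EV_UPDOWN		(1u << 0)	/* placeholder values */
#define EV_MEDIA_NA		(1u << 1)
#define EV_MODULE_QUAL_FAIL	(1u << 2)

int main(void)
{
	/* a set bit masks the event off, so invert the wanted events */
	uint16_t mask = (uint16_t)~(EV_UPDOWN | EV_MEDIA_NA |
				    EV_MODULE_QUAL_FAIL);

	printf("event mask: 0x%04x\n", mask);	/* 0xfff8 */
	return 0;
}
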
+/**
+ * ice_handle_link_event - handle link event via ARQ
+ * @pf: PF that the link event is associated with
+ *
+ * Returns -EINVAL if port_info is NULL, otherwise the status of
+ * ice_link_event()
+ */
+static int ice_handle_link_event(struct ice_pf *pf)
+{
+ struct ice_port_info *port_info;
+ int status;
+
+ port_info = pf->hw.port_info;
+ if (!port_info)
+ return -EINVAL;
+
+ status = ice_link_event(pf, port_info);
+ if (status)
+ dev_dbg(&pf->pdev->dev,
+ "Could not process link event, error %d\n", status);
+
+ return status;
+}
+
+/**
* __ice_clean_ctrlq - helper function to clean controlq rings
* @pf: ptr to struct ice_pf
* @q_type: specific Control queue type
@@ -750,6 +880,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
opcode = le16_to_cpu(event.desc.opcode);
switch (opcode) {
+ case ice_aqc_opc_get_link_status:
+ if (ice_handle_link_event(pf))
+ dev_err(&pf->pdev->dev,
+ "Could not handle link event\n");
+ break;
case ice_mbx_opc_send_msg_to_pf:
ice_vc_process_vf_msg(pf, &event);
break;
@@ -877,6 +1012,18 @@ static void ice_service_task_stop(struct ice_pf *pf)
}
/**
+ * ice_service_task_restart - restart service task and schedule works
+ * @pf: board private structure
+ *
+ * This function is needed for suspend/resume work (e.g. the WoL scenario)
+ */
+static void ice_service_task_restart(struct ice_pf *pf)
+{
+ clear_bit(__ICE_SERVICE_DIS, pf->state);
+ ice_service_task_schedule(pf);
+}
+
+/**
* ice_service_timer - timer callback to schedule service task
* @t: pointer to timer_list
*/
@@ -1111,8 +1258,9 @@ static void ice_set_ctrlq_len(struct ice_hw *hw)
* This is a callback function used by the irq_set_affinity_notifier function
* so that we may register to receive changes to the irq affinity masks.
*/
-static void ice_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
+static void
+ice_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
{
struct ice_q_vector *q_vector =
container_of(notify, struct ice_q_vector, affinity_notify);
@@ -1184,10 +1332,9 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
/* skip this unused q_vector */
continue;
}
- err = devm_request_irq(&pf->pdev->dev,
- pf->msix_entries[base + vector].vector,
- vsi->irq_handler, 0, q_vector->name,
- q_vector);
+ err = devm_request_irq(&pf->pdev->dev, irq_num,
+ vsi->irq_handler, 0,
+ q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
@@ -1656,11 +1803,13 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
*
* net_device_ops implementation for adding vlan ids
*/
-static int ice_vlan_rx_add_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
+ int ret;
if (vid >= VLAN_N_VID) {
netdev_err(netdev, "VLAN id requested %d is out of range %d\n",
@@ -1673,8 +1822,7 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
/* Enable VLAN pruning when VLAN 0 is added */
if (unlikely(!vid)) {
- int ret = ice_cfg_vlan_pruning(vsi, true);
-
+ ret = ice_cfg_vlan_pruning(vsi, true, false);
if (ret)
return ret;
}
@@ -1683,7 +1831,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
* needed to continue allowing all untagged packets since VLAN prune
* list is applied to all packets by the switch
*/
- return ice_vsi_add_vlan(vsi, vid);
+ ret = ice_vsi_add_vlan(vsi, vid);
+ if (!ret) {
+ vsi->vlan_ena = true;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ }
+
+ return ret;
}
/**
@@ -1694,12 +1848,13 @@ static int ice_vlan_rx_add_vid(struct net_device *netdev,
*
* net_device_ops implementation for removing vlan ids
*/
-static int ice_vlan_rx_kill_vid(struct net_device *netdev,
- __always_unused __be16 proto, u16 vid)
+static int
+ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
+ u16 vid)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- int status;
+ int ret;
if (vsi->info.pvid)
return -EINVAL;
@@ -1707,15 +1862,17 @@ static int ice_vlan_rx_kill_vid(struct net_device *netdev,
/* Make sure ice_vsi_kill_vlan is successful before updating VLAN
* information
*/
- status = ice_vsi_kill_vlan(vsi, vid);
- if (status)
- return status;
+ ret = ice_vsi_kill_vlan(vsi, vid);
+ if (ret)
+ return ret;
/* Disable VLAN pruning when VLAN 0 is removed */
if (unlikely(!vid))
- status = ice_cfg_vlan_pruning(vsi, false);
+ ret = ice_cfg_vlan_pruning(vsi, false, false);
- return status;
+ vsi->vlan_ena = false;
+ set_bit(ICE_VSI_FLAG_VLAN_FLTR_CHANGED, vsi->flags);
+ return ret;
}
/**
@@ -2033,23 +2190,6 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
- * ice_verify_itr_gran - verify driver's assumption of ITR granularity
- * @pf: pointer to the PF structure
- *
- * There is no error returned here because the driver will be able to handle a
- * different ITR granularity, but interrupt moderation will not be accurate if
- * the driver's assumptions are not verified. This assumption is made so we can
- * use constants in the hot path instead of accessing structure members.
- */
-static void ice_verify_itr_gran(struct ice_pf *pf)
-{
- if (pf->hw.itr_gran != (ICE_ITR_GRAN_S << 1))
- dev_warn(&pf->pdev->dev,
- "%d ITR granularity assumption is invalid, actual ITR granularity is %d. Interrupt moderation will be inaccurate!\n",
- (ICE_ITR_GRAN_S << 1), pf->hw.itr_gran);
-}
-
-/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2072,9 +2212,10 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
*
* Returns 0 on success, negative on failure
*/
-static int ice_probe(struct pci_dev *pdev,
- const struct pci_device_id __always_unused *ent)
+static int
+ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
+ struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -2086,20 +2227,20 @@ static int ice_probe(struct pci_dev *pdev,
err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
if (err) {
- dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
+ dev_err(dev, "BAR0 I/O map error %d\n", err);
return err;
}
- pf = devm_kzalloc(&pdev->dev, sizeof(*pf), GFP_KERNEL);
+ pf = devm_kzalloc(dev, sizeof(*pf), GFP_KERNEL);
if (!pf)
return -ENOMEM;
/* set up for high or low dma */
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "DMA configuration failed: 0x%x\n", err);
+ dev_err(dev, "DMA configuration failed: 0x%x\n", err);
return err;
}
@@ -2133,12 +2274,12 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_hw(hw);
if (err) {
- dev_err(&pdev->dev, "ice_init_hw failed: %d\n", err);
+ dev_err(dev, "ice_init_hw failed: %d\n", err);
err = -EIO;
goto err_exit_unroll;
}
- dev_info(&pdev->dev, "firmware %d.%d.%05d api %d.%d\n",
+ dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
hw->api_maj_ver, hw->api_min_ver);
@@ -2152,8 +2293,8 @@ static int ice_probe(struct pci_dev *pdev,
goto err_init_pf_unroll;
}
- pf->vsi = devm_kcalloc(&pdev->dev, pf->num_alloc_vsi,
- sizeof(*pf->vsi), GFP_KERNEL);
+ pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
+ GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
goto err_init_pf_unroll;
@@ -2161,8 +2302,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_init_interrupt_scheme(pf);
if (err) {
- dev_err(&pdev->dev,
- "ice_init_interrupt_scheme failed: %d\n", err);
+ dev_err(dev, "ice_init_interrupt_scheme failed: %d\n", err);
err = -EIO;
goto err_init_interrupt_unroll;
}
@@ -2178,15 +2318,13 @@ static int ice_probe(struct pci_dev *pdev,
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) {
err = ice_req_irq_msix_misc(pf);
if (err) {
- dev_err(&pdev->dev,
- "setup of misc vector failed: %d\n", err);
+ dev_err(dev, "setup of misc vector failed: %d\n", err);
goto err_init_interrupt_unroll;
}
}
/* create switch struct for the switch element created by FW on boot */
- pf->first_sw = devm_kzalloc(&pdev->dev, sizeof(*pf->first_sw),
- GFP_KERNEL);
+ pf->first_sw = devm_kzalloc(dev, sizeof(*pf->first_sw), GFP_KERNEL);
if (!pf->first_sw) {
err = -ENOMEM;
goto err_msix_misc_unroll;
@@ -2204,8 +2342,7 @@ static int ice_probe(struct pci_dev *pdev,
err = ice_setup_pf_sw(pf);
if (err) {
- dev_err(&pdev->dev,
- "probe failed due to setup pf switch:%d\n", err);
+ dev_err(dev, "probe failed due to setup pf switch:%d\n", err);
goto err_alloc_sw_unroll;
}
@@ -2214,8 +2351,13 @@ static int ice_probe(struct pci_dev *pdev,
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+ err = ice_init_link_events(pf->hw.port_info);
+ if (err) {
+ dev_err(dev, "ice_init_link_events failed: %d\n", err);
+ goto err_alloc_sw_unroll;
+ }
+
ice_verify_cacheline_size(pf);
- ice_verify_itr_gran(pf);
return 0;
@@ -2227,7 +2369,7 @@ err_msix_misc_unroll:
ice_free_irq_msix_misc(pf);
err_init_interrupt_unroll:
ice_clear_interrupt_scheme(pf);
- devm_kfree(&pdev->dev, pf->vsi);
+ devm_kfree(dev, pf->vsi);
err_init_pf_unroll:
ice_deinit_pf(pf);
ice_deinit_hw(hw);
@@ -2272,6 +2414,136 @@ static void ice_remove(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
}
+/**
+ * ice_pci_err_detected - warning that PCI error has been detected
+ * @pdev: PCI device information struct
+ * @err: the type of PCI error
+ *
+ * Called to warn that something happened on the PCI bus and the error handling
+ * is in progress. Allows the driver to gracefully prepare/handle PCI errors.
+ */
+static pci_ers_result_t
+ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev, "%s: unrecoverable device error %d\n",
+ __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * ice_pci_err_slot_reset - a PCI slot reset has just happened
+ * @pdev: PCI device information struct
+ *
+ * Called to determine if the driver can recover from the PCI slot reset by
+ * using a register read to determine if the device is recoverable.
+ */
+static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ pci_ers_result_t result;
+ int err;
+ u32 reg;
+
+ err = pci_enable_device_mem(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset, error %d\n",
+ err);
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_wake_from_d3(pdev, false);
+
+ /* Check for life */
+ reg = rd32(&pf->hw, GLGEN_RTRIG);
+ if (!reg)
+ result = PCI_ERS_RESULT_RECOVERED;
+ else
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ err = pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (err)
+ dev_dbg(&pdev->dev,
+ "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ err);
+ /* non-fatal, continue */
+
+ return result;
+}
+
+/**
+ * ice_pci_err_resume - restart operations after PCI error recovery
+ * @pdev: PCI device information struct
+ *
+ * Called to allow the driver to bring things back up after PCI error and/or
+ * reset recovery has finished
+ */
+static void ice_pci_err_resume(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!pf) {
+ dev_err(&pdev->dev,
+ "%s failed, device is unrecoverable\n", __func__);
+ return;
+ }
+
+ if (test_bit(__ICE_SUSPENDED, pf->state)) {
+ dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n",
+ __func__);
+ return;
+ }
+
+ ice_do_reset(pf, ICE_RESET_PFR);
+ ice_service_task_restart(pf);
+ mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
+}
+
+/**
+ * ice_pci_err_reset_prepare - prepare device driver for PCI reset
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ if (!test_bit(__ICE_SUSPENDED, pf->state)) {
+ ice_service_task_stop(pf);
+
+ if (!test_bit(__ICE_PREPARED_FOR_RESET, pf->state)) {
+ set_bit(__ICE_PFR_REQ, pf->state);
+ ice_prepare_for_reset(pf);
+ }
+ }
+}
+
+/**
+ * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
+ * @pdev: PCI device information struct
+ */
+static void ice_pci_err_reset_done(struct pci_dev *pdev)
+{
+ ice_pci_err_resume(pdev);
+}
+
/* ice_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
@@ -2289,12 +2561,21 @@ static const struct pci_device_id ice_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
+static const struct pci_error_handlers ice_pci_err_handler = {
+ .error_detected = ice_pci_err_detected,
+ .slot_reset = ice_pci_err_slot_reset,
+ .reset_prepare = ice_pci_err_reset_prepare,
+ .reset_done = ice_pci_err_reset_done,
+ .resume = ice_pci_err_resume
+};
+
static struct pci_driver ice_driver = {
.name = KBUILD_MODNAME,
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
.sriov_configure = ice_sriov_configure,
+ .err_handler = &ice_pci_err_handler
};
/**
@@ -2512,9 +2793,10 @@ ice_fdb_add(struct ndmsg *ndm, struct nlattr __always_unused *tb[],
* @addr: the MAC address entry being added
* @vid: VLAN id
*/
-static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
- struct net_device *dev, const unsigned char *addr,
- __always_unused u16 vid)
+static int
+ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
+ struct net_device *dev, const unsigned char *addr,
+ __always_unused u16 vid)
{
int err;
@@ -2538,8 +2820,8 @@ static int ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
* @netdev: ptr to the netdev being adjusted
* @features: the feature set that the stack is suggesting
*/
-static int ice_set_features(struct net_device *netdev,
- netdev_features_t features)
+static int
+ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2666,7 +2948,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
ice_service_task_schedule(pf);
- return err;
+ return 0;
}
/**
@@ -2693,8 +2975,8 @@ int ice_up(struct ice_vsi *vsi)
* This function fetches stats from the ring considering the atomic operations
* that needs to be performed to read u64 values in 32 bit machine.
*/
-static void ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts,
- u64 *bytes)
+static void
+ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
{
unsigned int start;
*pkts = 0;
@@ -3276,7 +3558,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi)
return;
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3375,16 +3657,12 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and reinit the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
int err;
if (!pf->vsi[i])
continue;
- /* VF VSI rebuild isn't supported yet */
- if (pf->vsi[i]->type == ICE_VSI_VF)
- continue;
-
err = ice_vsi_rebuild(pf->vsi[i]);
if (err) {
dev_err(&pf->pdev->dev,
@@ -3412,7 +3690,7 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
int i;
/* loop through pf->vsi array and replay the VSI if found */
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
if (!pf->vsi[i])
continue;
@@ -3521,9 +3799,7 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_vsi_rebuild;
}
- ice_reset_all_vfs(pf, true);
-
- for (i = 0; i < pf->num_alloc_vsi; i++) {
+ ice_for_each_vsi(pf, i) {
bool link_up;
if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 56049739a250..e0218f4c8f0b 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -276,7 +276,8 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
if (status || num_groups_removed != 1)
- ice_debug(hw, ICE_DBG_SCHED, "remove elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return status;
@@ -360,12 +361,8 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_ROOT_PORT &&
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- enum ice_status status;
- status = ice_sched_remove_elems(hw, node->parent, 1, &teid);
- if (status)
- ice_debug(hw, ICE_DBG_SCHED,
- "remove element failed %d\n", status);
+ ice_sched_remove_elems(hw, node->parent, 1, &teid);
}
parent = node->parent;
/* root has no parent */
@@ -697,7 +694,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
if (status || num_groups_added != 1) {
- ice_debug(hw, ICE_DBG_SCHED, "add elements failed\n");
+ ice_debug(hw, ICE_DBG_SCHED, "add node failed FW error %d\n",
+ hw->adminq.sq_last_status);
devm_kfree(ice_hw_to_dev(hw), buf);
return ICE_ERR_CFG;
}
@@ -1271,42 +1269,6 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
/**
- * ice_sched_rm_vsi_child_nodes - remove VSI child nodes from the tree
- * @pi: port information structure
- * @vsi_node: pointer to the VSI node
- * @num_nodes: pointer to the num nodes that needs to be removed per layer
- * @owner: node owner (lan or rdma)
- *
- * This function removes the VSI child nodes from the tree. It gets called for
- * lan and rdma separately.
- */
-static void
-ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
- struct ice_sched_node *vsi_node, u16 *num_nodes,
- u8 owner)
-{
- struct ice_sched_node *node, *next;
- u8 i, qgl, vsil;
- u16 num;
-
- qgl = ice_sched_get_qgrp_layer(pi->hw);
- vsil = ice_sched_get_vsi_layer(pi->hw);
-
- for (i = qgl; i > vsil; i--) {
- num = num_nodes[i];
- node = ice_sched_get_first_node(pi->hw, vsi_node, i);
- while (node && num) {
- next = node->sibling;
- if (node->owner == owner && !node->num_children) {
- ice_free_sched_node(pi, node);
- num--;
- }
- node = next;
- }
- }
-}
-
-/**
* ice_sched_calc_vsi_support_nodes - calculate number of VSI support nodes
* @hw: pointer to the hw struct
* @tc_node: pointer to TC node
@@ -1446,7 +1408,6 @@ static enum ice_status
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
- u16 prev_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u16 new_num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
@@ -1454,7 +1415,6 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 prev_numqs;
- u8 i;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -1473,36 +1433,25 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
else
return ICE_ERR_PARAM;
- /* num queues are not changed */
- if (prev_numqs == new_numqs)
+ /* new queue count is unchanged or smaller than before; nothing to do */
+ if (new_numqs <= prev_numqs)
return status;
-
- /* calculate number of nodes based on prev/new number of qs */
- if (prev_numqs)
- ice_sched_calc_vsi_child_nodes(hw, prev_numqs, prev_num_nodes);
-
if (new_numqs)
ice_sched_calc_vsi_child_nodes(hw, new_numqs, new_num_nodes);
-
- if (prev_numqs > new_numqs) {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] = prev_num_nodes[i] - new_num_nodes[i];
-
- ice_sched_rm_vsi_child_nodes(pi, vsi_node, new_num_nodes,
- owner);
- } else {
- for (i = 0; i < ICE_AQC_TOPO_MAX_LEVEL_NUM; i++)
- new_num_nodes[i] -= prev_num_nodes[i];
-
- status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
- new_num_nodes, owner);
- if (status)
- return status;
- }
-
+ /* Always keep the maximum queue configuration in the tree. Update
+ * the tree only if the number of queues is greater than the
+ * previous number. This may leave extra nodes in the tree when the
+ * queue count shrinks, but that is harmless; removing them would
+ * complicate the code if they are part of an SRL profile or are
+ * individually rate limited.
+ */
+ status = ice_sched_add_vsi_child_nodes(pi, vsi_handle, tc_node,
+ new_num_nodes, owner);
+ if (status)
+ return status;
vsi_ctx->sched.max_lanq[tc] = new_numqs;
- return status;
+ return 0;
}
/**
@@ -1527,6 +1476,7 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
return ICE_ERR_PARAM;
@@ -1646,8 +1596,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
- u8 i, j = 0;
+ u8 i;
+ ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return status;
mutex_lock(&pi->sched_lock);
@@ -1655,8 +1606,9 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
if (!vsi_ctx)
goto exit_sched_rm_vsi_cfg;
- for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ ice_for_each_traffic_class(i) {
struct ice_sched_node *vsi_node, *tc_node;
+ u8 j = 0;
tc_node = ice_sched_get_tc_node(pi, i);
if (!tc_node)
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 09d1c314b68f..7dcd9ddf54f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -322,8 +322,8 @@ struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
*
* save the VSI context entry for a given VSI handle
*/
-static void ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle,
- struct ice_vsi_ctx *vsi)
+static void
+ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
hw->vsi_ctx[vsi_handle] = vsi;
}
@@ -398,7 +398,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return status;
+ return 0;
}
/**
@@ -643,21 +643,43 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
- fi->lb_en = true;
- /* Do not set lan_en to TRUE if
+ /* Setting LB for prune actions will result in replicated
+ * packets to the internal switch that will be dropped.
+ */
+ if (fi->lkup_type != ICE_SW_LKUP_VLAN)
+ fi->lb_en = true;
+
+ /* Set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
- * 2.1 The lookup is MAC with unicast addr for MAC, OR
- * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+ * 2.1 The lookup is a directional lookup like ethertype,
+ * promiscuous, ethertype-mac, promiscuous-vlan
+ * and default-port OR
+ * 2.2 The lookup is VLAN, OR
+ * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
+ * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
*
- * In all other cases, the LAN enable has to be set to true.
+ * OR
+ *
+ * The switch is a VEPA.
+ *
+ * In all other cases, the LAN enable has to be set to false.
*/
- if (!(hw->evb_veb &&
- ((fi->lkup_type == ICE_SW_LKUP_MAC &&
- is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
- (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+ if (hw->evb_veb) {
+ if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC ||
+ fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
+ fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
+ fi->lkup_type == ICE_SW_LKUP_DFLT ||
+ fi->lkup_type == ICE_SW_LKUP_VLAN ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+ (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+ !is_unicast_ether_addr(fi->l_data.mac.mac_addr)))
+ fi->lan_en = true;
+ } else {
fi->lan_en = true;
+ }
}
}
@@ -2190,6 +2212,291 @@ ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
}
/**
+ * ice_determine_promisc_mask - determine promiscuous mask for a filter
+ * @fi: filter info to parse
+ *
+ * Helper function to determine which ICE_PROMISC_ mask corresponds
+ * to the given filter info.
+ */
+static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
+{
+ u16 vid = fi->l_data.mac_vlan.vlan_id;
+ u8 *macaddr = fi->l_data.mac.mac_addr;
+ bool is_tx_fltr = false;
+ u8 promisc_mask = 0;
+
+ if (fi->flag == ICE_FLTR_TX)
+ is_tx_fltr = true;
+
+ if (is_broadcast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
+ else if (is_multicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
+ else if (is_unicast_ether_addr(macaddr))
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
+ if (vid)
+ promisc_mask |= is_tx_fltr ?
+ ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
+
+ return promisc_mask;
+}
+
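
A simplified, standalone model of the classification performed by ice_determine_promisc_mask() above; the flag values mirror the ice_promisc_flags enum added later in this patch, and the booleans stand in for the kernel's is_*_ether_addr() tests:

#include <stdbool.h>
#include <stdint.h>

#define P_UCAST_RX 0x01
#define P_UCAST_TX 0x02
#define P_MCAST_RX 0x04
#define P_MCAST_TX 0x08
#define P_BCAST_RX 0x10
#define P_BCAST_TX 0x20
#define P_VLAN_RX  0x40
#define P_VLAN_TX  0x80

/* DA class picks the base bit, direction picks Rx vs Tx, and a
 * non-zero VLAN ID adds the VLAN bit. A unicast Rx filter on VLAN 100
 * therefore yields 0x01 | 0x40 = 0x41.
 */
static uint8_t classify(bool bcast, bool mcast, uint16_t vid, bool is_tx)
{
	uint8_t mask;

	if (bcast)
		mask = is_tx ? P_BCAST_TX : P_BCAST_RX;
	else if (mcast)
		mask = is_tx ? P_MCAST_TX : P_MCAST_RX;
	else
		mask = is_tx ? P_UCAST_TX : P_UCAST_RX;

	if (vid)
		mask |= is_tx ? P_VLAN_TX : P_VLAN_RX;
	return mask;
}
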
+/**
+ * ice_remove_promisc - Remove promisc based filter rules
+ * @hw: pointer to the hardware structure
+ * @recp_id: recipe ID for which the rule needs to be removed
+ * @v_list: list of promisc entries
+ */
+static enum ice_status
+ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
+ struct list_head *v_list)
+{
+ struct ice_fltr_list_entry *v_list_itr, *tmp;
+
+ list_for_each_entry_safe(v_list_itr, tmp, v_list, list_entry) {
+ v_list_itr->status =
+ ice_remove_rule_internal(hw, recp_id, v_list_itr);
+ if (v_list_itr->status)
+ return v_list_itr->status;
+ }
+ return 0;
+}
+
+/**
+ * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to clear mode
+ * @promisc_mask: mask of promiscuous config bits to clear
+ * @vid: VLAN ID to clear VLAN promiscuous
+ */
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *fm_entry, *tmp;
+ struct list_head remove_list_head;
+ struct ice_fltr_mgmt_list_entry *itr;
+ struct list_head *rule_head;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ enum ice_status status = 0;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (vid)
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ else
+ recipe_id = ICE_SW_LKUP_PROMISC;
+
+ rule_head = &sw->recp_list[recipe_id].filt_rules;
+ rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
+
+ INIT_LIST_HEAD(&remove_list_head);
+
+ mutex_lock(rule_lock);
+ list_for_each_entry(itr, rule_head, list_entry) {
+ u8 fltr_promisc_mask = 0;
+
+ if (!ice_vsi_uses_fltr(itr, vsi_handle))
+ continue;
+
+ fltr_promisc_mask |=
+ ice_determine_promisc_mask(&itr->fltr_info);
+
+ /* Skip if filter is not completely specified by given mask */
+ if (fltr_promisc_mask & ~promisc_mask)
+ continue;
+
+ status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
+ &remove_list_head,
+ &itr->fltr_info);
+ if (status) {
+ mutex_unlock(rule_lock);
+ goto free_fltr_list;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
+
+free_fltr_list:
+ list_for_each_entry_safe(fm_entry, tmp, &remove_list_head, list_entry) {
+ list_del(&fm_entry->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fm_entry);
+ }
+
+ return status;
+}
+
+/**
+ * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @vid: VLAN ID to set VLAN promiscuous
+ */
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
+{
+ enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
+ struct ice_fltr_list_entry f_list_entry;
+ struct ice_fltr_info new_fltr;
+ enum ice_status status = 0;
+ bool is_tx_fltr;
+ u16 hw_vsi_id;
+ int pkt_type;
+ u8 recipe_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ memset(&new_fltr, 0, sizeof(new_fltr));
+
+ if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
+ new_fltr.l_data.mac_vlan.vlan_id = vid;
+ recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
+ } else {
+ new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
+ recipe_id = ICE_SW_LKUP_PROMISC;
+ }
+
+ /* Separate filters must be set for each direction/packet type
+ * combination, so we will loop over the mask value, store the
+ * individual type, and clear it out in the input mask as it
+ * is found.
+ */
+ while (promisc_mask) {
+ u8 *mac_addr;
+
+ pkt_type = 0;
+ is_tx_fltr = false;
+
+ if (promisc_mask & ICE_PROMISC_UCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_RX;
+ pkt_type = UCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_UCAST_TX;
+ pkt_type = UCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_RX;
+ pkt_type = MCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_MCAST_TX;
+ pkt_type = MCAST_FLTR;
+ is_tx_fltr = true;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_RX;
+ pkt_type = BCAST_FLTR;
+ } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
+ promisc_mask &= ~ICE_PROMISC_BCAST_TX;
+ pkt_type = BCAST_FLTR;
+ is_tx_fltr = true;
+ }
+
+ /* Check for VLAN promiscuous flag */
+ if (promisc_mask & ICE_PROMISC_VLAN_RX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_RX;
+ } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
+ promisc_mask &= ~ICE_PROMISC_VLAN_TX;
+ is_tx_fltr = true;
+ }
+
+ /* Set filter DA based on packet type */
+ mac_addr = new_fltr.l_data.mac.mac_addr;
+ if (pkt_type == BCAST_FLTR) {
+ eth_broadcast_addr(mac_addr);
+ } else if (pkt_type == MCAST_FLTR ||
+ pkt_type == UCAST_FLTR) {
+ /* Use the dummy ether header DA */
+ ether_addr_copy(mac_addr, dummy_eth_header);
+ if (pkt_type == MCAST_FLTR)
+ mac_addr[0] |= 0x1; /* Set multicast bit */
+ }
+
+ /* Need to reset this to zero for all iterations */
+ new_fltr.flag = 0;
+ if (is_tx_fltr) {
+ new_fltr.flag |= ICE_FLTR_TX;
+ new_fltr.src = hw_vsi_id;
+ } else {
+ new_fltr.flag |= ICE_FLTR_RX;
+ new_fltr.src = hw->port_info->lport;
+ }
+
+ new_fltr.fltr_act = ICE_FWD_TO_VSI;
+ new_fltr.vsi_handle = vsi_handle;
+ new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
+ f_list_entry.fltr_info = new_fltr;
+
+ status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
+ if (status)
+ goto set_promisc_exit;
+ }
+
+set_promisc_exit:
+ return status;
+}
+
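The while loop above peels exactly one direction/packet-type bit off the mask per iteration, so each pass programs one filter. A standalone sketch of the mask-peeling idiom (user-space C with illustrative flag values, not driver code):

    #include <stdbool.h>
    #include <stdio.h>

    #define UCAST_RX 0x1
    #define UCAST_TX 0x2
    #define MCAST_RX 0x4
    #define MCAST_TX 0x8

    int main(void)
    {
            unsigned char mask = UCAST_RX | UCAST_TX | MCAST_TX;

            while (mask) {
                    bool is_tx = false;
                    const char *type = "none";

                    if (mask & UCAST_RX) {
                            mask &= ~UCAST_RX;
                            type = "ucast";
                    } else if (mask & UCAST_TX) {
                            mask &= ~UCAST_TX;
                            type = "ucast";
                            is_tx = true;
                    } else if (mask & MCAST_RX) {
                            mask &= ~MCAST_RX;
                            type = "mcast";
                    } else if (mask & MCAST_TX) {
                            mask &= ~MCAST_TX;
                            type = "mcast";
                            is_tx = true;
                    }
                    printf("add %s %s filter\n", type, is_tx ? "Tx" : "Rx");
            }
            return 0;
    }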
+/**
+ * ice_set_vlan_vsi_promisc - set promiscuous mode(s) for all VLANs on a VSI
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle to configure
+ * @promisc_mask: mask of promiscuous config bits
+ * @rm_vlan_promisc: true to clear the given promiscuous mode(s), false to set them
+ *
+ * Configure VSI with all associated VLANs to given promiscuous mode(s)
+ */
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc)
+{
+ struct ice_switch_info *sw = hw->switch_info;
+ struct ice_fltr_list_entry *list_itr, *tmp;
+ struct list_head vsi_list_head;
+ struct list_head *vlan_head;
+ struct mutex *vlan_lock; /* Lock to protect filter rule list */
+ enum ice_status status;
+ u16 vlan_id;
+
+ INIT_LIST_HEAD(&vsi_list_head);
+ vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ mutex_lock(vlan_lock);
+ status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
+ &vsi_list_head);
+ mutex_unlock(vlan_lock);
+ if (status)
+ goto free_fltr_list;
+
+ list_for_each_entry(list_itr, &vsi_list_head, list_entry) {
+ vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
+ if (rm_vlan_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi_handle,
+ promisc_mask, vlan_id);
+ if (status)
+ break;
+ }
+
+free_fltr_list:
+ list_for_each_entry_safe(list_itr, tmp, &vsi_list_head, list_entry) {
+ list_del(&list_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_itr);
+ }
+ return status;
+}
+
+/**
* ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to remove filters from
@@ -2224,12 +2531,14 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
case ICE_SW_LKUP_VLAN:
ice_remove_vlan(hw, &remove_list_head);
break;
+ case ICE_SW_LKUP_PROMISC:
+ case ICE_SW_LKUP_PROMISC_VLAN:
+ ice_remove_promisc(hw, lkup, &remove_list_head);
+ break;
case ICE_SW_LKUP_MAC_VLAN:
case ICE_SW_LKUP_ETHERTYPE:
case ICE_SW_LKUP_ETHERTYPE_MAC:
- case ICE_SW_LKUP_PROMISC:
case ICE_SW_LKUP_DFLT:
- case ICE_SW_LKUP_PROMISC_VLAN:
case ICE_SW_LKUP_LAST:
default:
ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type %d\n", lkup);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index d5ef0bd58bf9..e4ce0720b871 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -178,6 +178,17 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+enum ice_promisc_flags {
+ ICE_PROMISC_UCAST_RX = 0x1,
+ ICE_PROMISC_UCAST_TX = 0x2,
+ ICE_PROMISC_MCAST_RX = 0x4,
+ ICE_PROMISC_MCAST_TX = 0x8,
+ ICE_PROMISC_BCAST_RX = 0x10,
+ ICE_PROMISC_BCAST_TX = 0x20,
+ ICE_PROMISC_VLAN_RX = 0x40,
+ ICE_PROMISC_VLAN_TX = 0x80,
+};
+
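Callers are expected to OR these bits together; in ice_set_vsi_promisc() the two VLAN bits select the ICE_SW_LKUP_PROMISC_VLAN recipe instead of plain ICE_SW_LKUP_PROMISC. A tiny standalone sketch (user-space C, values copied from the enum above):

    #include <stdio.h>

    #define ICE_PROMISC_UCAST_RX 0x01
    #define ICE_PROMISC_UCAST_TX 0x02
    #define ICE_PROMISC_VLAN_RX  0x40
    #define ICE_PROMISC_VLAN_TX  0x80

    int main(void)
    {
            unsigned char mask = ICE_PROMISC_UCAST_RX | ICE_PROMISC_UCAST_TX |
                                 ICE_PROMISC_VLAN_RX;

            if (mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX))
                    printf("use the PROMISC_VLAN lookup recipe\n");
            else
                    printf("use the plain PROMISC lookup recipe\n");
            return 0;
    }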
/* VSI related commands */
enum ice_status
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
@@ -199,10 +210,22 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
+enum ice_status
+ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
enum ice_status ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
+
+/* Promisc/defport setup for VSIs */
enum ice_status
ice_cfg_dflt_vsi(struct ice_hw *hw, u16 vsi_handle, bool set, u8 direction);
+enum ice_status
+ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ u16 vid);
+enum ice_status
+ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
+ bool rm_vlan_promisc);
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index c289d97f477d..a6f7b7feaf3c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -100,8 +100,8 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (i.e. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
- int napi_budget)
+static bool
+ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = vsi->work_lmt;
@@ -236,9 +236,9 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
+ /* round up to nearest page */
tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- 4096);
+ PAGE_SIZE);
tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
@@ -282,8 +282,17 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_buf->page)
continue;
- dma_unmap_page(dev, rx_buf->dma, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_pages(rx_buf->page, 0);
+ /* Invalidate cache lines that may have been written to by
+ * device so that we avoid corrupting memory.
+ */
+ dma_sync_single_range_for_cpu(dev, rx_buf->dma,
+ rx_buf->page_offset,
+ ICE_RXBUF_2048, DMA_FROM_DEVICE);
+
+ /* free resources associated with mapping */
+ dma_unmap_page_attrs(dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
rx_buf->page = NULL;
rx_buf->page_offset = 0;
@@ -339,9 +348,9 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return -ENOMEM;
- /* round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union ice_32byte_rx_desc);
- rx_ring->size = ALIGN(rx_ring->size, 4096);
+ /* round up to nearest page */
+ rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
@@ -389,8 +398,8 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
* Returns true if the page was successfully allocated or
* reused.
*/
-static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *bi)
+static bool
+ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -409,7 +418,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
}
/* map page for use */
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
@@ -423,6 +433,8 @@ static bool ice_alloc_mapped_page(struct ice_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = 0;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
@@ -452,6 +464,12 @@ bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
if (!ice_alloc_mapped_page(rx_ring, bi))
goto no_bufs;
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
+ bi->page_offset,
+ ICE_RXBUF_2048,
+ DMA_FROM_DEVICE);
+
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
@@ -497,61 +515,43 @@ static bool ice_page_is_reserved(struct page *page)
}
/**
- * ice_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_buf: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
- * @skb: sk_buf to place the data into
+ * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
+ * @rx_buf: Rx buffer to adjust
+ * @size: Size of adjustment
*
- * This function will add the data contained in rx_buf->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * Update the offset within the page so that the Rx buffer will be ready to
+ * be reused. For systems with PAGE_SIZE < 8192 this function flips the page
+ * offset to the other half of the page; otherwise the offset is simply
+ * moved forward by @size bytes.
*/
-static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static void
+ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
- unsigned int truesize = ICE_RXBUF_2048;
+ /* flip page offset to other buffer */
+ rx_buf->page_offset ^= size;
#else
- unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
- unsigned int truesize;
-#endif /* PAGE_SIZE < 8192) */
-
- struct page *page;
- unsigned int size;
-
- size = le16_to_cpu(rx_desc->wb.pkt_len) &
- ICE_RX_FLX_DESC_PKT_LEN_M;
-
- page = rx_buf->page;
+ /* move offset up to the next cache line */
+ rx_buf->page_offset += size;
+#endif
+}
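The XOR flip above relies on the Rx buffer being exactly half of a 4K page: XOR-ing the offset with the buffer size toggles between the two halves. A minimal standalone sketch (user-space C; the 2048-byte size mirrors ICE_RXBUF_2048):

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_offset = 0;
            const unsigned int size = 2048; /* ICE_RXBUF_2048, half a 4K page */
            int i;

            for (i = 0; i < 4; i++) {
                    printf("offset = %u\n", page_offset);
                    page_offset ^= size; /* flip to the other half-page buffer */
            }
            return 0;
    }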
+/**
+ * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
+ * @rx_buf: buffer containing the page
+ *
+ * If page is reusable, we have a green light for calling ice_reuse_rx_page,
+ * which will assign the current buffer to the buffer that next_to_alloc is
+ * pointing to; otherwise, the DMA mapping needs to be destroyed and the
+ * page freed.
+ */
+static bool ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
+{
#if (PAGE_SIZE >= 8192)
- truesize = ALIGN(size, L1_CACHE_BYTES);
-#endif /* PAGE_SIZE >= 8192) */
-
- /* will the data fit in the skb we allocated? if so, just
- * copy it as it is pretty small anyway
- */
- if (size <= ICE_RX_HDR_SIZE && !skb_is_nonlinear(skb)) {
- unsigned char *va = page_address(page) + rx_buf->page_offset;
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as-is */
- if (likely(!ice_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- __free_pages(page, 0);
- return false;
- }
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rx_buf->page_offset, size, truesize);
+ unsigned int last_offset = PAGE_SIZE - ICE_RXBUF_2048;
+#endif
+ unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
+ struct page *page = rx_buf->page;
/* avoid re-using remote pages */
if (unlikely(ice_page_is_reserved(page)))
@@ -559,36 +559,61 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_count(page) != 1))
+ if (unlikely((page_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buf->page_offset ^= truesize;
#else
- /* move offset up to the next cache line */
- rx_buf->page_offset += truesize;
-
if (rx_buf->page_offset > last_offset)
return false;
#endif /* PAGE_SIZE < 8192 */
- /* Even if we own the page, we are not allowed to use atomic_set()
- * This would break get_page_unless_zero() users.
+ /* If we have drained the page fragment pool we need to update
+ * the pagecnt_bias and page count so that we fully restock the
+ * number of references the driver holds.
*/
- get_page(rx_buf->page);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
+ rx_buf->pagecnt_bias = USHRT_MAX;
+ }
return true;
}
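ice_can_reuse_rx_page() leans on the pagecnt_bias scheme introduced in this patch: page_ref_add() takes USHRT_MAX references once up front, the bias is decremented per buffer handoff, and page_count - pagecnt_bias counts references held outside the driver. A standalone model of the bookkeeping (user-space C, simplified to plain integers):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int page_count = USHRT_MAX;   /* after page_ref_add() */
            unsigned int pagecnt_bias = USHRT_MAX;

            pagecnt_bias--; /* ice_get_rx_buf(): one frag handed to an skb */
            printf("reusable: %d\n", (page_count - pagecnt_bias) <= 1); /* 1 */

            page_count++;   /* hypothetical extra reference taken by the stack */
            printf("reusable: %d\n", (page_count - pagecnt_bias) <= 1); /* 0 */
            return 0;
    }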
/**
+ * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
+ * @rx_buf: buffer containing page to add
+ * @skb: sk_buff to place the data into
+ * @size: packet length from rx_desc
+ *
+ * This function will add the data contained in rx_buf->page to the skb.
+ * It will just attach the page as a frag to the skb.
+ * The function will then update the page offset.
+ */
+static void
+ice_add_rx_frag(struct ice_rx_buf *rx_buf, struct sk_buff *skb,
+ unsigned int size)
+{
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->page_offset, size, truesize);
+
+ /* page is being used so we must update the page offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
+}
+
+/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
*/
-static void ice_reuse_rx_page(struct ice_ring *rx_ring,
- struct ice_rx_buf *old_buf)
+static void
+ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -599,121 +624,132 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
nta++;
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- /* transfer page from old buffer to new buffer */
- *new_buf = *old_buf;
+ /* Transfer page from old buffer to new buffer.
+ * Move each member individually to avoid possible store
+ * forwarding stalls and an unnecessary copy of the skb member.
+ */
+ new_buf->dma = old_buf->dma;
+ new_buf->page = old_buf->page;
+ new_buf->page_offset = old_buf->page_offset;
+ new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}
/**
- * ice_fetch_rx_buf - Allocate skb and populate it
+ * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
* @rx_ring: Rx descriptor ring to transact packets on
- * @rx_desc: descriptor containing info written by hardware
+ * @skb: skb to be used
+ * @size: size of buffer to add to skb
*
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function will pull an Rx buffer from the ring and synchronize it
+ * for use by the CPU.
*/
-static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc)
+static struct ice_rx_buf *
+ice_get_rx_buf(struct ice_ring *rx_ring, struct sk_buff **skb,
+ const unsigned int size)
{
struct ice_rx_buf *rx_buf;
- struct sk_buff *skb;
- struct page *page;
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- page = rx_buf->page;
- prefetchw(page);
+ prefetchw(rx_buf->page);
+ *skb = rx_buf->skb;
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
+ rx_buf->page_offset, size,
+ DMA_FROM_DEVICE);
- skb = rx_buf->skb;
+ /* We have pulled a buffer for use, so decrement pagecnt_bias */
+ rx_buf->pagecnt_bias--;
- if (likely(!skb)) {
- u8 *page_addr = page_address(page) + rx_buf->page_offset;
+ return rx_buf;
+}
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+/**
+ * ice_construct_skb - Allocate skb and populate it
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to pull data from
+ * @size: the length of the packet
+ *
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
+ */
+static struct sk_buff *
+ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ unsigned int size)
+{
+ void *va = page_address(rx_buf->page) + rx_buf->page_offset;
+ unsigned int headlen;
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch((void *)(page_addr + L1_CACHE_BYTES));
+ prefetch((u8 *)va + L1_CACHE_BYTES);
#endif /* L1_CACHE_BYTES */
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_buf_failed++;
- return NULL;
- }
-
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
- skb_record_rx_queue(skb, rx_ring->q_index);
- } else {
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
- rx_buf->page_offset,
- ICE_RXBUF_2048,
- DMA_FROM_DEVICE);
+ skb_record_rx_queue(skb, rx_ring->q_index);
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > ICE_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, ICE_RX_HDR_SIZE);
- rx_buf->skb = NULL;
- }
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
- /* pull page into skb */
- if (ice_add_rx_frag(rx_buf, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ice_reuse_rx_page(rx_ring, rx_buf);
- rx_ring->rx_stats.page_reuse_count++;
+ /* if we exhaust the linear part then add what is left as a frag */
+ size -= headlen;
+ if (size) {
+#if (PAGE_SIZE >= 8192)
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#else
+ unsigned int truesize = ICE_RXBUF_2048;
+#endif
+ skb_add_rx_frag(skb, 0, rx_buf->page,
+ rx_buf->page_offset + headlen, size, truesize);
+ /* buffer is used by skb, update page_offset */
+ ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
} else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ /* buffer is unused, reset bias back to rx_buf; data was copied
+ * onto skb's linear part so there's no need for adjusting
+ * page offset and we can reuse this buffer as-is
+ */
+ rx_buf->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buf->page = NULL;
-
return skb;
}
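ice_construct_skb() copies at most ICE_RX_HDR_SIZE bytes (bounded further by eth_get_headlen()) into the skb's linear area and leaves the remainder in the page as a frag. A minimal standalone sketch of that split (user-space C; the 256-byte header size is an assumption, it is not shown in this hunk):

    #include <stdio.h>
    #include <string.h>

    #define RX_HDR_SIZE 256 /* assumed stand-in for ICE_RX_HDR_SIZE */

    int main(void)
    {
            unsigned char page_buf[2048] = { 0 }; /* Rx buffer within the page */
            unsigned char linear[RX_HDR_SIZE];
            unsigned int size = 1000;    /* packet length from the descriptor */
            unsigned int headlen = size;

            if (headlen > RX_HDR_SIZE)
                    headlen = RX_HDR_SIZE; /* eth_get_headlen() caps this */

            memcpy(linear, page_buf, headlen); /* skb linear part */
            size -= headlen;
            if (size)
                    printf("attach remaining %u bytes as a page frag\n", size);
            else
                    printf("whole packet fits in the linear area\n");
            return 0;
    }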
/**
- * ice_pull_tail - ice specific version of skb_pull_tail
- * @skb: pointer to current skb being adjusted
+ * ice_put_rx_buf - Clean up used buffer and either recycle or free
+ * @rx_ring: Rx descriptor ring to transact packets on
+ * @rx_buf: Rx buffer to clean up
*
- * This function is an ice specific version of __pskb_pull_tail. The
- * main difference between this version and the original function is that
- * this function can make several assumptions about the state of things
- * that allow for significant optimizations versus the standard function.
- * As a result we can do things like drop a frag and maintain an accurate
- * truesize for the skb.
+ * This function will clean up the contents of the rx_buf. It will
+ * either recycle the buffer or unmap it and free the associated resources.
*/
-static void ice_pull_tail(struct sk_buff *skb)
+static void ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int pull_len;
- unsigned char *va;
-
- /* it is valid to use page_address instead of kmap since we are
- * working with pages allocated out of the lomem pool per
- * alloc_page(GFP_ATOMIC)
- */
- va = skb_frag_address(frag);
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, ICE_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
+ /* hand second half of page back to the ring */
+ if (ice_can_reuse_rx_page(rx_buf)) {
+ ice_reuse_rx_page(rx_ring, rx_buf);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
+ }
- /* update all of the pointers */
- skb_frag_size_sub(frag, pull_len);
- frag->page_offset += pull_len;
- skb->data_len -= pull_len;
- skb->tail += pull_len;
+ /* clear contents of buffer_info */
+ rx_buf->page = NULL;
+ rx_buf->skb = NULL;
}
/**
@@ -730,10 +766,6 @@ static void ice_pull_tail(struct sk_buff *skb)
*/
static bool ice_cleanup_headers(struct sk_buff *skb)
{
- /* place header in linear portion of buffer */
- if (skb_is_nonlinear(skb))
- ice_pull_tail(skb);
-
/* if eth_skb_pad returns an error the skb was freed */
if (eth_skb_pad(skb))
return true;
@@ -751,8 +783,8 @@ static bool ice_cleanup_headers(struct sk_buff *skb)
* The status_error_len doesn't need to be shifted because it begins
* at offset zero.
*/
-static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
- const u16 stat_err_bits)
+static bool
+ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, const u16 stat_err_bits)
{
return !!(rx_desc->wb.status_error0 &
cpu_to_le16(stat_err_bits));
@@ -769,9 +801,9 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
*/
-static bool ice_is_non_eop(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb)
+static bool
+ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb)
{
u32 ntc = rx_ring->next_to_clean + 1;
@@ -838,8 +870,9 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
*
* skb->protocol must be set before this function is called
*/
-static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
- union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
+static void
+ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
+ union ice_32b_rx_flex_desc *rx_desc, u8 ptype)
{
struct ice_rx_ptype_decoded decoded;
u32 rx_error, rx_status;
@@ -909,9 +942,10 @@ checksum_fail:
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void ice_process_skb_fields(struct ice_ring *rx_ring,
- union ice_32b_rx_flex_desc *rx_desc,
- struct sk_buff *skb, u8 ptype)
+static void
+ice_process_skb_fields(struct ice_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc,
+ struct sk_buff *skb, u8 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
@@ -930,13 +964,12 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
* This function sends the completed packet (via skb) up the stack using
* gro receive functions (with/without vlan tag)
*/
-static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
- u16 vlan_tag)
+static void
+ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (vlan_tag & VLAN_VID_MASK)) {
+ (vlan_tag & VLAN_VID_MASK))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
- }
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
@@ -961,7 +994,9 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
/* start the loop to process RX packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
+ struct ice_rx_buf *rx_buf;
struct sk_buff *skb;
+ unsigned int size;
u16 stat_err_bits;
u16 vlan_tag = 0;
u8 rx_ptype;
@@ -991,11 +1026,24 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
*/
dma_rmb();
+ size = le16_to_cpu(rx_desc->wb.pkt_len) &
+ ICE_RX_FLX_DESC_PKT_LEN_M;
+
+ rx_buf = ice_get_rx_buf(rx_ring, &skb, size);
/* allocate (if needed) and populate skb */
- skb = ice_fetch_rx_buf(rx_ring, rx_desc);
- if (!skb)
+ if (skb)
+ ice_add_rx_frag(rx_buf, skb, size);
+ else
+ skb = ice_construct_skb(rx_ring, rx_buf, size);
+
+ /* exit if we failed to retrieve a buffer */
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buf_failed++;
+ rx_buf->pagecnt_bias++;
break;
+ }
+ ice_put_rx_buf(rx_ring, rx_buf);
cleaned_count++;
/* skip if it is NOP desc */
@@ -1048,18 +1096,257 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_pkts;
}
+static unsigned int ice_itr_divisor(struct ice_port_info *pi)
+{
+ switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_40GB:
+ return ICE_ITR_ADAPTIVE_MIN_INC * 1024;
+ case ICE_AQ_LINK_SPEED_25GB:
+ case ICE_AQ_LINK_SPEED_20GB:
+ return ICE_ITR_ADAPTIVE_MIN_INC * 512;
+ case ICE_AQ_LINK_SPEED_100MB:
+ return ICE_ITR_ADAPTIVE_MIN_INC * 32;
+ default:
+ return ICE_ITR_ADAPTIVE_MIN_INC * 256;
+ }
+}
+
+/**
+ * ice_update_itr - update the adaptive ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
+ * @rc: structure containing ring performance data
+ *
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ */
+static void
+ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
+{
+ unsigned int avg_wire_size, packets, bytes, itr;
+ unsigned long next_update = jiffies;
+ bool container_is_rx;
+
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->itr_setting))
+ return;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ container_is_rx = (&q_vector->rx == rc);
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = container_is_rx ?
+ ICE_ITR_ADAPTIVE_MIN_USECS | ICE_ITR_ADAPTIVE_LATENCY :
+ ICE_ITR_ADAPTIVE_MAX_USECS | ICE_ITR_ADAPTIVE_LATENCY;
+
+ /* If we didn't update within up to 1 - 2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ packets = rc->total_pkts;
+ bytes = rc->total_bytes;
+
+ if (container_is_rx) {
+ /* If this is Rx and there are 1 to 4 packets totaling less than
+ * 9000 bytes, assume there is insufficient data to use the bulk
+ * rate limiting approach unless Tx is already in bulk rate
+ * limiting. We are likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & ICE_ITR_ADAPTIVE_LATENCY)) {
+ itr = ICE_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == ICE_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & ICE_ITR_MASK) ==
+ ICE_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~ICE_ITR_ADAPTIVE_LATENCY;
+ }
+
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
+ */
+ if (packets < 56) {
+ itr = rc->target_itr + ICE_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= ICE_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr >>= 1;
+ itr &= ICE_ITR_MASK;
+ if (itr < ICE_ITR_ADAPTIVE_MIN_USECS)
+ itr = ICE_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
+ }
+
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = ICE_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to fine
+ * tune it, just use the formula below to try and dial in an exact value,
+ * give or take the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
+ *
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
+ */
+ if (avg_wire_size <= 60) {
+ /* Start at 250k ints/sec */
+ avg_wire_size = 4096;
+ } else if (avg_wire_size <= 380) {
+ /* 250K ints/sec to 60K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 1696;
+ } else if (avg_wire_size <= 1084) {
+ /* 60K ints/sec to 36K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 36K ints/sec to 30K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 30K ints/sec */
+ avg_wire_size = 32256;
+ }
+
+ /* If we are in low latency mode, halve our delay, which doubles the
+ * rate to somewhere between 100K and 16K ints/sec
+ */
+ */
+ if (itr & ICE_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size >>= 1;
+
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ itr += DIV_ROUND_UP(avg_wire_size,
+ ice_itr_divisor(q_vector->vsi->port_info)) *
+ ICE_ITR_ADAPTIVE_MIN_INC;
+
+ if ((itr & ICE_ITR_MASK) > ICE_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= ICE_ITR_ADAPTIVE_LATENCY;
+ itr += ICE_ITR_ADAPTIVE_MAX_USECS;
+ }
+
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
+ rc->total_bytes = 0;
+ rc->total_pkts = 0;
+}
+
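The adjust_by_size path maps the average wire size through the piecewise-linear table above and then scales the result by the link-speed divisor from ice_itr_divisor(). A standalone sketch of the arithmetic (user-space C; constants copied from this patch, the 40GB divisor is assumed):

    #include <stdio.h>

    #define MIN_INC 0x0002 /* ICE_ITR_ADAPTIVE_MIN_INC */

    static unsigned int scale(unsigned int avg_wire_size)
    {
            if (avg_wire_size <= 60)
                    return 4096; /* ~250k ints/sec */
            if (avg_wire_size <= 380)
                    return avg_wire_size * 40 + 1696;
            if (avg_wire_size <= 1084)
                    return avg_wire_size * 15 + 11452;
            if (avg_wire_size <= 1980)
                    return avg_wire_size * 5 + 22420;
            return 32256; /* ~30k ints/sec plateau */
    }

    int main(void)
    {
            unsigned int divisor = MIN_INC * 1024; /* 40GB link */
            unsigned int sizes[] = { 64, 512, 1500, 9000 };
            unsigned int i;

            for (i = 0; i < 4; i++) {
                    unsigned int s = scale(sizes[i]);
                    unsigned int itr = ((s + divisor - 1) / divisor) * MIN_INC;

                    printf("avg_wire_size %4u -> itr %u usecs\n", sizes[i], itr);
            }
            return 0;
    }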
/**
* ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
* @itr_idx: interrupt throttling index
- * @reg_itr: interrupt throttling value adjusted based on ITR granularity
+ * @itr: interrupt throttling value in usecs
*/
-static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
+static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
+ /* The ITR value is reported in microseconds, and the register value is
+ * recorded in 2 microsecond units. For this reason we only need to
+ * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
+ * granularity as a shift instead of division. The mask makes sure the
+ * ITR value is never odd so we don't accidentally write into the field
+ * prior to the ITR field.
+ */
+ itr &= ICE_ITR_MASK;
+
return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
- (reg_itr << GLINT_DYN_CTL_INTERVAL_S);
+ (itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}
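ice_buildreg_itr() composes GLINT_DYN_CTL from flag bits plus the shifted index and interval fields. A standalone sketch of the composition (user-space C; the field positions are assumptions taken from the usual GLINT_DYN_CTL layout and are not defined in this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define INTENA_M    0x1    /* GLINT_DYN_CTL_INTENA_M (assumed bit 0) */
    #define CLEARPBA_M  0x2    /* GLINT_DYN_CTL_CLEARPBA_M (assumed bit 1) */
    #define ITR_INDX_S  3      /* GLINT_DYN_CTL_ITR_INDX_S (assumed) */
    #define INTERVAL_S  5      /* GLINT_DYN_CTL_INTERVAL_S (assumed) */
    #define ITR_GRAN_S  1      /* ICE_ITR_GRAN_S from this patch */
    #define ITR_MASK    0x1FFE /* ICE_ITR_MASK from this patch */

    static uint32_t buildreg_itr(uint16_t itr_idx, uint16_t itr)
    {
            itr &= ITR_MASK; /* keep the value even and within range */

            return INTENA_M | CLEARPBA_M |
                   ((uint32_t)itr_idx << ITR_INDX_S) |
                   ((uint32_t)itr << (INTERVAL_S - ITR_GRAN_S));
    }

    int main(void)
    {
            /* 50 us on ITR index 0: 50/2 = 25 lands in the INTERVAL field */
            printf("0x%08x\n", buildreg_itr(0, 50));
            return 0;
    }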
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
+
/**
* ice_update_ena_itr - Update ITR and re-enable MSIX interrupt
* @vsi: the VSI associated with the q_vector
@@ -1068,10 +1355,14 @@ static u32 ice_buildreg_itr(int itr_idx, u16 reg_itr)
static void
ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
- struct ice_hw *hw = &vsi->back->hw;
- struct ice_ring_container *rc;
+ struct ice_ring_container *tx = &q_vector->tx;
+ struct ice_ring_container *rx = &q_vector->rx;
u32 itr_val;
+ /* This will do nothing if dynamic updates are not enabled */
+ ice_update_itr(q_vector, tx);
+ ice_update_itr(q_vector, rx);
+
/* This block of logic allows us to get away with only updating
* one ITR value with each interrupt. The idea is to perform a
* pseudo-lazy update with the following criteria.
@@ -1080,35 +1371,36 @@ ice_update_ena_itr(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
* 2. If we must reduce an ITR that is given highest priority.
* 3. We then give priority to increasing ITR based on amount.
*/
- if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
- rc = &q_vector->rx;
+ if (rx->target_itr < rx->current_itr) {
/* Rx ITR needs to be reduced, this is highest priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
- ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
- (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
- rc = &q_vector->tx;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((tx->target_itr < tx->current_itr) ||
+ ((rx->target_itr - rx->current_itr) <
+ (tx->target_itr - tx->current_itr))) {
/* Tx ITR needs to be reduced, this is second priority
* Tx ITR needs to be increased more than Rx, fourth priority
*/
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
- } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
- rc = &q_vector->rx;
+ itr_val = ice_buildreg_itr(tx->itr_idx, tx->target_itr);
+ tx->current_itr = tx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (rx->current_itr != rx->target_itr) {
/* Rx ITR needs to be increased, third priority */
- itr_val = ice_buildreg_itr(rc->itr_idx, rc->target_itr);
- rc->current_itr = rc->target_itr;
+ itr_val = ice_buildreg_itr(rx->itr_idx, rx->target_itr);
+ rx->current_itr = rx->target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
} else {
/* Still have to re-enable the interrupts */
itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
- if (!test_bit(__ICE_DOWN, vsi->state)) {
- int vector = vsi->hw_base_vector + q_vector->v_idx;
-
- wr32(hw, GLINT_DYN_CTL(vector), itr_val);
- }
+ if (!test_bit(__ICE_DOWN, vsi->state))
+ wr32(&vsi->back->hw,
+ GLINT_DYN_CTL(vsi->hw_base_vector + q_vector->v_idx),
+ itr_val);
}
/**
@@ -1354,7 +1646,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index fc358ea81816..60131b84b021 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -47,6 +47,9 @@
#define ICE_TX_FLAGS_VLAN_M 0xffff0000
#define ICE_TX_FLAGS_VLAN_S 16
+#define ICE_RX_DMA_ATTR \
+ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
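DMA_ATTR_SKIP_CPU_SYNC shifts responsibility for cache maintenance onto the driver, which is why this patch pairs dma_map_page_attrs()/dma_unmap_page_attrs() with explicit dma_sync_single_range_for_device()/..._for_cpu() calls. A hedged kernel-style sketch of the resulting discipline (not a standalone program; dev, page, offset, and len stand in for the driver's real state):

    dma = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
                             DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

    /* before giving the buffer to hardware */
    dma_sync_single_range_for_device(dev, dma, offset, len, DMA_FROM_DEVICE);

    /* ... device writes the packet ... */

    /* before the CPU reads the packet data */
    dma_sync_single_range_for_cpu(dev, dma, offset, len, DMA_FROM_DEVICE);

    /* teardown: no implicit sync happens here either */
    dma_unmap_page_attrs(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE,
                         ICE_RX_DMA_ATTR);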
struct ice_tx_buf {
struct ice_tx_desc *next_to_watch;
struct sk_buff *skb;
@@ -73,6 +76,7 @@ struct ice_rx_buf {
dma_addr_t dma;
struct page *page;
unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -124,10 +128,17 @@ enum ice_rx_dtype {
#define ICE_ITR_DYNAMIC 0x8000 /* used as flag for itr_setting */
#define ITR_IS_DYNAMIC(setting) (!!((setting) & ICE_ITR_DYNAMIC))
#define ITR_TO_REG(setting) ((setting) & ~ICE_ITR_DYNAMIC)
-#define ICE_ITR_GRAN_S 1 /* Assume ITR granularity is 2us */
+#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
+#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
+#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define ICE_ITR_ADAPTIVE_MAX_USECS 0x00FA
+#define ICE_ITR_ADAPTIVE_LATENCY 0x8000
+#define ICE_ITR_ADAPTIVE_BULK 0x0000
+
#define ICE_DFLT_INTRL 0
/* Legacy or Advanced Mode Queue */
@@ -173,21 +184,13 @@ struct ice_ring {
u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
-enum ice_latency_range {
- ICE_LOWEST_LATENCY = 0,
- ICE_LOW_LATENCY = 1,
- ICE_BULK_LATENCY = 2,
- ICE_ULTRA_LATENCY = 3,
-};
-
struct ice_ring_container {
/* head of linked-list of rings */
struct ice_ring *ring;
unsigned long next_update; /* jiffies value of next queue update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_pkts; /* total packets processed this int */
- enum ice_latency_range latency_range;
- int itr_idx; /* index in the interrupt vector */
+ u16 itr_idx; /* index in the interrupt vector */
u16 target_itr; /* value in usecs divided by the hw->itr_gran */
u16 current_itr; /* value in usecs divided by the hw->itr_gran */
/* high bit set means dynamic ITR, rest is used to store user
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 17086d5b5c33..3a4e67484487 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -24,6 +24,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_LINK BIT_ULL(4)
+#define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6)
#define ICE_DBG_NVM BIT_ULL(7)
#define ICE_DBG_LAN BIT_ULL(8)
@@ -209,6 +210,9 @@ struct ice_nvm_info {
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
+#define ice_for_each_traffic_class(_i) \
+ for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
+
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
@@ -247,7 +251,6 @@ struct ice_sched_vsi_info {
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
- u16 vsi_id;
};
/* driver defines the policy */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 57155b4a59dc..84e51a0a0795 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,6 +5,37 @@
#include "ice_lib.h"
/**
+ * ice_err_to_virt_err - translate errors for VF return code
+ * @ice_err: error return code
+ */
+static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
+{
+ switch (ice_err) {
+ case ICE_SUCCESS:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
+
+/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
* @v_opcode: operation code
@@ -14,7 +45,7 @@
*/
static void
ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
@@ -104,7 +135,8 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
ICE_AQ_LINK_UP);
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
}
@@ -343,11 +375,41 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr)
}
/**
- * ice_vsi_set_pvid - Set port VLAN id for the VSI
- * @vsi: the VSI being changed
+ * ice_vsi_set_pvid_fill_ctxt - fill the VSI context for adding a PVID
+ * @ctxt: the VSI context to fill
* @vid: the VLAN id to set as a PVID
*/
-static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
+static void ice_vsi_set_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt, u16 vid)
+{
+ ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
+ ICE_AQ_VSI_PVLAN_INSERT_PVID |
+ ICE_AQ_VSI_VLAN_EMOD_STR);
+ ctxt->info.pvid = cpu_to_le16(vid);
+ ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_kill_pvid_fill_ctxt - fill the VSI context for removing the PVID
+ * @ctxt: the VSI context to fill
+ */
+static void ice_vsi_kill_pvid_fill_ctxt(struct ice_vsi_ctx *ctxt)
+{
+ ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
+ ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+ ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+}
+
+/**
+ * ice_vsi_manage_pvid - Enable or disable port VLAN for VSI
+ * @vsi: the VSI to update
+ * @vid: the VLAN id to set as a PVID
+ * @enable: true to enable the PVID, false to disable it
+ */
+static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
{
struct device *dev = &vsi->back->pdev->dev;
struct ice_hw *hw = &vsi->back->hw;
@@ -359,46 +421,27 @@ static int ice_vsi_set_pvid(struct ice_vsi *vsi, u16 vid)
if (!ctxt)
return -ENOMEM;
- ctxt->info.vlan_flags = (ICE_AQ_VSI_VLAN_MODE_UNTAGGED |
- ICE_AQ_VSI_PVLAN_INSERT_PVID |
- ICE_AQ_VSI_VLAN_EMOD_STR);
- ctxt->info.pvid = cpu_to_le16(vid);
- ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
+ ctxt->info = vsi->info;
+ if (enable)
+ ice_vsi_set_pvid_fill_ctxt(ctxt, vid);
+ else
+ ice_vsi_kill_pvid_fill_ctxt(ctxt);
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_info(dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
}
- vsi->info.pvid = ctxt->info.pvid;
- vsi->info.vlan_flags = ctxt->info.vlan_flags;
+ vsi->info = ctxt->info;
out:
devm_kfree(dev, ctxt);
return ret;
}
/**
- * ice_vsi_kill_pvid - Remove port VLAN id from the VSI
- * @vsi: the VSI being changed
- */
-static int ice_vsi_kill_pvid(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
-
- if (ice_vsi_manage_vlan_stripping(vsi, false)) {
- dev_err(&pf->pdev->dev, "Error removing Port VLAN on VSI %i\n",
- vsi->vsi_num);
- return -ENODEV;
- }
-
- vsi->info.pvid = 0;
- return 0;
-}
-
-/**
* ice_vf_vsi_setup - Set up a VF VSI
* @pf: board private structure
* @pi: pointer to the port_info instance
@@ -446,8 +489,10 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vsi->hw_base_vector += 1;
/* Check if port VLAN exist before, and restore it accordingly */
- if (vf->port_vlan_id)
- ice_vsi_set_pvid(vsi, vf->port_vlan_id);
+ if (vf->port_vlan_id) {
+ ice_vsi_manage_pvid(vsi, vf->port_vlan_id, true);
+ ice_vsi_add_vlan(vsi, vf->port_vlan_id & ICE_VLAN_M);
+ }
eth_broadcast_addr(broadcast);
@@ -484,6 +529,8 @@ ice_alloc_vsi_res_exit:
*/
static int ice_alloc_vf_res(struct ice_vf *vf)
{
+ struct ice_pf *pf = vf->pf;
+ int tx_rx_queue_left;
int status;
/* setup VF VSI and necessary resources */
@@ -491,6 +538,15 @@ static int ice_alloc_vf_res(struct ice_vf *vf)
if (status)
goto ice_alloc_vf_res_exit;
+ /* Update the number of VF queues, in case the VF had requested
+ * a change in the number of queues
+ */
+ tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ tx_rx_queue_left += ICE_DFLT_QS_PER_VF;
+ if (vf->num_req_qs && vf->num_req_qs <= tx_rx_queue_left &&
+ vf->num_req_qs != vf->num_vf_qs)
+ vf->num_vf_qs = vf->num_req_qs;
+
if (vf->trusted)
set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
else
@@ -548,6 +604,10 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
wr32(hw, GLINT_VECT2FUNC(v), reg);
}
+ /* Map mailbox interrupt. We put an explicit 0 here to remind us that
+ * VF admin queue interrupts will go to VF MSI-X vector 0.
+ */
+ wr32(hw, VPINT_MBX_CTL(abs_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M | 0);
/* set regardless of mapping mode */
wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);
@@ -750,6 +810,47 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
}
/**
+ * ice_vf_set_vsi_promisc - set given VF VSI to given promiscuous mode(s)
+ * @vf: pointer to the VF info
+ * @vsi: the VSI being configured
+ * @promisc_m: mask of promiscuous config bits
+ * @rm_promisc: true to remove, false to add the requested promiscuous filter(s)
+ *
+ * This function configures the VF VSI promiscuous mode for unicast,
+ * multicast, and VLAN traffic, based on the VF request
+ */
+static enum ice_status
+ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
+ bool rm_promisc)
+{
+ struct ice_pf *pf = vf->pf;
+ enum ice_status status = 0;
+ struct ice_hw *hw;
+
+ hw = &pf->hw;
+ if (vf->num_vlan) {
+ status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
+ rm_promisc);
+ } else if (vf->port_vlan_id) {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ vf->port_vlan_id);
+ } else {
+ if (rm_promisc)
+ status = ice_clear_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ else
+ status = ice_set_vsi_promisc(hw, vsi->idx, promisc_m,
+ 0);
+ }
+
+ return status;
+}
+
+/**
* ice_reset_all_vfs - reset all allocated VFs in one go
* @pf: pointer to the PF structure
* @is_vflr: true if VFLR was issued, false if not
@@ -764,6 +865,7 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vf *vf;
int v, i;
/* If we don't have any VFs, then there is nothing to reset */
@@ -778,12 +880,17 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_trigger_vf_reset(&pf->vf[v], is_vflr);
- /* Call Disable LAN Tx queue AQ call with VFR bit set and 0
- * queues to inform Firmware about VF reset.
- */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_dis_vsi_txq(pf->vsi[0]->port_info, 0, NULL, NULL,
- ICE_VF_RESET, v, NULL);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ struct ice_vsi *vsi;
+
+ vf = &pf->vf[v];
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+ ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
+ ice_vsi_stop_rx_rings(vsi);
+ clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+ }
+ }
/* HW requires some time to make sure it can flush the FIFO for a VF
* when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -796,9 +903,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
- struct ice_vf *vf = &pf->vf[v];
u32 reg;
+ vf = &pf->vf[v];
reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_id));
if (!(reg & VPGEN_VFRSTAT_VFRD_M))
break;
@@ -818,8 +925,18 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
usleep_range(10000, 20000);
/* free VF resources to begin resetting the VSI state */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_free_vf_res(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ ice_free_vf_res(vf);
+
+ /* Free VF queues as well, and reallocate later.
+ * If a given VF has a different number of queues
+ * configured, the request for update will come
+ * via mailbox communication.
+ */
+ vf->num_vf_qs = 0;
+ }
if (ice_check_avail_res(pf)) {
dev_err(&pf->pdev->dev,
@@ -828,8 +945,15 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
}
/* Finish the reset on each VF */
- for (v = 0; v < pf->num_alloc_vfs; v++)
- ice_cleanup_and_realloc_vf(&pf->vf[v]);
+ for (v = 0; v < pf->num_alloc_vfs; v++) {
+ vf = &pf->vf[v];
+
+ vf->num_vf_qs = pf->num_vf_qps;
+ dev_dbg(&pf->pdev->dev,
+ "VF-id %d has %d queues configured\n",
+ vf->vf_id, vf->num_vf_qs);
+ ice_cleanup_and_realloc_vf(vf);
+ }
ice_flush(hw);
clear_bit(__ICE_VF_DIS, pf->state);
@@ -847,9 +971,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
{
struct ice_pf *pf = vf->pf;
- struct ice_hw *hw = &pf->hw;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
bool rsd = false;
+ u8 promisc_m;
u32 reg;
int i;
@@ -875,6 +1000,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
vf->vf_id, NULL);
}
+ hw = &pf->hw;
/* poll VPGEN_VFRSTAT reg to make sure
* that reset is complete
*/
@@ -900,6 +1026,21 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
usleep_range(10000, 20000);
+ /* Disable promiscuous modes in case they were enabled;
+ * ignore any error if the disabling process failed
+ */
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
+ if (vf->port_vlan_id || vf->num_vlan)
+ promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
+ else
+ promisc_m = ICE_UCAST_PROMISC_BITS;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ if (ice_vf_set_vsi_promisc(vf, vsi, promisc_m, true))
+ dev_err(&pf->pdev->dev, "disabling promiscuous mode failed\n");
+ }
+
/* free VF resources to begin resetting the VSI state */
ice_free_vf_res(vf);
@@ -938,7 +1079,7 @@ void ice_vc_notify_reset(struct ice_pf *pf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, ICE_SUCCESS,
+ ice_vc_vf_broadcast(pf, VIRTCHNL_OP_EVENT, VIRTCHNL_STATUS_SUCCESS,
(u8 *)&pfe, sizeof(struct virtchnl_pf_event));
}
@@ -961,8 +1102,9 @@ static void ice_vc_notify_vf_reset(struct ice_vf *vf)
pfe.event = VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = PF_EVENT_SEVERITY_CERTAIN_DOOM;
- ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0,
- (u8 *)&pfe, sizeof(pfe), NULL);
+ ice_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe, sizeof(pfe),
+ NULL);
}
/**
@@ -1012,7 +1154,7 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated during reset */
- if (!ice_reset_all_vfs(pf, false))
+ if (!ice_reset_all_vfs(pf, true))
goto err_unroll_sriov;
goto err_unroll_intr;
@@ -1182,8 +1324,9 @@ static void ice_vc_dis_vf(struct ice_vf *vf)
*
* send msg to VF
*/
-static int ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
- enum ice_status v_retval, u8 *msg, u16 msglen)
+static int
+ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
+ enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
enum ice_status aq_ret;
struct ice_pf *pf;
@@ -1243,8 +1386,8 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
if (VF_IS_V10(&vf->vf_ver))
info.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION, ICE_SUCCESS,
- (u8 *)&info,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&info,
sizeof(struct virtchnl_version_info));
}
@@ -1257,15 +1400,15 @@ static int ice_vc_get_ver_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_resource *vfres = NULL;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int len = 0;
int ret;
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto err;
}
@@ -1273,7 +1416,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres = devm_kzalloc(&pf->pdev->dev, len, GFP_KERNEL);
if (!vfres) {
- aq_ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
len = 0;
goto err;
}
@@ -1286,6 +1429,11 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2;
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto err;
+ }
+
if (!vsi->info.pvid)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -1336,7 +1484,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
err:
/* send the response back to the VF */
- ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, aq_ret,
+ ret = ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_VF_RESOURCES, v_ret,
(u8 *)vfres, len);
devm_kfree(&pf->pdev->dev, vfres);
@@ -1368,7 +1516,7 @@ static struct ice_vsi *ice_find_vsi_from_id(struct ice_pf *pf, u16 id)
{
int i;
- for (i = 0; i < pf->num_alloc_vsi; i++)
+ ice_for_each_vsi(pf, i)
if (pf->vsi[i] && pf->vsi[i]->vsi_num == id)
return pf->vsi[i];
@@ -1416,42 +1564,42 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
*/
static int ice_vc_config_rss_key(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_rss_key *vrk =
(struct virtchnl_rss_key *)msg;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrk->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrk->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrk->key_len != ICE_VSIQF_HKEY_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, vrk->key, NULL, 0);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, vrk->key, NULL, 0))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_KEY, v_ret,
NULL, 0);
}
@@ -1465,40 +1613,40 @@ error_param:
static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
{
struct virtchnl_rss_lut *vrl = (struct virtchnl_rss_lut *)msg;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi = NULL;
- enum ice_status aq_ret;
- int ret;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vrl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vrl->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!test_bit(ICE_FLAG_RSS_ENA, vf->pf->flags)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- ret = ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE);
- aq_ret = ret ? ICE_ERR_PARAM : ICE_SUCCESS;
+ if (ice_set_rss(vsi, NULL, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
NULL, 0);
}
@@ -1511,25 +1659,26 @@ error_param:
*/
static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_eth_stats stats;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1540,7 +1689,7 @@ static int ice_vc_get_stats_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_GET_STATS, v_ret,
(u8 *)&stats, sizeof(stats));
}
@@ -1553,29 +1702,30 @@ error_param:
*/
static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1584,15 +1734,15 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
* programmed using ice_vsi_cfg_txqs
*/
if (ice_vsi_start_rx_rings(vsi))
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
/* Set flag to indicate that queues are enabled */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
set_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1606,30 +1756,31 @@ error_param:
*/
static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_queue_select *vqs =
(struct virtchnl_queue_select *)msg;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vqs->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!vqs->rx_queues && !vqs->tx_queues) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vqs->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1637,23 +1788,23 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
dev_err(&vsi->back->pdev->dev,
"Failed to stop tx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
if (ice_vsi_stop_rx_rings(vsi)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop rx rings on VSI %d\n",
vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
/* Clear enabled queues flag */
- if (!aq_ret)
+ if (v_ret == VIRTCHNL_STATUS_SUCCESS)
clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_QUEUES, v_ret,
NULL, 0);
}
@@ -1666,18 +1817,18 @@ error_param:
*/
static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map;
struct ice_vsi *vsi = NULL;
struct ice_pf *pf = vf->pf;
- enum ice_status aq_ret = 0;
unsigned long qmap;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1689,13 +1840,13 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1705,7 +1856,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
struct ice_q_vector *q_vector;
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
q_vector = vsi->q_vectors[i];
@@ -1719,7 +1870,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
struct ice_q_vector *q_vector;
if (!ice_vc_isvalid_q_id(vf, vsi_id, vsi_q_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
q_vector = vsi->q_vectors[i];
@@ -1733,7 +1884,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
ice_vsi_cfg_msix(vsi);
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
NULL, 0);
}
@@ -1746,26 +1897,34 @@ error_param:
*/
static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vsi_queue_config_info *qci =
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
- enum ice_status aq_ret = 0;
+ struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- vsi = ice_find_vsi_from_id(vf->pf, qci->vsi_id);
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF-%d requesting more than supported number of queues: %d\n",
+ vf->vf_id, qci->num_queue_pairs);
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -1775,7 +1934,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
qpi->rxq.vsi_id != qci->vsi_id ||
qpi->rxq.queue_id != qpi->txq.queue_id ||
!ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* copy Tx queue info from VF into VSI */
@@ -1785,13 +1944,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->rx_buf_len = qpi->rxq.databuffer_size;
if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
qpi->rxq.max_pkt_size < 64) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi->max_frame = qpi->rxq.max_pkt_size;
@@ -1802,15 +1961,16 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
*/
vsi->num_txq = qci->num_queue_pairs;
vsi->num_rxq = qci->num_queue_pairs;
+ /* All queues of VF VSI are in TC 0 */
+ vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
+ vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
- if (!ice_vsi_cfg_lan_txqs(vsi) && !ice_vsi_cfg_rxqs(vsi))
- aq_ret = 0;
- else
- aq_ret = ICE_ERR_PARAM;
+ if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
+ v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret,
NULL, 0);
}
@@ -1852,11 +2012,11 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
- enum ice_status ret;
LIST_HEAD(mac_list);
struct ice_vsi *vsi;
int mac_count = 0;
@@ -1869,19 +2029,27 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(&pf->pdev->dev,
- "Can't add more MAC addresses, because VF is not trusted, switch the VF to trusted mode in order to add more functionalities\n");
- ret = ICE_ERR_PARAM;
+ "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ vf->vf_id);
+ /* There is no need to let VF know about not being trusted
+ * to add more MAC addr, so we can just return success message.
+ */
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
@@ -1893,40 +2061,39 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
* already added. Just continue.
*/
dev_info(&pf->pdev->dev,
- "mac %pM already set for VF %d\n",
+ "MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
/* VF can't remove dflt_lan_addr/bcast mac */
dev_err(&pf->pdev->dev,
- "can't remove mac %pM for VF %d\n",
+ "VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
- goto handle_mac_exit;
+ continue;
}
}
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
dev_err(&pf->pdev->dev,
- "invalid mac %pM provided for VF %d\n",
+ "invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
dev_err(&pf->pdev->dev,
- "can't change unicast mac for untrusted VF %d\n",
+ "can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
- ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
/* get here if maddr is multicast or if VF can change mac */
if (ice_add_mac_to_list(vsi, &mac_list, al->list[i].addr)) {
- ret = ICE_ERR_NO_MEMORY;
+ v_ret = VIRTCHNL_STATUS_ERR_NO_MEMORY;
goto handle_mac_exit;
}
mac_count++;
@@ -1934,14 +2101,14 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
/* program the updated filter list */
if (set)
- ret = ice_add_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_add_mac(&pf->hw, &mac_list));
else
- ret = ice_remove_mac(&pf->hw, &mac_list);
+ v_ret = ice_err_to_virt_err(ice_remove_mac(&pf->hw, &mac_list));
- if (ret) {
+ if (v_ret) {
dev_err(&pf->pdev->dev,
- "can't update mac filters for VF %d, error %d\n",
- vf->vf_id, ret);
+ "can't update MAC filters for VF %d, error %d\n",
+ vf->vf_id, v_ret);
} else {
if (set)
vf->num_mac += mac_count;
@@ -1952,7 +2119,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
handle_mac_exit:
ice_free_fltr_list(&pf->pdev->dev, &mac_list);
/* send the response to the VF */
- return ice_vc_send_msg_to_vf(vf, vc_op, ret, NULL, 0);
+ return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
}
/**
@@ -1991,35 +2158,38 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vf_res_request *vfres =
(struct virtchnl_vf_res_request *)msg;
int req_queues = vfres->num_queue_pairs;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ int max_allowed_vf_queues;
int tx_rx_queue_left;
int cur_queues;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
- cur_queues = pf->num_vf_qps;
+ cur_queues = vf->num_vf_qs;
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
+ max_allowed_vf_queues = tx_rx_queue_left + cur_queues;
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
- } else if (req_queues > ICE_MAX_QS_PER_VF) {
+ } else if (req_queues > ICE_MAX_BASE_QS_PER_VF) {
dev_err(&pf->pdev->dev,
"VF %d tried to request more than %d queues.\n",
- vf->vf_id, ICE_MAX_QS_PER_VF);
- vfres->num_queue_pairs = ICE_MAX_QS_PER_VF;
+ vf->vf_id, ICE_MAX_BASE_QS_PER_VF);
+ vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(&pf->pdev->dev,
"VF %d requested %d more queues, but only %d left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
- vfres->num_queue_pairs = tx_rx_queue_left + cur_queues;
+ vfres->num_queue_pairs = min_t(int, max_allowed_vf_queues,
+ ICE_MAX_BASE_QS_PER_VF);
} else {
/* request is successful, then reset VF */
vf->num_req_qs = req_queues;
@@ -2033,7 +2203,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
error_param:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES,
- aq_ret, (u8 *)vfres, sizeof(*vfres));
+ v_ret, (u8 *)vfres, sizeof(*vfres));
}
/**
@@ -2093,11 +2263,12 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
VLAN_VID_MASK));
if (vlan_id || qos) {
- ret = ice_vsi_set_pvid(vsi, vlanprio);
+ ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
goto error_set_pvid;
} else {
- ice_vsi_kill_pvid(vsi);
+ ice_vsi_manage_pvid(vsi, 0, false);
+ vsi->info.pvid = 0;
}
if (vlan_id) {
@@ -2129,48 +2300,57 @@ error_set_pvid:
*/
static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_vlan_filter_list *vfl =
(struct virtchnl_vlan_filter_list *)msg;
- enum ice_status aq_ret = 0;
struct ice_pf *pf = vf->pf;
+ bool vlan_promisc = false;
struct ice_vsi *vsi;
+ struct ice_hw *hw;
+ int status = 0;
+ u8 promisc_m;
int i;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (!ice_vc_isvalid_vsi_id(vf, vfl->vsi_id)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(&pf->pdev->dev,
- "VF is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n");
- aq_ret = ICE_ERR_PARAM;
+ "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ vf->vf_id);
+ /* There is no need to let VF know about being not trusted,
+ * so we can just return success message here
+ */
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(&pf->pdev->dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
goto error_param;
}
}
- vsi = ice_find_vsi_from_id(vf->pf, vfl->vsi_id);
+ hw = &pf->hw;
+ vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
if (vsi->info.pvid) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -2178,23 +2358,47 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
dev_err(&pf->pdev->dev,
"%sable VLAN stripping failed for VSI %i\n",
add_v ? "en" : "dis", vsi->vsi_num);
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
+ if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
+ test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states))
+ vlan_promisc = true;
+
if (add_v) {
for (i = 0; i < vfl->num_elements; i++) {
u16 vid = vfl->vlan_id[i];
- if (!ice_vsi_add_vlan(vsi, vid)) {
- vf->num_vlan++;
+ if (ice_vsi_add_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
- /* Enable VLAN pruning when VLAN 0 is added */
- if (unlikely(!vid))
- if (ice_cfg_vlan_pruning(vsi, true))
- aq_ret = ICE_ERR_PARAM;
+ vf->num_vlan++;
+ /* Enable VLAN pruning when VLAN is added */
+ if (!vlan_promisc) {
+ status = ice_cfg_vlan_pruning(vsi, true, false);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ vid, status);
+ goto error_param;
+ }
} else {
- aq_ret = ICE_ERR_PARAM;
+ /* Enable Ucast/Mcast VLAN promiscuous mode */
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ status = ice_set_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
+ if (status) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(&pf->pdev->dev,
+ "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ vid, status);
+ }
}
}
} else {
@@ -2204,12 +2408,22 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
- if (!ice_vsi_kill_vlan(vsi, vid)) {
- vf->num_vlan--;
+ if (ice_vsi_kill_vlan(vsi, vid)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
+ vf->num_vlan--;
+ /* Disable VLAN pruning when removing VLAN */
+ ice_cfg_vlan_pruning(vsi, false, false);
- /* Disable VLAN pruning when removing VLAN 0 */
- if (unlikely(!vid))
- ice_cfg_vlan_pruning(vsi, false);
+ /* Disable Unicast/Multicast VLAN promiscuous mode */
+ if (vlan_promisc) {
+ promisc_m = ICE_PROMISC_VLAN_TX |
+ ICE_PROMISC_VLAN_RX;
+
+ ice_clear_vsi_promisc(hw, vsi->idx,
+ promisc_m, vid);
}
}
}
@@ -2217,10 +2431,10 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
error_param:
/* send the response to the VF */
if (add_v)
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN, v_ret,
NULL, 0);
else
- return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, aq_ret,
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN, v_ret,
NULL, 0);
}
@@ -2256,22 +2470,22 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
*/
static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_manage_vlan_stripping(vsi, true))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2282,22 +2496,27 @@ error_param:
*/
static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
{
- enum ice_status aq_ret = 0;
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
- aq_ret = ICE_ERR_PARAM;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vsi = pf->vsi[vf->lan_vsi_idx];
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto error_param;
+ }
+
if (ice_vsi_manage_vlan_stripping(vsi, false))
- aq_ret = ICE_ERR_AQ_ERROR;
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
- aq_ret, NULL, 0);
+ v_ret, NULL, 0);
}
/**
@@ -2333,7 +2552,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
- if (err == VIRTCHNL_ERR_PARAM)
+ if (err == VIRTCHNL_STATUS_ERR_PARAM)
err = -EPERM;
else
err = -EINVAL;
@@ -2355,7 +2574,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
error_handler:
if (err) {
- ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_PARAM, NULL, 0);
+ ice_vc_send_msg_to_vf(vf, v_opcode, VIRTCHNL_STATUS_ERR_PARAM,
+ NULL, 0);
dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d, error %d\n",
vf_id, v_opcode, msglen, err);
return;
@@ -2418,7 +2638,8 @@ error_handler:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
v_opcode, vf_id);
- err = ice_vc_send_msg_to_vf(vf, v_opcode, ICE_ERR_NOT_IMPL,
+ err = ice_vc_send_msg_to_vf(vf, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
NULL, 0);
break;
}
@@ -2427,7 +2648,7 @@ error_handler:
* as it is busy with pending work.
*/
dev_info(&pf->pdev->dev,
- "PF failed to honor VF %d, opcode %d\n, error %d\n",
+ "PF failed to honor VF %d, opcode %d, error %d\n",
vf_id, v_opcode, err);
}
}
@@ -2440,8 +2661,8 @@ error_handler:
*
* return VF configuration
*/
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi)
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
@@ -2587,7 +2808,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
netdev_info(netdev,
- "mac on VF %d set to %pM\n. VF driver will be reinitialized\n",
+ "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_dis_vf(vf);
@@ -2690,7 +2911,8 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
/* Notify the VF of its new link state */
- ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe,
+ ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 01470a8ee03a..932e2ab3380b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
+ u16 num_vf_qs; /* num of queues configured per VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
@@ -77,8 +78,8 @@ struct ice_vf {
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
-int ice_get_vf_cfg(struct net_device *netdev, int vf_id,
- struct ifla_vf_info *ivi);
+int
+ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
@@ -86,11 +87,9 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
-int ice_set_vf_port_vlan(struct net_device *netdev, int vf_id,
- u16 vlan_id, u8 qos, __be16 vlan_proto);
-
-int ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
- int max_tx_rate);
+int
+ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
+ __be16 vlan_proto);
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
@@ -162,12 +161,5 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
-ice_set_vf_bw(struct net_device __always_unused *netdev,
- int __always_unused vf_id, int __always_unused min_tx_rate,
- int __always_unused max_tx_rate)
-{
- return -EOPNOTSUPP;
-}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_VIRTCHNL_PF_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c57671068245..c645d9e648e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -3158,8 +3158,8 @@ static int igb_set_eee(struct net_device *netdev,
} else if (!edata->eee_enabled) {
dev_err(&adapter->pdev->dev,
"Setting EEE options are not supported with EEE disabled\n");
- return -EINVAL;
- }
+ return -EINVAL;
+ }
adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 3269d8e94744..acbb5b4f333d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -2480,7 +2480,7 @@ static int igb_set_features(struct net_device *netdev,
else
igb_reset(adapter);
- return 0;
+ return 1;
}
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -3452,6 +3452,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
}
+
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
+
pm_runtime_put_noidle(&pdev->dev);
return 0;
@@ -6026,7 +6029,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 80faccc34cda..0f5534ce27b0 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -29,9 +29,15 @@ unsigned int igc_get_max_rss_queues(struct igc_adapter *adapter);
void igc_set_flag_queue_pairs(struct igc_adapter *adapter,
const u32 max_rss_queues);
int igc_reinit_queues(struct igc_adapter *adapter);
+void igc_write_rss_indir_tbl(struct igc_adapter *adapter);
bool igc_has_link(struct igc_adapter *adapter);
void igc_reset(struct igc_adapter *adapter);
int igc_set_spd_dplx(struct igc_adapter *adapter, u32 spd, u8 dplx);
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags);
+void igc_update_stats(struct igc_adapter *adapter);
extern char igc_driver_name[];
extern char igc_driver_version[];
@@ -51,6 +57,13 @@ extern char igc_driver_version[];
#define IGC_FLAG_VLAN_PROMISC BIT(15)
#define IGC_FLAG_RX_LEGACY BIT(16)
+#define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+
+#define IGC_MRQC_ENABLE_RSS_MQ 0x00000002
+#define IGC_MRQC_RSS_FIELD_IPV4_UDP 0x00400000
+#define IGC_MRQC_RSS_FIELD_IPV6_UDP 0x00800000
+
#define IGC_START_ITR 648 /* ~6000 ints/sec */
#define IGC_4K_ITR 980
#define IGC_20K_ITR 196
@@ -284,15 +297,50 @@ struct igc_q_vector {
struct igc_ring ring[0] ____cacheline_internodealigned_in_smp;
};
+#define MAX_ETYPE_FILTER (4 - 1)
+
+enum igc_filter_match_flags {
+ IGC_FILTER_FLAG_ETHER_TYPE = 0x1,
+ IGC_FILTER_FLAG_VLAN_TCI = 0x2,
+ IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4,
+ IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8,
+};
+
+/* RX network flow classification data structure */
+struct igc_nfc_input {
+ /* Byte layout in order, all values with MSB first:
+ * match_flags - 1 byte
+ * etype - 2 bytes
+ * vlan_tci - 2 bytes
+ */
+ u8 match_flags;
+ __be16 etype;
+ __be16 vlan_tci;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct igc_nfc_filter {
+ struct hlist_node nfc_node;
+ struct igc_nfc_input filter;
+ unsigned long cookie;
+ u16 etype_reg_index;
+ u16 sw_idx;
+ u16 action;
+};
+
struct igc_mac_addr {
u8 addr[ETH_ALEN];
u8 queue;
u8 state; /* bitmask */
};
-#define IGC_MAC_STATE_DEFAULT 0x1
-#define IGC_MAC_STATE_MODIFIED 0x2
-#define IGC_MAC_STATE_IN_USE 0x4
+#define IGC_MAC_STATE_DEFAULT 0x1
+#define IGC_MAC_STATE_IN_USE 0x2
+#define IGC_MAC_STATE_SRC_ADDR 0x4
+#define IGC_MAC_STATE_QUEUE_STEERING 0x8
+
+#define IGC_MAX_RXNFC_FILTERS 16
/* Board specific private data structure */
struct igc_adapter {
@@ -356,12 +404,22 @@ struct igc_adapter {
u16 tx_ring_count;
u16 rx_ring_count;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_skipped;
+ u32 rx_hwtstamp_cleared;
u32 *shadow_vfta;
u32 rss_queues;
+ u32 rss_indir_tbl_init;
+
+ /* RX network flow classification support */
+ struct hlist_head nfc_filter_list;
+ struct hlist_head cls_flower_list;
+ unsigned int nfc_filter_count;
/* lock for RX network flow classification filter */
spinlock_t nfc_lock;
+ bool etype_bitmap[MAX_ETYPE_FILTER];
struct igc_mac_addr *mac_table;
@@ -447,6 +505,10 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
/* forward declaration */
void igc_reinit_locked(struct igc_adapter *);
+int igc_add_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
+int igc_erase_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
index 76d4991d7284..58d1109d7f3f 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.h
+++ b/drivers/net/ethernet/intel/igc/igc_base.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Intel Corporation */
-#ifndef _IGC_BASE_H
-#define _IGC_BASE_H
+#ifndef _IGC_BASE_H_
+#define _IGC_BASE_H_
/* forward declaration */
void igc_rx_fifo_flush_base(struct igc_hw *hw);
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 7d1bdcd1225a..a9a30268de59 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -310,6 +310,12 @@
IGC_RXDEXT_STATERR_CXE | \
IGC_RXDEXT_STATERR_RXE)
+#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
+#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
+#define IGC_MRQC_RSS_FIELD_IPV6 0x00100000
+#define IGC_MRQC_RSS_FIELD_IPV6_TCP 0x00200000
+
/* Header split receive */
#define IGC_RFCTL_IPV6_EX_DIS 0x00010000
#define IGC_RFCTL_LEF 0x00040000
@@ -325,6 +331,10 @@
#define I225_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
#define I225_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+/* Receive Checksum Control */
+#define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */
+#define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */
+
/* GPY211 - I225 defines */
#define GPY_MMD_MASK 0xFFFF0000
#define GPY_MMD_SHIFT 16
@@ -390,4 +400,11 @@
#define IGC_N0_QUEUE -1
+#define IGC_MAX_MAC_HDR_LEN 127
+#define IGC_MAX_NETWORK_HDR_LEN 511
+
+#define IGC_VLAPQF_QUEUE_SEL(_n, q_idx) ((q_idx) << ((_n) * 4))
+#define IGC_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4))
+#define IGC_VLAPQF_QUEUE_MASK 0x03
+
#endif /* _IGC_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index eff37a6c0afa..ac98f1d96892 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -2,10 +2,120 @@
/* Copyright (c) 2018 Intel Corporation */
/* ethtool support for igc */
+#include <linux/if_vlan.h>
#include <linux/pm_runtime.h>
#include "igc.h"
+/* forward declaration */
+struct igc_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define IGC_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \
+ .stat_offset = offsetof(struct igc_adapter, _stat) \
+}
+
+static const struct igc_stats igc_gstrings_stats[] = {
+ IGC_STAT("rx_packets", stats.gprc),
+ IGC_STAT("tx_packets", stats.gptc),
+ IGC_STAT("rx_bytes", stats.gorc),
+ IGC_STAT("tx_bytes", stats.gotc),
+ IGC_STAT("rx_broadcast", stats.bprc),
+ IGC_STAT("tx_broadcast", stats.bptc),
+ IGC_STAT("rx_multicast", stats.mprc),
+ IGC_STAT("tx_multicast", stats.mptc),
+ IGC_STAT("multicast", stats.mprc),
+ IGC_STAT("collisions", stats.colc),
+ IGC_STAT("rx_crc_errors", stats.crcerrs),
+ IGC_STAT("rx_no_buffer_count", stats.rnbc),
+ IGC_STAT("rx_missed_errors", stats.mpc),
+ IGC_STAT("tx_aborted_errors", stats.ecol),
+ IGC_STAT("tx_carrier_errors", stats.tncrs),
+ IGC_STAT("tx_window_errors", stats.latecol),
+ IGC_STAT("tx_abort_late_coll", stats.latecol),
+ IGC_STAT("tx_deferred_ok", stats.dc),
+ IGC_STAT("tx_single_coll_ok", stats.scc),
+ IGC_STAT("tx_multi_coll_ok", stats.mcc),
+ IGC_STAT("tx_timeout_count", tx_timeout_count),
+ IGC_STAT("rx_long_length_errors", stats.roc),
+ IGC_STAT("rx_short_length_errors", stats.ruc),
+ IGC_STAT("rx_align_errors", stats.algnerrc),
+ IGC_STAT("tx_tcp_seg_good", stats.tsctc),
+ IGC_STAT("tx_tcp_seg_failed", stats.tsctfc),
+ IGC_STAT("rx_flow_control_xon", stats.xonrxc),
+ IGC_STAT("rx_flow_control_xoff", stats.xoffrxc),
+ IGC_STAT("tx_flow_control_xon", stats.xontxc),
+ IGC_STAT("tx_flow_control_xoff", stats.xofftxc),
+ IGC_STAT("rx_long_byte_count", stats.gorc),
+ IGC_STAT("tx_dma_out_of_sync", stats.doosync),
+ IGC_STAT("tx_smbus", stats.mgptc),
+ IGC_STAT("rx_smbus", stats.mgprc),
+ IGC_STAT("dropped_smbus", stats.mgpdc),
+ IGC_STAT("os2bmc_rx_by_bmc", stats.o2bgptc),
+ IGC_STAT("os2bmc_tx_by_bmc", stats.b2ospc),
+ IGC_STAT("os2bmc_tx_by_host", stats.o2bspc),
+ IGC_STAT("os2bmc_rx_by_host", stats.b2ogprc),
+ IGC_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
+ IGC_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped),
+ IGC_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+};
+
+#define IGC_NETDEV_STAT(_net_stat) { \
+ .stat_string = __stringify(_net_stat), \
+ .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+ .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
+}
+
+static const struct igc_stats igc_gstrings_net_stats[] = {
+ IGC_NETDEV_STAT(rx_errors),
+ IGC_NETDEV_STAT(tx_errors),
+ IGC_NETDEV_STAT(tx_dropped),
+ IGC_NETDEV_STAT(rx_length_errors),
+ IGC_NETDEV_STAT(rx_over_errors),
+ IGC_NETDEV_STAT(rx_frame_errors),
+ IGC_NETDEV_STAT(rx_fifo_errors),
+ IGC_NETDEV_STAT(tx_fifo_errors),
+ IGC_NETDEV_STAT(tx_heartbeat_errors)
+};
+
+enum igc_diagnostics_results {
+ TEST_REG = 0,
+ TEST_EEP,
+ TEST_IRQ,
+ TEST_LOOP,
+ TEST_LINK
+};
+
+static const char igc_gstrings_test[][ETH_GSTRING_LEN] = {
+ [TEST_REG] = "Register test (offline)",
+ [TEST_EEP] = "Eeprom test (offline)",
+ [TEST_IRQ] = "Interrupt test (offline)",
+ [TEST_LOOP] = "Loopback test (offline)",
+ [TEST_LINK] = "Link test (on/offline)"
+};
+
+#define IGC_TEST_LEN (sizeof(igc_gstrings_test) / ETH_GSTRING_LEN)
+
+#define IGC_GLOBAL_STATS_LEN \
+ (sizeof(igc_gstrings_stats) / sizeof(struct igc_stats))
+#define IGC_NETDEV_STATS_LEN \
+ (sizeof(igc_gstrings_net_stats) / sizeof(struct igc_stats))
+#define IGC_RX_QUEUE_STATS_LEN \
+ (sizeof(struct igc_rx_queue_stats) / sizeof(u64))
+#define IGC_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+#define IGC_QUEUE_STATS_LEN \
+ ((((struct igc_adapter *)netdev_priv(netdev))->num_rx_queues * \
+ IGC_RX_QUEUE_STATS_LEN) + \
+ (((struct igc_adapter *)netdev_priv(netdev))->num_tx_queues * \
+ IGC_TX_QUEUE_STATS_LEN))
+#define IGC_STATS_LEN \
+ (IGC_GLOBAL_STATS_LEN + IGC_NETDEV_STATS_LEN + IGC_QUEUE_STATS_LEN)
+
static const char igc_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IGC_PRIV_FLAGS_LEGACY_RX BIT(0)
"legacy-rx",
@@ -545,6 +655,127 @@ static int igc_set_pauseparam(struct net_device *netdev,
return retval;
}
+static void igc_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *igc_gstrings_test,
+ IGC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IGC_NETDEV_STATS_LEN; i++) {
+ memcpy(p, igc_gstrings_net_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_restart", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_drops", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_csum_err", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_alloc_failed", i);
+ p += ETH_GSTRING_LEN;
+ }
+ /* BUG_ON(p - data != IGC_STATS_LEN * ETH_GSTRING_LEN); */
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, igc_priv_flags_strings,
+ IGC_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int igc_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return IGC_STATS_LEN;
+ case ETH_SS_TEST:
+ return IGC_TEST_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IGC_PRIV_FLAGS_STR_LEN;
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static void igc_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ unsigned int start;
+ struct igc_ring *ring;
+ int i, j;
+ char *p;
+
+ spin_lock(&adapter->stats64_lock);
+ igc_update_stats(adapter);
+
+ for (i = 0; i < IGC_GLOBAL_STATS_LEN; i++) {
+ p = (char *)adapter + igc_gstrings_stats[i].stat_offset;
+ data[i] = (igc_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < IGC_NETDEV_STATS_LEN; j++, i++) {
+ p = (char *)net_stats + igc_gstrings_net_stats[j].stat_offset;
+ data[i] = (igc_gstrings_net_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ u64 restart2;
+
+ ring = adapter->tx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ data[i] = ring->tx_stats.packets;
+ data[i + 1] = ring->tx_stats.bytes;
+ data[i + 2] = ring->tx_stats.restart_queue;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp2);
+ restart2 = ring->tx_stats.restart_queue2;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp2, start));
+ data[i + 2] += restart2;
+
+ i += IGC_TX_QUEUE_STATS_LEN;
+ }
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ data[i] = ring->rx_stats.packets;
+ data[i + 1] = ring->rx_stats.bytes;
+ data[i + 2] = ring->rx_stats.drops;
+ data[i + 3] = ring->rx_stats.csum_err;
+ data[i + 4] = ring->rx_stats.alloc_failed;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ i += IGC_RX_QUEUE_STATS_LEN;
+ }
+ spin_unlock(&adapter->stats64_lock);
+}
+
static int igc_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -643,6 +874,605 @@ static int igc_set_coalesce(struct net_device *netdev,
return 0;
}
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+static int igc_get_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct igc_nfc_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ if (rule->filter.match_flags) {
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->action;
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = rule->filter.etype;
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) {
+ fsp->flow_type |= FLOW_EXT;
+ fsp->h_ext.vlan_tci = rule->filter.vlan_tci;
+ fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_dest,
+ rule->filter.dst_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_dest);
+ }
+ if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ ether_addr_copy(fsp->h_u.ether_spec.h_source,
+ rule->filter.src_addr);
+ /* As we only support matching by the full
+ * mask, return the mask to userspace
+ */
+ eth_broadcast_addr(fsp->m_u.ether_spec.h_source);
+ }
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int igc_get_ethtool_nfc_all(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_nfc_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = IGC_MAX_RXNFC_FILTERS;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int igc_get_rss_hash_opts(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igc */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V4_FLOW:
+ /* Fall through */
+ case AH_ESP_V4_FLOW:
+ /* Fall through */
+ case AH_V4_FLOW:
+ /* Fall through */
+ case ESP_V4_FLOW:
+ /* Fall through */
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V6_FLOW:
+ /* Fall through */
+ case AH_ESP_V6_FLOW:
+ /* Fall through */
+ case AH_V6_FLOW:
+ /* Fall through */
+ case ESP_V6_FLOW:
+ /* Fall through */
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->nfc_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = igc_get_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = igc_get_ethtool_nfc_all(adapter, cmd, rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igc_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGC_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGC_FLAG_RSS_FIELD_IPV6_UDP)
+static int igc_set_rss_hash_opt(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGC_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct igc_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(IGC_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(IGC_MRQC_RSS_FIELD_IPV4_UDP |
+ IGC_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(IGC_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igc_rxnfc_write_etype_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 i;
+ u32 etqf;
+ u16 etype;
+
+ /* find an empty etype filter register */
+ for (i = 0; i < MAX_ETYPE_FILTER; ++i) {
+ if (!adapter->etype_bitmap[i])
+ break;
+ }
+ if (i == MAX_ETYPE_FILTER) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n");
+ return -EINVAL;
+ }
+
+ adapter->etype_bitmap[i] = true;
+
+ etqf = rd32(IGC_ETQF(i));
+ etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK);
+
+ etqf |= IGC_ETQF_FILTER_ENABLE;
+ etqf &= ~IGC_ETQF_ETYPE_MASK;
+ etqf |= (etype & IGC_ETQF_ETYPE_MASK);
+
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf |= ((input->action << IGC_ETQF_QUEUE_SHIFT)
+ & IGC_ETQF_QUEUE_MASK);
+ etqf |= IGC_ETQF_QUEUE_ENABLE;
+
+ wr32(IGC_ETQF(i), etqf);
+
+ input->etype_reg_index = i;
+
+ return 0;
+}
+
+static int igc_rxnfc_write_vlan_prio_filter(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u16 queue_index;
+ u32 vlapqf;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK)
+ >> VLAN_PRIO_SHIFT;
+ queue_index = (vlapqf >> (vlan_priority * 4)) & IGC_VLAPQF_QUEUE_MASK;
+
+ /* check whether this vlan prio is already set */
+ if (vlapqf & IGC_VLAPQF_P_VALID(vlan_priority) &&
+ queue_index != input->action) {
+ dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n");
+ return -EEXIST;
+ }
+
+ vlapqf |= IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf |= IGC_VLAPQF_QUEUE_SEL(vlan_priority, input->action);
+
+ wr32(IGC_VLAPQF, vlapqf);
+
+ return 0;
+}
+
+int igc_add_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int err = -EINVAL;
+
+ if (hw->mac.type == igc_i225 &&
+ !(input->filter.match_flags & ~IGC_FILTER_FLAG_SRC_MAC_ADDR)) {
+ dev_err(&adapter->pdev->dev,
+ "i225 doesn't support flow classification rules specifying only source addresses.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) {
+ err = igc_rxnfc_write_etype_filter(adapter, input);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.dst_addr,
+ input->action, 0);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) {
+ err = igc_add_mac_steering_filter(adapter,
+ input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+ err = min_t(int, err, 0);
+ if (err)
+ return err;
+ }
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ err = igc_rxnfc_write_vlan_prio_filter(adapter, input);
+
+ return err;
+}
+
+static void igc_clear_etype_filter_regs(struct igc_adapter *adapter,
+ u16 reg_index)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 etqf = rd32(IGC_ETQF(reg_index));
+
+ etqf &= ~IGC_ETQF_QUEUE_ENABLE;
+ etqf &= ~IGC_ETQF_QUEUE_MASK;
+ etqf &= ~IGC_ETQF_FILTER_ENABLE;
+
+ wr32(IGC_ETQF(reg_index), etqf);
+
+ adapter->etype_bitmap[reg_index] = false;
+}
+
+static void igc_clear_vlan_prio_filter(struct igc_adapter *adapter,
+ u16 vlan_tci)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u8 vlan_priority;
+ u32 vlapqf;
+
+ vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+
+ vlapqf = rd32(IGC_VLAPQF);
+ vlapqf &= ~IGC_VLAPQF_P_VALID(vlan_priority);
+ vlapqf &= ~IGC_VLAPQF_QUEUE_SEL(vlan_priority,
+ IGC_VLAPQF_QUEUE_MASK);
+
+ wr32(IGC_VLAPQF, vlapqf);
+}
+
+int igc_erase_filter(struct igc_adapter *adapter, struct igc_nfc_filter *input)
+{
+ if (input->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE)
+ igc_clear_etype_filter_regs(adapter,
+ input->etype_reg_index);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI)
+ igc_clear_vlan_prio_filter(adapter,
+ ntohs(input->filter.vlan_tci));
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.src_addr,
+ input->action,
+ IGC_MAC_STATE_SRC_ADDR);
+
+ if (input->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR)
+ igc_del_mac_steering_filter(adapter, input->filter.dst_addr,
+ input->action, 0);
+
+ return 0;
+}
+
+static int igc_update_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct igc_nfc_filter *input,
+ u16 sw_idx)
+{
+ struct igc_nfc_filter *rule, *parent;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = rule;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ if (!input)
+ err = igc_erase_filter(adapter, rule);
+
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ adapter->nfc_filter_count--;
+ }
+
+ /* If no input this was a delete, err should be 0 if a rule was
+ * successfully found and removed from the list else -EINVAL
+ */
+ if (!input)
+ return err;
+
+ /* initialize node */
+ INIT_HLIST_NODE(&input->nfc_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->nfc_node, &parent->nfc_node);
+ else
+ hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list);
+
+ /* update counts */
+ adapter->nfc_filter_count++;
+
+ return 0;
+}
+
+static int igc_add_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct igc_nfc_filter *input, *rule;
+ int err = 0;
+
+ if (!(netdev->hw_features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ /* Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC ||
+ fsp->ring_cookie >= adapter->num_rx_queues) {
+ dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n");
+ return -EINVAL;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= IGC_MAX_RXNFC_FILTERS) {
+ dev_err(&adapter->pdev->dev, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW)
+ return -EINVAL;
+
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+
+ if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) {
+ input->filter.etype = fsp->h_u.ether_spec.h_proto;
+ input->filter.match_flags = IGC_FILTER_FLAG_ETHER_TYPE;
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_source)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_SRC_MAC_ADDR;
+ ether_addr_copy(input->filter.src_addr,
+ fsp->h_u.ether_spec.h_source);
+ }
+
+ /* Only support matching addresses by the full mask */
+ if (is_broadcast_ether_addr(fsp->m_u.ether_spec.h_dest)) {
+ input->filter.match_flags |= IGC_FILTER_FLAG_DST_MAC_ADDR;
+ ether_addr_copy(input->filter.dst_addr,
+ fsp->h_u.ether_spec.h_dest);
+ }
+
+ if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) {
+ if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ input->filter.vlan_tci = fsp->h_ext.vlan_tci;
+ input->filter.match_flags |= IGC_FILTER_FLAG_VLAN_TCI;
+ }
+
+ input->action = fsp->ring_cookie;
+ input->sw_idx = fsp->location;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) {
+ if (!memcmp(&input->filter, &rule->filter,
+ sizeof(input->filter))) {
+ err = -EEXIST;
+ dev_err(&adapter->pdev->dev,
+ "ethtool: this filter is already set\n");
+ goto err_out_w_lock;
+ }
+ }
+
+ err = igc_add_filter(adapter, input);
+ if (err)
+ goto err_out_w_lock;
+
+ igc_update_ethtool_nfc_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->nfc_lock);
+ return 0;
+
+err_out_w_lock:
+ spin_unlock(&adapter->nfc_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int igc_del_ethtool_nfc_entry(struct igc_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
+
+ spin_lock(&adapter->nfc_lock);
+ err = igc_update_ethtool_nfc_entry(adapter, NULL, fsp->location);
+ spin_unlock(&adapter->nfc_lock);
+
+ return err;
+}
+
+static int igc_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igc_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igc_set_rss_hash_opt(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLINS:
+ ret = igc_add_ethtool_nfc_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = igc_del_ethtool_nfc_entry(adapter, cmd);
+ default:
+ break;
+ }
+
+ return ret;
+}
+
void igc_write_rss_indir_tbl(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -885,17 +1715,13 @@ static int igc_get_link_ksettings(struct net_device *netdev,
if (hw->mac.type == igc_i225 &&
(status & IGC_STATUS_SPEED_2500)) {
speed = SPEED_2500;
- hw_dbg("2500 Mbs, ");
} else {
speed = SPEED_1000;
- hw_dbg("1000 Mbs, ");
}
} else if (status & IGC_STATUS_SPEED_100) {
speed = SPEED_100;
- hw_dbg("100 Mbs, ");
} else {
speed = SPEED_10;
- hw_dbg("10 Mbs, ");
}
if ((status & IGC_STATUS_FD) ||
hw->phy.media_type != igc_media_type_copper)
@@ -1011,8 +1837,13 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_ringparam = igc_set_ringparam,
.get_pauseparam = igc_get_pauseparam,
.set_pauseparam = igc_set_pauseparam,
+ .get_strings = igc_get_strings,
+ .get_sset_count = igc_get_sset_count,
+ .get_ethtool_stats = igc_get_ethtool_stats,
.get_coalesce = igc_get_coalesce,
.set_coalesce = igc_set_coalesce,
+ .get_rxnfc = igc_get_rxnfc,
+ .set_rxnfc = igc_set_rxnfc,
.get_rxfh_indir_size = igc_get_rxfh_indir_size,
.get_rxfh = igc_get_rxfh,
.set_rxfh = igc_set_rxfh,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 87a11879bf2d..f79728381e8a 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -620,6 +620,55 @@ static void igc_configure_tx(struct igc_adapter *adapter)
*/
static void igc_setup_mrqc(struct igc_adapter *adapter)
{
+ struct igc_hw *hw = &adapter->hw;
+ u32 j, num_rx_queues;
+ u32 mrqc, rxcsum;
+ u32 rss_key[10];
+
+ netdev_rss_key_fill(rss_key, sizeof(rss_key));
+ for (j = 0; j < 10; j++)
+ wr32(IGC_RSSRK(j), rss_key[j]);
+
+ num_rx_queues = adapter->rss_queues;
+
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGC_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGC_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
+ }
+ igc_write_rss_indir_tbl(adapter);
+
+ /* Disable raw packet checksumming so that the RSS hash is placed in
+ * the descriptor on writeback. There is no need to enable TCP/UDP/IP
+ * checksum offloads, as they are enabled by default.
+ */
+ rxcsum = rd32(IGC_RXCSUM);
+ rxcsum |= IGC_RXCSUM_PCSD;
+
+ /* Enable Receive Checksum Offload for SCTP */
+ rxcsum |= IGC_RXCSUM_CRCOFL;
+
+ /* Don't need to set TUOFL or IPOFL, they default to 1 */
+ wr32(IGC_RXCSUM, rxcsum);
+
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
+ mrqc = IGC_MRQC_RSS_FIELD_IPV4 |
+ IGC_MRQC_RSS_FIELD_IPV4_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6 |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP |
+ IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGC_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
+
+ mrqc |= IGC_MRQC_ENABLE_RSS_MQ;
+
+ wr32(IGC_MRQC, mrqc);
}
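The default indirection-table fill above spreads the IGC_RETA_SIZE entries evenly across the active RSS queues with plain integer division. A standalone sketch of the same arithmetic, using assumed example values (128 entries, 4 queues) rather than driver code:

#include <stdio.h>

int main(void)
{
	const unsigned int reta_size = 128;	/* IGC_RETA_SIZE */
	const unsigned int num_rx_queues = 4;	/* example value */
	unsigned int j;

	/* entries 0..31 -> queue 0, 32..63 -> queue 1, and so on */
	for (j = 0; j < reta_size; j++)
		printf("entry %3u -> queue %u\n", j,
		       (j * num_rx_queues) / reta_size);
	return 0;
}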
/**
@@ -890,7 +939,7 @@ static int igc_tx_map(struct igc_ring *tx_ring,
/* Make sure there is space in the ring for the next send. */
igc_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
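The skb->xmit_more to netdev_xmit_more() conversion seen here recurs throughout this series: the "more packets are queued behind this one" hint moves out of the skb into per-CPU state, so drivers query it at transmit time instead. A minimal sketch of the resulting doorbell-batching idiom, assuming an igc-style ring (not the full igc_tx_map() logic):

/* Only ring the doorbell when the queue was stopped or the stack has
 * no further packets pending for this transmit call; otherwise defer
 * the (expensive) MMIO write and let a later packet trigger it.
 */
static void example_kick_hw(struct igc_ring *tx_ring, u16 next_to_use)
{
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(next_to_use, tx_ring->tail);
}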
@@ -1738,12 +1787,200 @@ void igc_up(struct igc_adapter *adapter)
* igc_update_stats - Update the board statistics counters
* @adapter: board private structure
*/
-static void igc_update_stats(struct igc_adapter *adapter)
+void igc_update_stats(struct igc_adapter *adapter)
{
+ struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+ struct pci_dev *pdev = adapter->pdev;
+ struct igc_hw *hw = &adapter->hw;
+ u64 _bytes, _packets;
+ u64 bytes, packets;
+ unsigned int start;
+ u32 mpc;
+ int i;
+
+ /* Prevent stats update while adapter is being reset, or if the pci
+ * connection is down.
+ */
+ if (adapter->link_speed == 0)
+ return;
+ if (pci_channel_offline(pdev))
+ return;
+
+ packets = 0;
+ bytes = 0;
+
+ rcu_read_lock();
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igc_ring *ring = adapter->rx_ring[i];
+ u32 rqdpc = rd32(IGC_RQDPC(i));
+
+ if (hw->mac.type >= igc_i225)
+ wr32(IGC_RQDPC(i), 0);
+
+ if (rqdpc) {
+ ring->rx_stats.drops += rqdpc;
+ net_stats->rx_fifo_errors += rqdpc;
+ }
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
+ _bytes = ring->rx_stats.bytes;
+ _packets = ring->rx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+
+ net_stats->rx_bytes = bytes;
+ net_stats->rx_packets = packets;
+
+ packets = 0;
+ bytes = 0;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
+ _bytes = ring->tx_stats.bytes;
+ _packets = ring->tx_stats.packets;
+ } while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
+ bytes += _bytes;
+ packets += _packets;
+ }
+ net_stats->tx_bytes = bytes;
+ net_stats->tx_packets = packets;
+ rcu_read_unlock();
+
+ /* read stats registers */
+ adapter->stats.crcerrs += rd32(IGC_CRCERRS);
+ adapter->stats.gprc += rd32(IGC_GPRC);
+ adapter->stats.gorc += rd32(IGC_GORCL);
+ rd32(IGC_GORCH); /* clear GORCH */
+ adapter->stats.bprc += rd32(IGC_BPRC);
+ adapter->stats.mprc += rd32(IGC_MPRC);
+ adapter->stats.roc += rd32(IGC_ROC);
+
+ adapter->stats.prc64 += rd32(IGC_PRC64);
+ adapter->stats.prc127 += rd32(IGC_PRC127);
+ adapter->stats.prc255 += rd32(IGC_PRC255);
+ adapter->stats.prc511 += rd32(IGC_PRC511);
+ adapter->stats.prc1023 += rd32(IGC_PRC1023);
+ adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.symerrs += rd32(IGC_SYMERRS);
+ adapter->stats.sec += rd32(IGC_SEC);
+
+ mpc = rd32(IGC_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
+ adapter->stats.scc += rd32(IGC_SCC);
+ adapter->stats.ecol += rd32(IGC_ECOL);
+ adapter->stats.mcc += rd32(IGC_MCC);
+ adapter->stats.latecol += rd32(IGC_LATECOL);
+ adapter->stats.dc += rd32(IGC_DC);
+ adapter->stats.rlec += rd32(IGC_RLEC);
+ adapter->stats.xonrxc += rd32(IGC_XONRXC);
+ adapter->stats.xontxc += rd32(IGC_XONTXC);
+ adapter->stats.xoffrxc += rd32(IGC_XOFFRXC);
+ adapter->stats.xofftxc += rd32(IGC_XOFFTXC);
+ adapter->stats.fcruc += rd32(IGC_FCRUC);
+ adapter->stats.gptc += rd32(IGC_GPTC);
+ adapter->stats.gotc += rd32(IGC_GOTCL);
+ rd32(IGC_GOTCH); /* clear GOTCH */
+ adapter->stats.rnbc += rd32(IGC_RNBC);
+ adapter->stats.ruc += rd32(IGC_RUC);
+ adapter->stats.rfc += rd32(IGC_RFC);
+ adapter->stats.rjc += rd32(IGC_RJC);
+ adapter->stats.tor += rd32(IGC_TORH);
+ adapter->stats.tot += rd32(IGC_TOTH);
+ adapter->stats.tpr += rd32(IGC_TPR);
+
+ adapter->stats.ptc64 += rd32(IGC_PTC64);
+ adapter->stats.ptc127 += rd32(IGC_PTC127);
+ adapter->stats.ptc255 += rd32(IGC_PTC255);
+ adapter->stats.ptc511 += rd32(IGC_PTC511);
+ adapter->stats.ptc1023 += rd32(IGC_PTC1023);
+ adapter->stats.ptc1522 += rd32(IGC_PTC1522);
+
+ adapter->stats.mptc += rd32(IGC_MPTC);
+ adapter->stats.bptc += rd32(IGC_BPTC);
+
+ adapter->stats.tpt += rd32(IGC_TPT);
+ adapter->stats.colc += rd32(IGC_COLC);
+
+ adapter->stats.algnerrc += rd32(IGC_ALGNERRC);
+
+ adapter->stats.tsctc += rd32(IGC_TSCTC);
+ adapter->stats.tsctfc += rd32(IGC_TSCTFC);
+
+ adapter->stats.iac += rd32(IGC_IAC);
+ adapter->stats.icrxoc += rd32(IGC_ICRXOC);
+ adapter->stats.icrxptc += rd32(IGC_ICRXPTC);
+ adapter->stats.icrxatc += rd32(IGC_ICRXATC);
+ adapter->stats.ictxptc += rd32(IGC_ICTXPTC);
+ adapter->stats.ictxatc += rd32(IGC_ICTXATC);
+ adapter->stats.ictxqec += rd32(IGC_ICTXQEC);
+ adapter->stats.ictxqmtc += rd32(IGC_ICTXQMTC);
+ adapter->stats.icrxdmtc += rd32(IGC_ICRXDMTC);
+
+ /* Fill out the OS statistics structure */
+ net_stats->multicast = adapter->stats.mprc;
+ net_stats->collisions = adapter->stats.colc;
+
+ /* Rx Errors */
+
+ /* RLEC on some newer hardware can be incorrect so build
+ * our own version based on RUC and ROC
+ */
+ net_stats->rx_errors = adapter->stats.rxerrc +
+ adapter->stats.crcerrs + adapter->stats.algnerrc +
+ adapter->stats.ruc + adapter->stats.roc +
+ adapter->stats.cexterr;
+ net_stats->rx_length_errors = adapter->stats.ruc +
+ adapter->stats.roc;
+ net_stats->rx_crc_errors = adapter->stats.crcerrs;
+ net_stats->rx_frame_errors = adapter->stats.algnerrc;
+ net_stats->rx_missed_errors = adapter->stats.mpc;
+
+ /* Tx Errors */
+ net_stats->tx_errors = adapter->stats.ecol +
+ adapter->stats.latecol;
+ net_stats->tx_aborted_errors = adapter->stats.ecol;
+ net_stats->tx_window_errors = adapter->stats.latecol;
+ net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+ /* Tx Dropped needs to be maintained elsewhere */
+
+ /* Management Stats */
+ adapter->stats.mgptc += rd32(IGC_MGTPTC);
+ adapter->stats.mgprc += rd32(IGC_MGTPRC);
+ adapter->stats.mgpdc += rd32(IGC_MGTPDC);
}
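The per-ring byte and packet totals above are sampled with the u64_stats seqcount retry loop so that a 32-bit reader never observes a torn 64-bit counter. A minimal sketch of that reader idiom, lifted out of the loop for clarity:

/* Retry the read until the writer side did not update the counters in
 * the middle of it; on 64-bit builds this compiles down to plain loads.
 */
static void example_read_rx_stats(struct igc_ring *ring,
				  u64 *bytes, u64 *packets)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
		*bytes = ring->rx_stats.bytes;
		*packets = ring->rx_stats.packets;
	} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
}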
static void igc_nfc_filter_exit(struct igc_adapter *adapter)
{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
+ igc_erase_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
+}
+
+static void igc_nfc_filter_restore(struct igc_adapter *adapter)
+{
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+
+ hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
+ igc_add_filter(adapter, rule);
+
+ spin_unlock(&adapter->nfc_lock);
}
/**
@@ -1890,6 +2127,86 @@ static struct net_device_stats *igc_get_stats(struct net_device *netdev)
return &netdev->stats;
}
+static netdev_features_t igc_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable, make sure the Tx flag is always in the same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
+ return features;
+}
+
+static int igc_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct igc_adapter *adapter = netdev_priv(netdev);
+
+ /* Only the RXALL and NTUPLE feature flags need handling here */
+ if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
+ return 0;
+
+ if (!(features & NETIF_F_NTUPLE)) {
+ struct hlist_node *node2;
+ struct igc_nfc_filter *rule;
+
+ spin_lock(&adapter->nfc_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &adapter->nfc_filter_list, nfc_node) {
+ igc_erase_filter(adapter, rule);
+ hlist_del(&rule->nfc_node);
+ kfree(rule);
+ }
+ spin_unlock(&adapter->nfc_lock);
+ adapter->nfc_filter_count = 0;
+ }
+
+ netdev->features = features;
+
+ if (netif_running(netdev))
+ igc_reinit_locked(adapter);
+ else
+ igc_reset(adapter);
+
+ return 1;
+}
+
+static netdev_features_t
+igc_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGC_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPv4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
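The two lengths tested above are plain pointer differences inside the linear skb data; a worked layout, with the limits treated as hardware assumptions inherited from the igb-class context descriptor:

/* For a frame laid out as [ MAC (+VLAN) | IP | TCP | payload ]:
 *
 *   mac_hdr_len     = skb_network_header(skb) - skb->data
 *                     (the L2 header, including any VLAN tags)
 *   network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb)
 *                     (the L3 header, including IPv6 extension headers)
 *
 * If either exceeds its IGC_MAX_*_HDR_LEN bound, the context descriptor
 * cannot describe the packet, so the offloads are stripped for this skb
 * and the stack falls back to doing the work in software.
 */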
/**
* igc_configure - configure the hardware for RX and TX
* @adapter: private board structure
@@ -1906,6 +2223,7 @@ static void igc_configure(struct igc_adapter *adapter)
igc_setup_mrqc(adapter);
igc_setup_rctl(adapter);
+ igc_nfc_filter_restore(adapter);
igc_configure_tx(adapter);
igc_configure_rx(adapter);
@@ -1967,6 +2285,127 @@ static void igc_set_default_mac_filter(struct igc_adapter *adapter)
igc_rar_set_index(adapter, 0);
}
+/* If the filter to be added and an already existing filter express
+ * the same address and address type, the existing entry can be reused
+ * and only its other configuration, for example the queue used to
+ * steer traffic, needs to be overridden.
+ */
+static bool igc_mac_entry_can_be_used(const struct igc_mac_addr *entry,
+ const u8 *addr, const u8 flags)
+{
+ if (!(entry->state & IGC_MAC_STATE_IN_USE))
+ return true;
+
+ if ((entry->state & IGC_MAC_STATE_SRC_ADDR) !=
+ (flags & IGC_MAC_STATE_SRC_ADDR))
+ return false;
+
+ if (!ether_addr_equal(addr, entry->addr))
+ return false;
+
+ return true;
+}
+
+/* Add a MAC filter for 'addr', directing matching traffic to 'queue'.
+ * 'flags' indicates what kind of match is made; the match is on the
+ * destination address by default, and the IGC_MAC_STATE_SRC_ADDR flag
+ * selects matching by source address instead.
+ */
+static int igc_add_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for the first empty entry in the MAC table.
+ * Do not touch entries at the end of the table reserved for the VF MAC
+ * addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!igc_mac_entry_can_be_used(&adapter->mac_table[i],
+ addr, flags))
+ continue;
+
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ adapter->mac_table[i].state |= IGC_MAC_STATE_IN_USE | flags;
+
+ igc_rar_set_index(adapter, i);
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+int igc_add_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_add_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
+/* Remove the MAC filter for 'addr' that directs matching traffic to
+ * 'queue'. 'flags' indicates what kind of match is to be removed; the
+ * match is on the destination address by default, and the
+ * IGC_MAC_STATE_SRC_ADDR flag selects removal of a source-address
+ * match instead.
+ */
+static int igc_del_mac_filter_flags(struct igc_adapter *adapter,
+ const u8 *addr, const u8 queue,
+ const u8 flags)
+{
+ struct igc_hw *hw = &adapter->hw;
+ int rar_entries = hw->mac.rar_entry_count;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ /* Search for matching entry in the MAC table based on given address
+ * and queue. Do not touch entries at the end of the table reserved
+ * for the VF MAC addresses.
+ */
+ for (i = 0; i < rar_entries; i++) {
+ if (!(adapter->mac_table[i].state & IGC_MAC_STATE_IN_USE))
+ continue;
+ if ((adapter->mac_table[i].state & flags) != flags)
+ continue;
+ if (adapter->mac_table[i].queue != queue)
+ continue;
+ if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
+ continue;
+
+ /* When a filter for the default address is "deleted",
+ * we return it to its initial configuration
+ */
+ if (adapter->mac_table[i].state & IGC_MAC_STATE_DEFAULT) {
+ adapter->mac_table[i].state =
+ IGC_MAC_STATE_DEFAULT | IGC_MAC_STATE_IN_USE;
+ } else {
+ adapter->mac_table[i].state = 0;
+ adapter->mac_table[i].queue = 0;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ }
+
+ igc_rar_set_index(adapter, i);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+int igc_del_mac_steering_filter(struct igc_adapter *adapter,
+ const u8 *addr, u8 queue, u8 flags)
+{
+ return igc_del_mac_filter_flags(adapter, addr, queue,
+ IGC_MAC_STATE_QUEUE_STEERING | flags);
+}
+
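Together, the two exported helpers give the ethtool and cls_flower paths a symmetric add/remove interface for queue steering. A hedged usage sketch; the address and queue are made-up example values:

/* Hypothetical caller: steer frames destined to 'addr' to Rx queue 2,
 * then remove the same filter again. A zero 'flags' value selects the
 * default destination-address match.
 */
static int example_steering(struct igc_adapter *adapter)
{
	static const u8 addr[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};
	int err;

	/* returns the RAR index used on success, negative errno on error */
	err = igc_add_mac_steering_filter(adapter, addr, 2, 0);
	if (err < 0)
		return err;

	return igc_del_mac_steering_filter(adapter, addr, 2, 0);
}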
/**
* igc_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -3434,6 +3873,9 @@ static const struct net_device_ops igc_netdev_ops = {
.ndo_set_mac_address = igc_set_mac,
.ndo_change_mtu = igc_change_mtu,
.ndo_get_stats = igc_get_stats,
+ .ndo_fix_features = igc_fix_features,
+ .ndo_set_features = igc_set_features,
+ .ndo_features_check = igc_features_check,
};
/* PCIe configuration access */
@@ -3663,6 +4105,9 @@ static int igc_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
/* MTU range: 68 - 9216 */
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;
diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h
index 5afe7a8d3faf..50d7c04dccf5 100644
--- a/drivers/net/ethernet/intel/igc/igc_regs.h
+++ b/drivers/net/ethernet/intel/igc/igc_regs.h
@@ -80,8 +80,23 @@
/* MSI-X Table Register Descriptions */
#define IGC_PBACL 0x05B68 /* MSIx PBA Clear - R/W 1 to clear */
+/* RSS registers */
+#define IGC_MRQC 0x05818 /* Multiple Receive Control - RW */
+
+/* Filtering Registers */
+#define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+/* ETQF register bit definitions */
+#define IGC_ETQF_FILTER_ENABLE BIT(26)
+#define IGC_ETQF_QUEUE_ENABLE BIT(31)
+#define IGC_ETQF_QUEUE_SHIFT 16
+#define IGC_ETQF_QUEUE_MASK 0x00070000
+#define IGC_ETQF_ETYPE_MASK 0x0000FFFF
+
/* Redirection Table - RW Array */
#define IGC_RETA(_i) (0x05C00 + ((_i) * 4))
+/* RSS Random Key - RW Array */
+#define IGC_RSSRK(_i) (0x05C80 + ((_i) * 4))
/* Receive Register Descriptions */
#define IGC_RCTL 0x00100 /* Rx Control - RW */
@@ -101,6 +116,7 @@
#define IGC_UTA 0x0A000 /* Unicast Table Array - RW */
#define IGC_RAL(_n) (0x05400 + ((_n) * 0x08))
#define IGC_RAH(_n) (0x05404 + ((_n) * 0x08))
+#define IGC_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */
/* Transmit Register Descriptions */
#define IGC_TCTL 0x00400 /* Tx Control - RW */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e100054a3765..60cec3540dd7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8297,7 +8297,7 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
@@ -8483,8 +8483,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#ifdef IXGBE_FCOE
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct ixgbe_adapter *adapter;
struct ixgbe_ring_feature *f;
@@ -8514,7 +8513,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
break;
/* fall through */
default:
- return fallback(dev, skb, sb_dev);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -9796,7 +9795,7 @@ static int ixgbe_set_features(struct net_device *netdev,
NETIF_F_HW_VLAN_CTAG_FILTER))
ixgbe_set_rx_mode(netdev);
- return 0;
+ return 1;
}
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index c0a3718b2e2a..bb68737dce56 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2467,7 +2467,7 @@ out:
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
- if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ if (!netdev_xmit_more() || netif_xmit_stopped(nq) ||
txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
mvneta_txq_pend_desc_add(pp, txq, frags);
else
@@ -3385,6 +3385,7 @@ static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
phylink_set(mask, 1000baseX_Full);
}
if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+ phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ff0f4c503f53..67cce2736806 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -101,6 +101,7 @@
#define MVPP2_CLS_FLOW_TBL1_REG 0x1828
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS_MASK 0x7
#define MVPP2_CLS_FLOW_TBL1_N_FIELDS(x) (x)
+#define MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu) (((lu) & 0x3f) << 3)
#define MVPP2_CLS_FLOW_TBL1_PRIO_MASK 0x3f
#define MVPP2_CLS_FLOW_TBL1_PRIO(x) ((x) << 9)
#define MVPP2_CLS_FLOW_TBL1_SEQ_MASK 0x7
@@ -123,7 +124,10 @@
#define MVPP22_CLS_C2_TCAM_DATA2 0x1b18
#define MVPP22_CLS_C2_TCAM_DATA3 0x1b1c
#define MVPP22_CLS_C2_TCAM_DATA4 0x1b20
+#define MVPP22_CLS_C2_LU_TYPE(lu) ((lu) & 0x3f)
#define MVPP22_CLS_C2_PORT_ID(port) ((port) << 8)
+#define MVPP22_CLS_C2_TCAM_INV 0x1b24
+#define MVPP22_CLS_C2_TCAM_INV_BIT BIT(31)
#define MVPP22_CLS_C2_HIT_CTR 0x1b50
#define MVPP22_CLS_C2_ACT 0x1b60
#define MVPP22_CLS_C2_ACT_RSS_EN(act) (((act) & 0x3) << 19)
@@ -610,6 +614,8 @@
#define MVPP2_BIT_TO_WORD(bit) ((bit) / 32)
#define MVPP2_BIT_IN_WORD(bit) ((bit) % 32)
+#define MVPP2_N_PRS_FLOWS 52
+
/* RSS constants */
#define MVPP22_RSS_TABLE_ENTRIES 32
@@ -710,6 +716,7 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
/* Definitions */
+struct mvpp2_dbgfs_entries;
/* Shared Packet Processor resources */
struct mvpp2 {
@@ -771,6 +778,9 @@ struct mvpp2 {
/* Debugfs root entry */
struct dentry *dbgfs_dir;
+
+ /* Debugfs entries private data */
+ struct mvpp2_dbgfs_entries *dbgfs_entries;
};
struct mvpp2_pcpu_stats {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index efdb7a656835..1087974d3b98 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -22,7 +22,7 @@
} \
}
-static struct mvpp2_cls_flow cls_flows[MVPP2_N_FLOWS] = {
+static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
/* TCP over IPv4 flows, Not fragmented, no vlan tag */
MVPP2_DEF_FLOW(TCP_V4_FLOW, MVPP2_FL_IP4_TCP_NF_UNTAG,
MVPP22_CLS_HEK_IP4_5T,
@@ -429,12 +429,6 @@ static void mvpp2_cls_flow_port_id_sel(struct mvpp2_cls_flow_entry *fe,
fe->data[0] &= ~MVPP2_CLS_FLOW_TBL0_PORT_ID_SEL;
}
-static void mvpp2_cls_flow_seq_set(struct mvpp2_cls_flow_entry *fe, u32 seq)
-{
- fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_SEQ(MVPP2_CLS_FLOW_TBL1_SEQ_MASK);
- fe->data[1] |= MVPP2_CLS_FLOW_TBL1_SEQ(seq);
-}
-
static void mvpp2_cls_flow_last_set(struct mvpp2_cls_flow_entry *fe,
bool is_last)
{
@@ -454,9 +448,16 @@ static void mvpp2_cls_flow_port_add(struct mvpp2_cls_flow_entry *fe,
fe->data[0] |= MVPP2_CLS_FLOW_TBL0_PORT_ID(port);
}
+static void mvpp2_cls_flow_lu_type_set(struct mvpp2_cls_flow_entry *fe,
+ u8 lu_type)
+{
+ fe->data[1] &= ~MVPP2_CLS_FLOW_TBL1_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK);
+ fe->data[1] |= MVPP2_CLS_FLOW_TBL1_LU_TYPE(lu_type);
+}
+
/* Initialize the parser entry for the given flow */
static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
mvpp2_prs_add_flow(priv, flow->flow_id, flow->prs_ri.ri,
flow->prs_ri.ri_mask);
@@ -464,7 +465,7 @@ static void mvpp2_cls_flow_prs_init(struct mvpp2 *priv,
/* Initialize the Lookup Id table entry for the given flow */
static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
- struct mvpp2_cls_flow *flow)
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_lookup_entry le;
@@ -477,7 +478,7 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
/* We point to the first lookup in the sequence for the flow, that is
* the C2 lookup.
*/
- le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_FLOW_C2_ENTRY(flow->flow_id));
+ le.data |= MVPP2_CLS_LKP_FLOW_PTR(MVPP2_CLS_FLT_FIRST(flow->flow_id));
/* CLS is always enabled, RSS is enabled/disabled in C2 lookup */
le.data |= MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
@@ -485,21 +486,86 @@ static void mvpp2_cls_flow_lkp_init(struct mvpp2 *priv,
mvpp2_cls_lookup_write(priv, &le);
}
+static void mvpp2_cls_c2_write(struct mvpp2 *priv,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ if (c2->valid)
+ val &= ~MVPP22_CLS_C2_TCAM_INV_BIT;
+ else
+ val |= MVPP22_CLS_C2_TCAM_INV_BIT;
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_INV, val);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
+ /* Writing TCAM_DATA4 flushes writes to TCAM_DATA0-4 and INV to HW */
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
+}
+
+void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
+ struct mvpp2_cls_c2_entry *c2)
+{
+ u32 val;
+
+ mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
+
+ c2->index = index;
+
+ c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
+ c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
+ c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
+ c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
+ c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
+
+ c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
+
+ c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
+ c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
+ c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
+ c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
+
+ val = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_INV);
+ c2->valid = !(val & MVPP22_CLS_C2_TCAM_INV_BIT);
+}
+
/* Initialize the flow table entries for the given flow */
-static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
+static void mvpp2_cls_flow_init(struct mvpp2 *priv,
+ const struct mvpp2_cls_flow *flow)
{
struct mvpp2_cls_flow_entry fe;
- int i;
+ int i, pri = 0;
+
+ /* Assign default values to all entries in the flow */
+ for (i = MVPP2_CLS_FLT_FIRST(flow->flow_id);
+ i <= MVPP2_CLS_FLT_LAST(flow->flow_id); i++) {
+ memset(&fe, 0, sizeof(fe));
+ fe.index = i;
+ mvpp2_cls_flow_pri_set(&fe, pri++);
- /* C2 lookup */
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_FLOW_C2_ENTRY(flow->flow_id);
+ if (i == MVPP2_CLS_FLT_LAST(flow->flow_id))
+ mvpp2_cls_flow_last_set(&fe, 1);
+
+ mvpp2_cls_flow_write(priv, &fe);
+ }
+
+ /* RSS config C2 lookup */
+ mvpp2_cls_flow_read(priv, MVPP2_CLS_FLT_C2_RSS_ENTRY(flow->flow_id),
+ &fe);
mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C2);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_last_set(&fe, 0);
- mvpp2_cls_flow_pri_set(&fe, 0);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_FIRST1);
+ mvpp2_cls_flow_lu_type_set(&fe, MVPP2_CLS_LU_ALL);
/* Add all ports */
for (i = 0; i < MVPP2_MAX_PORTS; i++)
@@ -509,22 +575,19 @@ static void mvpp2_cls_flow_init(struct mvpp2 *priv, struct mvpp2_cls_flow *flow)
/* C3Hx lookups */
for (i = 0; i < MVPP2_MAX_PORTS; i++) {
- memset(&fe, 0, sizeof(fe));
- fe.index = MVPP2_PORT_FLOW_HASH_ENTRY(i, flow->flow_id);
+ mvpp2_cls_flow_read(priv,
+ MVPP2_CLS_FLT_HASH_ENTRY(i, flow->flow_id),
+ &fe);
+ /* Set a default engine. Will be overwritten when setting the
+ * real HEK parameters
+ */
+ mvpp2_cls_flow_eng_set(&fe, MVPP22_CLS_ENGINE_C3HA);
mvpp2_cls_flow_port_id_sel(&fe, true);
- mvpp2_cls_flow_pri_set(&fe, i + 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_MIDDLE);
mvpp2_cls_flow_port_add(&fe, BIT(i));
mvpp2_cls_flow_write(priv, &fe);
}
-
- /* Update the last entry */
- mvpp2_cls_flow_last_set(&fe, 1);
- mvpp2_cls_flow_seq_set(&fe, MVPP2_CLS_FLOW_SEQ_LAST);
-
- mvpp2_cls_flow_write(priv, &fe);
}
/* Adds a field to the Header Extracted Key generation parameters */
@@ -555,6 +618,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
for_each_set_bit(i, &hash_opts, MVPP22_CLS_HEK_N_FIELDS) {
switch (BIT(i)) {
+ case MVPP22_CLS_HEK_OPT_MAC_DA:
+ field_id = MVPP22_CLS_FIELD_MAC_DA;
+ break;
case MVPP22_CLS_HEK_OPT_VLAN:
field_id = MVPP22_CLS_FIELD_VLAN;
break;
@@ -586,9 +652,9 @@ static int mvpp2_flow_set_hek_fields(struct mvpp2_cls_flow_entry *fe,
return 0;
}
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
{
- if (flow >= MVPP2_N_FLOWS)
+ if (flow >= MVPP2_N_PRS_FLOWS)
return NULL;
return &cls_flows[flow];
@@ -608,21 +674,17 @@ struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow)
static int mvpp2_port_rss_hash_opts_set(struct mvpp2_port *port, int flow_type,
u16 requested_opts)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, engine, flow_index;
u16 hash_opts;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return -EINVAL;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -697,21 +759,17 @@ u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe)
*/
static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
{
+ const struct mvpp2_cls_flow *flow;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *flow;
int i, flow_index;
u16 hash_opts = 0;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for_each_cls_flow_id_with_type(i, flow_type) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
return 0;
- if (flow->flow_type != flow_type)
- continue;
-
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(port->id,
- flow->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(port->id, flow->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -723,10 +781,10 @@ static u16 mvpp2_port_rss_hash_opts_get(struct mvpp2_port *port, int flow_type)
static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
{
- struct mvpp2_cls_flow *flow;
+ const struct mvpp2_cls_flow *flow;
int i;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
flow = mvpp2_cls_flow_get(i);
if (!flow)
break;
@@ -737,47 +795,6 @@ static void mvpp2_cls_port_init_flows(struct mvpp2 *priv)
}
}
-static void mvpp2_cls_c2_write(struct mvpp2 *priv,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, c2->index);
-
- /* Write TCAM */
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA0, c2->tcam[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA1, c2->tcam[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA2, c2->tcam[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA3, c2->tcam[3]);
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_DATA4, c2->tcam[4]);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ACT, c2->act);
-
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR0, c2->attr[0]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR1, c2->attr[1]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR2, c2->attr[2]);
- mvpp2_write(priv, MVPP22_CLS_C2_ATTR3, c2->attr[3]);
-}
-
-void mvpp2_cls_c2_read(struct mvpp2 *priv, int index,
- struct mvpp2_cls_c2_entry *c2)
-{
- mvpp2_write(priv, MVPP22_CLS_C2_TCAM_IDX, index);
-
- c2->index = index;
-
- c2->tcam[0] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA0);
- c2->tcam[1] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA1);
- c2->tcam[2] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA2);
- c2->tcam[3] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA3);
- c2->tcam[4] = mvpp2_read(priv, MVPP22_CLS_C2_TCAM_DATA4);
-
- c2->act = mvpp2_read(priv, MVPP22_CLS_C2_ACT);
-
- c2->attr[0] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR0);
- c2->attr[1] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR1);
- c2->attr[2] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR2);
- c2->attr[3] = mvpp2_read(priv, MVPP22_CLS_C2_ATTR3);
-}
-
static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
{
struct mvpp2_cls_c2_entry c2;
@@ -791,6 +808,10 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.tcam[4] = MVPP22_CLS_C2_PORT_ID(pmap);
c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_PORT_ID(pmap));
+ /* Match on Lookup Type */
+ c2.tcam[4] |= MVPP22_CLS_C2_TCAM_EN(MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_TYPE_MASK));
+ c2.tcam[4] |= MVPP22_CLS_C2_LU_TYPE(MVPP2_CLS_LU_ALL);
+
/* Update RSS status after matching this entry */
c2.act = MVPP22_CLS_C2_ACT_RSS_EN(MVPP22_C2_UPD_LOCK);
@@ -809,6 +830,8 @@ static void mvpp2_port_c2_cls_init(struct mvpp2_port *port)
c2.attr[0] = MVPP22_CLS_C2_ATTR0_QHIGH(qh) |
MVPP22_CLS_C2_ATTR0_QLOW(ql);
+ c2.valid = true;
+
mvpp2_cls_c2_write(port->priv, &c2);
}
@@ -817,6 +840,7 @@ void mvpp2_cls_init(struct mvpp2 *priv)
{
struct mvpp2_cls_lookup_entry le;
struct mvpp2_cls_flow_entry fe;
+ struct mvpp2_cls_c2_entry c2;
int index;
/* Enable classifier */
@@ -840,6 +864,14 @@ void mvpp2_cls_init(struct mvpp2 *priv)
mvpp2_cls_lookup_write(priv, &le);
}
+ /* Clear C2 TCAM engine table */
+ memset(&c2, 0, sizeof(c2));
+ c2.valid = false;
+ for (index = 0; index < MVPP22_CLS_C2_N_ENTRIES; index++) {
+ c2.index = index;
+ mvpp2_cls_c2_write(priv, &c2);
+ }
+
mvpp2_cls_port_init_flows(priv);
}
@@ -902,12 +934,12 @@ static void mvpp2_rss_port_c2_disable(struct mvpp2_port *port)
mvpp2_cls_c2_write(port->priv, &c2);
}
-void mvpp22_rss_enable(struct mvpp2_port *port)
+void mvpp22_port_rss_enable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_enable(port);
}
-void mvpp22_rss_disable(struct mvpp2_port *port)
+void mvpp22_port_rss_disable(struct mvpp2_port *port)
{
mvpp2_rss_port_c2_disable(port);
}
@@ -1037,7 +1069,7 @@ int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info)
return 0;
}
-void mvpp22_rss_port_init(struct mvpp2_port *port)
+void mvpp22_port_rss_init(struct mvpp2_port *port)
{
struct mvpp2 *priv = port->priv;
int i;
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
index 089f05f29891..96304ffc5d49 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.h
@@ -71,14 +71,6 @@ enum mvpp2_cls_field_id {
MVPP22_CLS_FIELD_L4DIP = 0x1e,
};
-enum mvpp2_cls_flow_seq {
- MVPP2_CLS_FLOW_SEQ_NORMAL = 0,
- MVPP2_CLS_FLOW_SEQ_FIRST1,
- MVPP2_CLS_FLOW_SEQ_FIRST2,
- MVPP2_CLS_FLOW_SEQ_LAST,
- MVPP2_CLS_FLOW_SEQ_MIDDLE
-};
-
/* Classifier C2 engine constants */
#define MVPP22_CLS_C2_TCAM_EN(data) ((data) << 16)
@@ -105,34 +97,25 @@ enum mvpp22_cls_c2_fwd_action {
struct mvpp2_cls_c2_entry {
u32 index;
+ /* TCAM lookup key */
u32 tcam[MVPP2_CLS_C2_TCAM_WORDS];
+ /* Actions to perform upon TCAM match */
u32 act;
+ /* Attributes relative to the actions to perform */
u32 attr[MVPP2_CLS_C2_ATTR_WORDS];
+ /* Entry validity */
+ u8 valid;
};
/* Classifier C2 engine entries */
-#define MVPP22_CLS_C2_RSS_ENTRY(port) (port)
-#define MVPP22_CLS_C2_N_ENTRIES MVPP2_MAX_PORTS
+#define MVPP22_CLS_C2_N_ENTRIES 256
-/* RSS flow entries in the flow table. We have 2 entries per port for RSS.
- *
- * The first performs a lookup using the C2 TCAM engine, to tag the
- * packet for software forwarding (needed for RSS), enable or disable RSS, and
- * assign the default rx queue.
- *
- * The second configures the hash generation, by specifying which fields of the
- * packet header are used to generate the hash, and specifies the relevant hash
- * engine to use.
- */
-#define MVPP22_RSS_FLOW_C2_OFFS 0
-#define MVPP22_RSS_FLOW_HASH_OFFS 1
-#define MVPP22_RSS_FLOW_SIZE (MVPP22_RSS_FLOW_HASH_OFFS + 1)
+/* Number of per-port dedicated entries in the C2 TCAM */
+#define MVPP22_CLS_C2_PORT_RANGE 8
-#define MVPP22_RSS_FLOW_C2(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_C2_OFFS)
-#define MVPP22_RSS_FLOW_HASH(port) ((port) * MVPP22_RSS_FLOW_SIZE + \
- MVPP22_RSS_FLOW_HASH_OFFS)
-#define MVPP22_RSS_FLOW_FIRST(port) MVPP22_RSS_FLOW_C2(port)
+#define MVPP22_CLS_C2_PORT_FIRST(p) (MVPP22_CLS_C2_N_ENTRIES - \
+ ((p) * MVPP22_CLS_C2_PORT_RANGE))
+#define MVPP22_CLS_C2_RSS_ENTRY(p) (MVPP22_CLS_C2_PORT_FIRST(p) - 1)
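The new layout replaces the old one-RSS-entry-per-port scheme with 8 dedicated C2 entries per port, carved downwards from the top of the 256-entry TCAM. A worked expansion of the macros above (values computed by hand, assuming port ids 0..2):

/*   MVPP22_CLS_C2_PORT_FIRST(0) = 256 - 0 * 8 = 256, RSS entry 255
 *   MVPP22_CLS_C2_PORT_FIRST(1) = 256 - 1 * 8 = 248, RSS entry 247
 *   MVPP22_CLS_C2_PORT_FIRST(2) = 256 - 2 * 8 = 240, RSS entry 239
 *
 * i.e. port p owns the 8 entries just below PORT_FIRST(p), with its RSS
 * entry at the top of that range, and the low indices stay free for
 * other classifier uses.
 */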
/* Packet flow ID */
enum mvpp2_prs_flow {
@@ -162,6 +145,15 @@ enum mvpp2_prs_flow {
MVPP2_FL_LAST,
};
+enum mvpp2_cls_lu_type {
+ MVPP2_CLS_LU_ALL = 0,
+};
+
+/* LU Type defined for all engines, and specified in the flow table */
+#define MVPP2_CLS_LU_TYPE_MASK 0x3f
+
+#define MVPP2_N_FLOWS (MVPP2_FL_LAST - MVPP2_FL_START)
+
struct mvpp2_cls_flow {
/* The L2-L4 traffic flow type */
int flow_type;
@@ -176,12 +168,37 @@ struct mvpp2_cls_flow {
struct mvpp2_prs_result_info prs_ri;
};
-#define MVPP2_N_FLOWS 52
+#define MVPP2_CLS_FLT_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
+#define MVPP2_CLS_FLT_FIRST(id) (((id) - MVPP2_FL_START) * \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW)
+#define MVPP2_CLS_FLT_C2_RSS_ENTRY(id) (MVPP2_CLS_FLT_FIRST(id))
+#define MVPP2_CLS_FLT_HASH_ENTRY(port, id) (MVPP2_CLS_FLT_C2_RSS_ENTRY(id) + (port) + 1)
+#define MVPP2_CLS_FLT_LAST(id) (MVPP2_CLS_FLT_FIRST(id) + \
+ MVPP2_CLS_FLT_ENTRIES_PER_FLOW - 1)
+
+/* Iterate over each classifier flow id. Sets 'i' to the index of the first
+ * entry in the cls_flows table for each different flow_id.
+ * This relies on entries with the same flow_id being contiguous in the
+ * cls_flows table.
+ */
+#define for_each_cls_flow_id(i) \
+ for ((i) = 0; (i) < MVPP2_N_PRS_FLOWS; (i)++) \
+ if ((i) > 0 && \
+ cls_flows[(i)].flow_id == cls_flows[(i) - 1].flow_id) \
+ continue; \
+ else
+
+/* Iterate over each classifier flow that has a given flow_type. Sets 'i' to
+ * the index of the first entry in the cls_flows table for each different
+ * flow_id that has the given flow_type. This allows operating on all flows
+ * that match a given ethtool flow type.
+ */
+#define for_each_cls_flow_id_with_type(i, type) \
+ for_each_cls_flow_id((i)) \
+ if (cls_flows[(i)].flow_type != (type)) \
+ continue; \
+ else
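A hedged usage sketch of the flow-type iterator, mirroring how mvpp2_port_rss_hash_opts_set() and _get() now walk the table:

/* Visit the first cls_flows[] entry of every flow_id whose flow_type
 * is TCP_V4_FLOW; 'i' indexes the static cls_flows table.
 */
static void example_walk_tcp4_flows(void)
{
	const struct mvpp2_cls_flow *flow;
	int i;

	for_each_cls_flow_id_with_type(i, TCP_V4_FLOW) {
		flow = mvpp2_cls_flow_get(i);
		if (!flow)
			break;
		/* operate on flow->flow_id here */
	}
}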
-#define MVPP2_ENTRIES_PER_FLOW (MVPP2_MAX_PORTS + 1)
-#define MVPP2_FLOW_C2_ENTRY(id) ((id) * MVPP2_ENTRIES_PER_FLOW)
-#define MVPP2_PORT_FLOW_HASH_ENTRY(port, id) ((id) * MVPP2_ENTRIES_PER_FLOW + \
- (port) + 1)
struct mvpp2_cls_flow_entry {
u32 index;
u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
@@ -194,11 +211,10 @@ struct mvpp2_cls_lookup_entry {
};
void mvpp22_rss_fill_table(struct mvpp2_port *port, u32 table);
+void mvpp22_port_rss_init(struct mvpp2_port *port);
-void mvpp22_rss_port_init(struct mvpp2_port *port);
-
-void mvpp22_rss_enable(struct mvpp2_port *port);
-void mvpp22_rss_disable(struct mvpp2_port *port);
+void mvpp22_port_rss_enable(struct mvpp2_port *port);
+void mvpp22_port_rss_disable(struct mvpp2_port *port);
int mvpp2_ethtool_rxfh_get(struct mvpp2_port *port, struct ethtool_rxnfc *info);
int mvpp2_ethtool_rxfh_set(struct mvpp2_port *port, struct ethtool_rxnfc *info);
@@ -213,7 +229,7 @@ int mvpp2_cls_flow_eng_get(struct mvpp2_cls_flow_entry *fe);
u16 mvpp2_flow_get_hek_fields(struct mvpp2_cls_flow_entry *fe);
-struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
+const struct mvpp2_cls_flow *mvpp2_cls_flow_get(int flow);
u32 mvpp2_cls_flow_hits(struct mvpp2 *priv, int index);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index f9744a61e5dd..0ee39ea47b6b 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -18,22 +18,48 @@ struct mvpp2_dbgfs_prs_entry {
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_c2_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_flow_entry {
int flow;
struct mvpp2 *priv;
};
+struct mvpp2_dbgfs_flow_tbl_entry {
+ int id;
+ struct mvpp2 *priv;
+};
+
struct mvpp2_dbgfs_port_flow_entry {
struct mvpp2_port *port;
struct mvpp2_dbgfs_flow_entry *dbg_fe;
};
+struct mvpp2_dbgfs_entries {
+ /* Entries for Header Parser debug info */
+ struct mvpp2_dbgfs_prs_entry prs_entries[MVPP2_PRS_TCAM_SRAM_SIZE];
+
+ /* Entries for Classifier C2 engine debug info */
+ struct mvpp2_dbgfs_c2_entry c2_entries[MVPP22_CLS_C2_N_ENTRIES];
+
+ /* Entries for Classifier Flow Table debug info */
+ struct mvpp2_dbgfs_flow_tbl_entry flt_entries[MVPP2_CLS_FLOWS_TBL_SIZE];
+
+ /* Entries for Classifier flows debug info */
+ struct mvpp2_dbgfs_flow_entry flow_entries[MVPP2_N_PRS_FLOWS];
+
+ /* Entries for per-port flows debug info */
+ struct mvpp2_dbgfs_port_flow_entry port_flow_entries[MVPP2_MAX_PORTS];
+};
+
static int mvpp2_dbgfs_flow_flt_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- int id = MVPP2_FLOW_C2_ENTRY(entry->flow);
+ struct mvpp2_dbgfs_flow_tbl_entry *entry = s->private;
- u32 hits = mvpp2_cls_flow_hits(entry->priv, id);
+ u32 hits = mvpp2_cls_flow_hits(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -58,7 +84,7 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_dec_hits);
static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
const char *flow_name;
f = mvpp2_cls_flow_get(entry->flow);
@@ -93,30 +119,12 @@ static int mvpp2_dbgfs_flow_type_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_flow_type_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_flow_type_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_flow_type_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_flow_type_fops = {
- .open = mvpp2_dbgfs_flow_type_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_flow_type_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_type);
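The DEFINE_SHOW_ATTRIBUTE() conversions in this file work because the per-file private data now lives in the preallocated dbgfs_entries array instead of being kfree()d by a custom release op. For reference, the macro from <linux/seq_file.h> expands to roughly this, shown for a hypothetical name_show():

static int name_open(struct inode *inode, struct file *file)
{
	return single_open(file, name_show, inode->i_private);
}

static const struct file_operations name_fops = {
	.owner		= THIS_MODULE,
	.open		= name_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};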
static int mvpp2_dbgfs_flow_id_show(struct seq_file *s, void *unused)
{
- struct mvpp2_dbgfs_flow_entry *entry = s->private;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_dbgfs_flow_entry *entry = s->private;
+ const struct mvpp2_cls_flow *f;
f = mvpp2_cls_flow_get(entry->flow);
if (!f)
@@ -134,7 +142,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index;
u16 hash_opts;
@@ -142,7 +150,7 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -153,42 +161,21 @@ static int mvpp2_dbgfs_port_flow_hash_opt_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_port_flow_hash_opt_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_port_flow_hash_opt_show,
- inode->i_private);
-}
-
-static int mvpp2_dbgfs_port_flow_hash_opt_release(struct inode *inode,
- struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_port_flow_entry *flow_entry = seq->private;
-
- kfree(flow_entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_port_flow_hash_opt_fops = {
- .open = mvpp2_dbgfs_port_flow_hash_opt_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_port_flow_hash_opt_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_hash_opt);
static int mvpp2_dbgfs_port_flow_engine_show(struct seq_file *s, void *unused)
{
struct mvpp2_dbgfs_port_flow_entry *entry = s->private;
struct mvpp2_port *port = entry->port;
struct mvpp2_cls_flow_entry fe;
- struct mvpp2_cls_flow *f;
+ const struct mvpp2_cls_flow *f;
int flow_index, engine;
f = mvpp2_cls_flow_get(entry->dbg_fe->flow);
if (!f)
return -EINVAL;
- flow_index = MVPP2_PORT_FLOW_HASH_ENTRY(entry->port->id, f->flow_id);
+ flow_index = MVPP2_CLS_FLT_HASH_ENTRY(entry->port->id, f->flow_id);
mvpp2_cls_flow_read(port->priv, flow_index, &fe);
@@ -203,11 +190,10 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_port_flow_engine);
static int mvpp2_dbgfs_flow_c2_hits_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
u32 hits;
- hits = mvpp2_cls_c2_hit_count(port->priv,
- MVPP22_CLS_C2_RSS_ENTRY(port->id));
+ hits = mvpp2_cls_c2_hit_count(entry->priv, entry->id);
seq_printf(s, "%u\n", hits);
@@ -218,11 +204,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_hits);
static int mvpp2_dbgfs_flow_c2_rxq_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
u8 qh, ql;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
qh = (c2.attr[0] >> MVPP22_CLS_C2_ATTR0_QHIGH_OFFS) &
MVPP22_CLS_C2_ATTR0_QHIGH_MASK;
@@ -239,11 +225,11 @@ DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_flow_c2_rxq);
static int mvpp2_dbgfs_flow_c2_enable_show(struct seq_file *s, void *unused)
{
- struct mvpp2_port *port = s->private;
+ struct mvpp2_dbgfs_c2_entry *entry = s->private;
struct mvpp2_cls_c2_entry c2;
int enabled;
- mvpp2_cls_c2_read(port->priv, MVPP22_CLS_C2_RSS_ENTRY(port->id), &c2);
+ mvpp2_cls_c2_read(entry->priv, entry->id, &c2);
enabled = !!(c2.attr[2] & MVPP22_CLS_C2_ATTR2_RSS_EN);
@@ -456,25 +442,7 @@ static int mvpp2_dbgfs_prs_valid_show(struct seq_file *s, void *unused)
return 0;
}
-static int mvpp2_dbgfs_prs_valid_open(struct inode *inode, struct file *file)
-{
- return single_open(file, mvpp2_dbgfs_prs_valid_show, inode->i_private);
-}
-
-static int mvpp2_dbgfs_prs_valid_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct mvpp2_dbgfs_prs_entry *entry = seq->private;
-
- kfree(entry);
- return single_release(inode, file);
-}
-
-static const struct file_operations mvpp2_dbgfs_prs_valid_fops = {
- .open = mvpp2_dbgfs_prs_valid_open,
- .read = seq_read,
- .release = mvpp2_dbgfs_prs_valid_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mvpp2_dbgfs_prs_valid);
static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
struct mvpp2_port *port,
@@ -487,10 +455,7 @@ static int mvpp2_dbgfs_flow_port_init(struct dentry *parent,
if (IS_ERR(port_dir))
return PTR_ERR(port_dir);
- /* This will be freed by 'hash_opts' release op */
- port_entry = kmalloc(sizeof(*port_entry), GFP_KERNEL);
- if (!port_entry)
- return -ENOMEM;
+ port_entry = &port->priv->dbgfs_entries->port_flow_entries[port->id];
port_entry->port = port;
port_entry->dbg_fe = entry;
@@ -518,17 +483,11 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (!flow_entry_dir)
return -ENOMEM;
- /* This will be freed by 'type' release op */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->flow_entries[flow];
entry->flow = flow;
entry->priv = priv;
- debugfs_create_file("flow_hits", 0444, flow_entry_dir, entry,
- &mvpp2_dbgfs_flow_flt_hits_fops);
-
debugfs_create_file("dec_hits", 0444, flow_entry_dir, entry,
&mvpp2_dbgfs_flow_dec_hits_fops);
@@ -545,6 +504,7 @@ static int mvpp2_dbgfs_flow_entry_init(struct dentry *parent,
if (ret)
return ret;
}
+
return 0;
}
@@ -557,7 +517,7 @@ static int mvpp2_dbgfs_flow_init(struct dentry *parent, struct mvpp2 *priv)
if (!flow_dir)
return -ENOMEM;
- for (i = 0; i < MVPP2_N_FLOWS; i++) {
+ for (i = 0; i < MVPP2_N_PRS_FLOWS; i++) {
ret = mvpp2_dbgfs_flow_entry_init(flow_dir, priv, i);
if (ret)
return ret;
@@ -582,10 +542,7 @@ static int mvpp2_dbgfs_prs_entry_init(struct dentry *parent,
if (!prs_entry_dir)
return -ENOMEM;
- /* The 'valid' entry's ops will free that */
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
+ entry = &priv->dbgfs_entries->prs_entries[tid];
entry->tid = tid;
entry->priv = priv;
@@ -630,6 +587,98 @@ static int mvpp2_dbgfs_prs_init(struct dentry *parent, struct mvpp2 *priv)
return 0;
}
+static int mvpp2_dbgfs_c2_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_c2_entry *entry;
+ struct dentry *c2_entry_dir;
+ char c2_entry_name[10];
+
+ if (id >= MVPP22_CLS_C2_N_ENTRIES)
+ return -EINVAL;
+
+ sprintf(c2_entry_name, "%03d", id);
+
+ c2_entry_dir = debugfs_create_dir(c2_entry_name, parent);
+ if (!c2_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->c2_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_hits_fops);
+
+ debugfs_create_file("default_rxq", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_rxq_fops);
+
+ debugfs_create_file("rss_enable", 0444, c2_entry_dir, entry,
+ &mvpp2_dbgfs_flow_c2_enable_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_flow_tbl_entry_init(struct dentry *parent,
+ struct mvpp2 *priv, int id)
+{
+ struct mvpp2_dbgfs_flow_tbl_entry *entry;
+ struct dentry *flow_tbl_entry_dir;
+ char flow_tbl_entry_name[10];
+
+ if (id >= MVPP2_CLS_FLOWS_TBL_SIZE)
+ return -EINVAL;
+
+ sprintf(flow_tbl_entry_name, "%03d", id);
+
+ flow_tbl_entry_dir = debugfs_create_dir(flow_tbl_entry_name, parent);
+ if (!flow_tbl_entry_dir)
+ return -ENOMEM;
+
+ entry = &priv->dbgfs_entries->flt_entries[id];
+
+ entry->id = id;
+ entry->priv = priv;
+
+ debugfs_create_file("hits", 0444, flow_tbl_entry_dir, entry,
+ &mvpp2_dbgfs_flow_flt_hits_fops);
+
+ return 0;
+}
+
+static int mvpp2_dbgfs_cls_init(struct dentry *parent, struct mvpp2 *priv)
+{
+ struct dentry *cls_dir, *c2_dir, *flow_tbl_dir;
+ int i, ret;
+
+ cls_dir = debugfs_create_dir("classifier", parent);
+ if (!cls_dir)
+ return -ENOMEM;
+
+ c2_dir = debugfs_create_dir("c2", cls_dir);
+ if (!c2_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP22_CLS_C2_N_ENTRIES; i++) {
+ ret = mvpp2_dbgfs_c2_entry_init(c2_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ flow_tbl_dir = debugfs_create_dir("flow_table", cls_dir);
+ if (!flow_tbl_dir)
+ return -ENOMEM;
+
+ for (i = 0; i < MVPP2_CLS_FLOWS_TBL_SIZE; i++) {
+ ret = mvpp2_dbgfs_flow_tbl_entry_init(flow_tbl_dir, priv, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static int mvpp2_dbgfs_port_init(struct dentry *parent,
struct mvpp2_port *port)
{
@@ -648,21 +697,14 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
debugfs_create_file("vid_filter", 0444, port_dir, port,
&mvpp2_dbgfs_port_vid_fops);
- debugfs_create_file("c2_hits", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_hits_fops);
-
- debugfs_create_file("default_rxq", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_rxq_fops);
-
- debugfs_create_file("rss_enable", 0444, port_dir, port,
- &mvpp2_dbgfs_flow_c2_enable_fops);
-
return 0;
}
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
+
+ kfree(priv->dbgfs_entries);
}
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
@@ -682,11 +724,18 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
return;
priv->dbgfs_dir = mvpp2_dir;
+ priv->dbgfs_entries = kzalloc(sizeof(*priv->dbgfs_entries), GFP_KERNEL);
+ if (!priv->dbgfs_entries)
+ goto err;
ret = mvpp2_dbgfs_prs_init(mvpp2_dir, priv);
if (ret)
goto err;
+ ret = mvpp2_dbgfs_cls_init(mvpp2_dir, priv);
+ if (ret)
+ goto err;
+
for (i = 0; i < priv->port_count; i++) {
ret = mvpp2_dbgfs_port_init(mvpp2_dir, priv->port_list[i]);
if (ret)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 25fbed2b8d94..f128ea22b339 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3741,9 +3741,9 @@ static int mvpp2_set_features(struct net_device *dev,
if (changed & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- mvpp22_rss_enable(port);
+ mvpp22_port_rss_enable(port);
else
- mvpp22_rss_disable(port);
+ mvpp22_port_rss_disable(port);
}
return 0;
@@ -4301,7 +4301,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_cls_port_config(port);
if (mvpp22_rss_is_supported())
- mvpp22_rss_port_init(port);
+ mvpp22_port_rss_init(port);
/* Provide an initial Rx packet size */
port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
@@ -4848,6 +4848,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
struct mvpp2_port *port;
struct mvpp2_port_pcpu *port_pcpu;
struct device_node *port_node = to_of_node(port_fwnode);
+ netdev_features_t features;
struct net_device *dev;
struct resource *res;
struct phylink *phylink;
@@ -4856,7 +4857,6 @@ static int mvpp2_port_probe(struct platform_device *pdev,
unsigned long flags = 0;
bool has_tx_irqs;
u32 id;
- int features;
int phy_mode;
int err, i;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 549d36497b8c..53abe925ecb1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -767,7 +767,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
*/
wmb();
- if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
+ if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) ||
+ !netdev_xmit_more())
mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index ff8057ed97ee..8491db57b0b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -26,6 +26,7 @@ config MLX4_EN_DCB
config MLX4_CORE
tristate
depends on PCI
+ select NET_DEVLINK
default n
config MLX4_DEBUG
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 2cbd2bd7c67c..36a92b19e613 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -685,16 +685,15 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
if (netdev_get_num_tc(dev))
- return fallback(dev, skb, NULL);
+ return netdev_pick_tx(dev, skb, NULL);
- return fallback(dev, skb, NULL) % rings_p_up;
+ return netdev_pick_tx(dev, skb, NULL) % rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, const void *src,
@@ -1043,7 +1042,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
tx_info->nr_bytes,
- skb->xmit_more);
+ netdev_xmit_more());
real_size = (real_size / 16) & 0x3f;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 8137454e2534..630f15977f09 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -698,8 +698,7 @@ void mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring,
struct mlx4_en_rx_alloc *frame,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 6debffb8336b..9aca8086ee01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -5,6 +5,7 @@
config MLX5_CORE
tristate "Mellanox 5th generation network adapters (ConnectX series) core driver"
depends on PCI
+ select NET_DEVLINK
imply PTP_1588_CLOCK
imply VXLAN
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71c65cc17904..3e1ea8b42c77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -769,11 +769,10 @@ struct mlx5e_profile {
void mlx5e_build_ptys2ethtool_map(void);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi);
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
void mlx5e_completion_event(struct mlx5_core_cq *mcq);
void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
@@ -885,6 +884,53 @@ static inline bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version));
}
+static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_ETH(mdev, swp) &&
+ MLX5_CAP_ETH(mdev, swp_csum) && MLX5_CAP_ETH(mdev, swp_lso);
+}
+
+struct mlx5e_swp_spec {
+ __be16 l3_proto;
+ u8 l4_proto;
+ u8 is_tun;
+ __be16 tun_l3_proto;
+ u8 tun_l4_proto;
+};
+
+static inline void
+mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
+ struct mlx5e_swp_spec *swp_spec)
+{
+ /* SWP offsets are in 2-byte words */
+ eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
+ if (swp_spec->l4_proto) {
+ eseg->swp_outer_l4_offset = skb_transport_offset(skb) / 2;
+ if (swp_spec->l4_proto == IPPROTO_UDP)
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ }
+
+ if (swp_spec->is_tun) {
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ if (swp_spec->tun_l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ } else { /* typically for ipsec when xfrm mode != XFRM_MODE_TUNNEL */
+ eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
+ if (swp_spec->l3_proto == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ }
+ switch (swp_spec->tun_l4_proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ /* fall through */
+ case IPPROTO_TCP:
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ }
+}
+
static inline void mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe **wqe,
u16 *pi)
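
A worked example of the 2-byte-word convention: with plain Ethernet framing and a 20-byte IPv4 header, L3 starts at byte 14 (word 7) and L4 at byte 34 (word 17). A minimal caller sketch for a non-tunneled UDP/IPv4 skb, mirroring the transport-style fill the ipsec path uses below (the foo_* name is hypothetical):

#include <linux/if_ether.h>
#include <linux/in.h>
#include <linux/skbuff.h>

static void foo_swp_plain_udp4(struct sk_buff *skb,
			       struct mlx5_wqe_eth_seg *eseg)
{
	struct mlx5e_swp_spec swp_spec = {
		.l3_proto     = htons(ETH_P_IP),  /* L3 at byte 14 -> word 7 */
		.l4_proto     = IPPROTO_UDP,      /* L4 at byte 34 -> word 17 */
		/* transport-style: inner mirrors outer, as the ipsec
		 * transport-mode path does
		 */
		.tun_l3_proto = htons(ETH_P_IP),
		.tun_l4_proto = IPPROTO_UDP,
	};

	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}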
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 4ab0d030b544..633b117eb13e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -167,23 +167,23 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
/**
- * update_buffer_lossy()
- * max_mtu: netdev's max_mtu
- * pfc_en: <input> current pfc configuration
- * buffer: <input> current prio to buffer mapping
- * xoff: <input> xoff value
- * port_buffer: <output> port receive buffer configuration
- * change: <output>
+ * update_buffer_lossy - Update buffer configuration based on pfc
+ * @max_mtu: netdev's max_mtu
+ * @pfc_en: <input> current pfc configuration
+ * @buffer: <input> current prio to buffer mapping
+ * @xoff: <input> xoff value
+ * @port_buffer: <output> port receive buffer configuration
+ * @change: <output>
*
- * Update buffer configuration based on pfc configuraiton and priority
- * to buffer mapping.
- * Buffer's lossy bit is changed to:
- * lossless if there is at least one PFC enabled priority mapped to this buffer
- * lossy if all priorities mapped to this buffer are PFC disabled
+ * Update buffer configuration based on pfc configuration and
+ * priority to buffer mapping.
+ * Buffer's lossy bit is changed to:
+ *   lossless if there is at least one PFC enabled priority
+ *   mapped to this buffer
+ *   lossy if all priorities mapped to this buffer are PFC disabled
*
- * Return:
- * Return 0 if no error.
- * Set change to true if buffer configuration is modified.
+ * @return: 0 if no error,
+ * sets change to true if buffer configuration was modified.
*/
static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 1dd225380a66..6da7c88742dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -40,6 +40,57 @@
#include "en_accel/tls_rxtx.h"
#include "en.h"
+#if IS_ENABLED(CONFIG_GENEVE)
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return mlx5_tx_swp_supported(mdev);
+}
+
+static inline void
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_swp_spec swp_spec = {};
+ unsigned int offset = 0;
+ __be16 l3_proto;
+ u8 l4_proto;
+
+ l3_proto = vlan_get_protocol(skb);
+ switch (l3_proto) {
+ case htons(ETH_P_IP):
+ l4_proto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
+ break;
+ default:
+ return;
+ }
+
+ if (l4_proto != IPPROTO_UDP ||
+ udp_hdr(skb)->dest != cpu_to_be16(GENEVE_UDP_PORT))
+ return;
+ swp_spec.l3_proto = l3_proto;
+ swp_spec.l4_proto = l4_proto;
+ swp_spec.is_tun = true;
+ if (inner_ip_hdr(skb)->version == 6) {
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
+ } else {
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
+ }
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+}
+
+#else
+static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
+{
+ return false;
+}
+
+#endif /* CONFIG_GENEVE */
+
static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
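
Note the asymmetry in the #else branch above: only the boolean helper gets a stub, because the tx-accel call site is itself wrapped in IS_ENABLED(CONFIG_GENEVE) (see the en_tx.c hunk further down). The pattern in isolation, with illustrative bar_* names:

#include <linux/skbuff.h>

#if IS_ENABLED(CONFIG_GENEVE)
static inline bool bar_geneve_allowed(void) { return true; }
static inline void bar_tx_accel(struct sk_buff *skb) { /* real work */ }
#else
static inline bool bar_geneve_allowed(void) { return false; }
/* no bar_tx_accel() stub: its only call site is wrapped in
 * IS_ENABLED(CONFIG_GENEVE) and compiles away with it
 */
#endif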
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 53608afd39b6..0dd17514caae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -136,7 +136,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg, u8 mode,
struct xfrm_offload *xo)
{
- u8 proto;
+ struct mlx5e_swp_spec swp_spec = {};
/* Tunnel Mode:
* SWP: OutL3 InL3 InL4
@@ -146,35 +146,23 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* SWP: OutL3 InL4
* InL3
* Pkt: MAC IP ESP L4
- *
- * Offsets are in 2-byte words, counting from start of frame
*/
- eseg->swp_outer_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L3_IPV6;
-
- if (mode == XFRM_MODE_TUNNEL) {
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ swp_spec.l3_proto = skb->protocol;
+ swp_spec.is_tun = mode == XFRM_MODE_TUNNEL;
+ if (swp_spec.is_tun) {
if (xo->proto == IPPROTO_IPV6) {
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = inner_ipv6_hdr(skb)->nexthdr;
+ swp_spec.tun_l3_proto = htons(ETH_P_IPV6);
+ swp_spec.tun_l4_proto = inner_ipv6_hdr(skb)->nexthdr;
} else {
- proto = inner_ip_hdr(skb)->protocol;
+ swp_spec.tun_l3_proto = htons(ETH_P_IP);
+ swp_spec.tun_l4_proto = inner_ip_hdr(skb)->protocol;
}
} else {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- proto = xo->proto;
- }
- switch (proto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- /* Fall through */
- case IPPROTO_TCP:
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- break;
+ swp_spec.tun_l3_proto = skb->protocol;
+ swp_spec.tun_l4_proto = xo->proto;
}
+
+ mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index be137d4a9169..439bf5953885 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -181,7 +181,6 @@ static void mlx5e_tls_complete_sync_skb(struct sk_buff *skb,
*/
nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->xmit_more = 1;
nskb->queue_mapping = skb->queue_mapping;
}
@@ -248,7 +247,7 @@ mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- mlx5e_sq_xmit(sq, nskb, *wqe, *pi);
+ mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
mlx5e_sq_fetch_wqe(sq, wqe, pi);
return skb;
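
Passing true here is the point of the new parameter: the resync skb's WQE is posted with the doorbell deferred, so it and the skb posted right after reach hardware on a single kick. An illustrative call sequence (hypothetical foo_* wrapper, return values ignored):

static void foo_post_resync_pair(struct mlx5e_txqsq *sq,
				 struct sk_buff *resync_skb,
				 struct sk_buff *orig_skb)
{
	struct mlx5e_tx_wqe *wqe;
	u16 pi;

	mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
	mlx5e_sq_xmit(sq, resync_skb, wqe, pi, true);	/* defer doorbell */

	mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
	mlx5e_sq_xmit(sq, orig_skb, wqe, pi, false);	/* one kick for both */
}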
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5fdbd3190d9..e08a1eb04e22 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
+#include <net/geneve.h>
#include <linux/bpf.h>
#include <linux/if_bridge.h>
#include <net/page_pool.h>
@@ -43,6 +44,7 @@
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
+#include "en_accel/en_accel.h"
#include "en_accel/tls.h"
#include "accel/ipsec.h"
#include "accel/tls.h"
@@ -2173,10 +2175,13 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ bool allow_swp;
+ allow_swp = mlx5_geneve_tx_allowed(priv->mdev) ||
+ !!MLX5_IPSEC_DEV(priv->mdev);
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
- MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
+ MLX5_SET(sqc, sqc, allow_swp, allow_swp);
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -4103,6 +4108,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
return features;
+
+#if IS_ENABLED(CONFIG_GENEVE)
+ /* Support Geneve offload for default UDP port */
+ if (port == GENEVE_UDP_PORT && mlx5_geneve_tx_allowed(priv->mdev))
+ return features;
+#endif
}
out:
@@ -4674,7 +4685,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
- if (mlx5_vxlan_allowed(mdev->vxlan) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
+ MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4682,7 +4694,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
}
- if (mlx5_vxlan_allowed(mdev->vxlan)) {
+ if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index d75dc44eb2ff..ffc4a36551c8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -44,6 +44,7 @@
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
+#include <net/ipv6_stubs.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
@@ -1827,6 +1828,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct pedit_headers {
struct ethhdr eth;
+ struct vlan_hdr vlan;
struct iphdr ip4;
struct ipv6hdr ip6;
struct tcphdr tcp;
@@ -1884,6 +1886,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
+ OFFLOAD(FIRST_VID, 2, vlan.h_vlan_TCI, 0),
OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
OFFLOAD(SIPV4, 4, ip4.saddr, 0),
@@ -2291,6 +2294,35 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
return (fsystem_guid == psystem_guid);
}
+static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
+ const struct flow_action_entry *act,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct pedit_headers_action *hdrs,
+ u32 *action, struct netlink_ext_ack *extack)
+{
+ u16 mask16 = VLAN_VID_MASK;
+ u16 val16 = act->vlan.vid & VLAN_VID_MASK;
+ const struct flow_action_entry pedit_act = {
+ .id = FLOW_ACTION_MANGLE,
+ .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
+ .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
+ .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
+ .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
+ };
+ int err;
+
+ if (act->vlan.prio) {
+ NL_SET_ERR_MSG_MOD(extack, "Setting VLAN prio is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr,
+ hdrs, NULL);
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return err;
+}
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -2326,6 +2358,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_KERNEL,
+ act, parse_attr, hdrs,
+ &action, extack);
+ if (err)
+ return err;
+
+ break;
case FLOW_ACTION_CSUM:
if (csum_offload_supported(priv, action,
act->csum_flags,
@@ -2544,8 +2585,7 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
}
break;
default:
- /* action is FLOW_ACT_VLAN_MANGLE */
- return -EOPNOTSUPP;
+ return -EINVAL;
}
attr->total_vlan = vlan_idx + 1;
@@ -2679,7 +2719,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ if (act->id == FLOW_ACTION_VLAN_PUSH &&
+ (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+ /* Replace vlan pop+push with vlan modify */
+ action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
+ } else {
+ err = parse_tc_vlan_action(priv, act, attr, &action);
+ }
+ if (err)
+ return err;
+
+ attr->split_count = attr->out_count;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ err = add_vlan_rewrite_action(priv,
+ MLX5_FLOW_NAMESPACE_FDB,
+ act, parse_attr, hdrs,
+ &action, extack);
if (err)
return err;
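
The mangle built in add_vlan_rewrite_action() touches only the VID bits of the TCI; PCP and DEI survive because the mask excludes them. The masking in isolation, as a hypothetical host-order helper:

#include <linux/if_vlan.h>

/* VLAN_VID_MASK is 0x0fff: keep the top four TCI bits (PCP + DEI),
 * replace only the 12-bit VID.
 */
static u16 foo_rewrite_vid(u16 tci, u16 new_vid)
{
	return (tci & ~VLAN_VID_MASK) | (new_vid & VLAN_VID_MASK);
}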
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 25a8f8260c14..40f3f98aa279 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/geneve.h>
#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
@@ -110,11 +111,10 @@ static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb
#endif
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
+ int channel_ix = netdev_pick_tx(dev, skb, NULL);
struct mlx5e_priv *priv = netdev_priv(dev);
- int channel_ix = fallback(dev, skb, NULL);
u16 num_channels;
int up = 0;
@@ -297,7 +297,8 @@ static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq,
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
- struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg)
+ struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
+ bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -320,14 +321,14 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq->stats->stopped++;
}
- if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ if (!xmit_more || netif_xmit_stopped(sq->txq))
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi)
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
@@ -360,7 +361,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -392,6 +393,10 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg = &wqe->eth;
dseg = wqe->data;
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
eseg->mss = mss;
@@ -419,7 +424,7 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, xmit_more);
return NETDEV_TX_OK;
@@ -445,7 +450,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!skb))
return NETDEV_TX_OK;
- return mlx5e_sq_xmit(sq, skb, wqe, pi);
+ return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
}
static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
@@ -655,7 +660,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}
stats->bytes += num_bytes;
- stats->xmit_more += skb->xmit_more;
+ stats->xmit_more += netdev_xmit_more();
headlen = skb->len - ihs - skb->data_len;
ds_cnt += !!headlen;
@@ -700,7 +705,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
goto err_drop;
mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg);
+ num_dma, wi, cseg, false);
return NETDEV_TX_OK;
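
Reduced to its essentials, the doorbell policy after the skb->xmit_more to netdev_xmit_more() conversion looks like this (hypothetical baz_* driver): ring only when the stack signals the end of a batch, or when the queue just stopped and no later xmit call would arrive to ring it.

#include <linux/netdevice.h>

static void baz_ring_doorbell(void)
{
	/* hypothetical MMIO write to the device's doorbell register */
}

static void baz_tx_post(struct netdev_queue *txq, bool xmit_more)
{
	if (!xmit_more || netif_xmit_stopped(txq))
		baz_ring_doorbell();
}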
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index bb6e5b5d9681..46a747f7c162 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -900,14 +900,12 @@ mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector)
}
EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask);
+#ifdef CONFIG_RFS_ACCEL
struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev)
{
-#ifdef CONFIG_RFS_ACCEL
return dev->priv.eq_table->rmap;
-#else
- return NULL;
-#endif
}
+#endif
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn)
{
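
With the NULL stub gone, the accessor exists only under CONFIG_RFS_ACCEL, so callers must carry the same guard. A minimal netdev-setup sketch under that assumption (the real declaration lives in a driver-internal eq header):

#include <linux/netdevice.h>
#include <linux/mlx5/driver.h>

static void foo_setup_rx_rmap(struct net_device *netdev,
			      struct mlx5_core_dev *mdev)
{
#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
#endif
}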
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3f3cd32ae60a..e0ba59b5296f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -431,6 +431,9 @@ static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw,
return index;
}
+/* TODO: This mlx5e_tc function shouldn't be called by eswitch */
+void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 9b2d78ee22b8..fe770cd2151c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1287,13 +1287,13 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
- int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+ int total_vports = MLX5_TOTAL_VPORTS(esw->dev);
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_eswitch_rep *rep;
u8 hw_id[ETH_ALEN], rep_type;
int vport;
- esw->offloads.vport_reps = kcalloc(total_vfs,
+ esw->offloads.vport_reps = kcalloc(total_vports,
sizeof(struct mlx5_eswitch_rep),
GFP_KERNEL);
if (!esw->offloads.vport_reps)
@@ -1523,8 +1523,6 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
return 0;
}
-void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
-
static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
{
mlx5e_tc_clean_fdb_peer_flows(esw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 5633f8572800..8212bfd05733 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -122,7 +122,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
/* Handle add/replace event */
if (fi->fib_nhs == 1) {
if (__mlx5_lag_is_active(ldev)) {
- struct net_device *nh_dev = fi->fib_nh[0].nh_dev;
+ struct net_device *nh_dev = fi->fib_nh[0].fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
mlx5_lag_set_port_affinity(ldev, ++i);
@@ -134,10 +134,10 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
return;
/* Verify next hops are ports of the same hca */
- if (!(fi->fib_nh[0].nh_dev == ldev->pf[0].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[1].netdev) &&
- !(fi->fib_nh[0].nh_dev == ldev->pf[1].netdev &&
- fi->fib_nh[1].nh_dev == ldev->pf[0].netdev)) {
+ if (!(fi->fib_nh[0].fib_nh_dev == ldev->pf[0].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[1].netdev) &&
+ !(fi->fib_nh[0].fib_nh_dev == ldev->pf[1].netdev &&
+ fi->fib_nh[1].fib_nh_dev == ldev->pf[0].netdev)) {
mlx5_core_warn(ldev->pf[0].dev, "Multipath offload require two ports of the same HCA\n");
return;
}
@@ -167,7 +167,7 @@ static void mlx5_lag_fib_nexthop_event(struct mlx5_lag *ldev,
/* nh added/removed */
if (event == FIB_EVENT_NH_DEL) {
- int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->nh_dev);
+ int i = mlx5_lag_dev_get_netdev_idx(ldev, fib_nh->fib_nh_dev);
if (i >= 0) {
i = (i + 1) % 2 + 1; /* peer port */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
index 40f4a19b1ce1..be69c1d7941a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/port_tun.c
@@ -80,10 +80,8 @@ void mlx5_init_port_tun_entropy(struct mlx5_tun_entropy *tun_entropy,
mlx5_query_port_tun_entropy(mdev, &entropy_flags);
tun_entropy->num_enabling_entries = 0;
tun_entropy->num_disabling_entries = 0;
- tun_entropy->enabled = entropy_flags.calc_enabled;
- tun_entropy->enabled =
- (entropy_flags.calc_supported) ?
- entropy_flags.calc_enabled : true;
+ tun_entropy->enabled = entropy_flags.calc_supported ?
+ entropy_flags.calc_enabled : true;
}
static int mlx5_set_entropy(struct mlx5_tun_entropy *tun_entropy,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
index 9a8fd762167b..b9d4f4e19ff9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
+#include <net/vxlan.h>
#include "mlx5_core.h"
#include "vxlan.h"
@@ -204,8 +205,8 @@ struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
spin_lock_init(&vxlan->lock);
hash_init(vxlan->htable);
- /* Hardware adds 4789 by default */
- mlx5_vxlan_add_port(vxlan, 4789);
+ /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */
+ mlx5_vxlan_add_port(vxlan, IANA_VXLAN_UDP_PORT);
return vxlan;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 7b331674622c..6fb99be60584 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -111,7 +111,6 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
@@ -176,6 +175,11 @@ int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw);
void mlx5e_init(void);
void mlx5e_cleanup(void);
+static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
+{
+ return pci_num_vf(dev->pdev) ? true : false;
+}
+
static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
{
/* LACP owner conditions:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b23fa8d2d60..a249b3c3843d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -36,13 +36,6 @@
#include "mlx5_core.h"
#include "eswitch.h"
-bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev)
-{
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
-
- return !!sriov->num_vfs;
-}
-
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
@@ -151,33 +144,10 @@ out:
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
-static int mlx5_pci_enable_sriov(struct pci_dev *pdev, int num_vfs)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
-
- if (pci_num_vf(pdev)) {
- mlx5_core_warn(dev, "Unable to enable pci sriov, already enabled\n");
- return -EBUSY;
- }
-
- err = pci_enable_sriov(pdev, num_vfs);
- if (err)
- mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
-
- return err;
-}
-
-static void mlx5_pci_disable_sriov(struct pci_dev *pdev)
-{
- pci_disable_sriov(pdev);
-}
-
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- int err = 0;
+ int err;
err = mlx5_device_enable_sriov(dev, num_vfs);
if (err) {
@@ -185,42 +155,37 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
return err;
}
- err = mlx5_pci_enable_sriov(pdev, num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
if (err) {
- mlx5_core_warn(dev, "mlx5_pci_enable_sriov failed : %d\n", err);
+ mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
mlx5_device_disable_sriov(dev);
- return err;
}
-
- sriov->num_vfs = num_vfs;
-
- return 0;
+ return err;
}
static void mlx5_sriov_disable(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_sriov *sriov = &dev->priv.sriov;
- mlx5_pci_disable_sriov(pdev);
+ pci_disable_sriov(pdev);
mlx5_device_disable_sriov(dev);
- sriov->num_vfs = 0;
}
int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err = 0;
mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
- if (!mlx5_core_is_pf(dev))
- return -EPERM;
if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
else
mlx5_sriov_disable(pdev);
+ if (!err)
+ sriov->num_vfs = num_vfs;
return err ? err : num_vfs;
}
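
The net effect of this reshuffle is a single bookkeeping point for num_vfs, updated only on success, plus the sriov_configure contract of returning the VF count on success and a negative errno on failure. A skeleton with hypothetical foo_* names:

#include <linux/pci.h>

struct foo_dev {
	int num_vfs;
};

static int foo_sriov_enable(struct pci_dev *pdev, int num_vfs);
static void foo_sriov_disable(struct pci_dev *pdev);

static int foo_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct foo_dev *foo = pci_get_drvdata(pdev);
	int err = 0;

	if (num_vfs)
		err = foo_sriov_enable(pdev, num_vfs);	/* device, then pci */
	else
		foo_sriov_disable(pdev);		/* pci, then device */

	if (!err)
		foo->num_vfs = num_vfs;	/* the one place that records it */

	return err ? err : num_vfs;	/* VF count on success */
}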
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 9c195dfed031..b6b3ff0fe17f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -4,6 +4,7 @@
config MLXSW_CORE
tristate "Mellanox Technologies Switch ASICs support"
+ select NET_DEVLINK
---help---
This driver supports Mellanox Technologies Switch ASICs family.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..027e393c7cb2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1718,7 +1718,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len)
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
struct mlxsw_core_port *mlxsw_core_port =
@@ -1727,6 +1731,9 @@ int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
int err;
mlxsw_core_port->local_port = local_port;
+ devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
+ port_number, split, split_port_subnumber,
+ switch_id, switch_id_len);
err = devlink_port_register(devlink, devlink_port, local_port);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
@@ -1746,17 +1753,13 @@ void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber)
+ void *port_driver_priv, struct net_device *dev)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
mlxsw_core_port->port_driver_priv = port_driver_priv;
- devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
- port_number, split, split_port_subnumber);
devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);
@@ -1796,16 +1799,18 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len)
+
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port)
{
struct mlxsw_core_port *mlxsw_core_port =
&mlxsw_core->ports[local_port];
struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
- return devlink_port_get_phys_port_name(devlink_port, name, len);
+ return devlink_port;
}
-EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
+EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 8ec53f027575..d51dfc3560b6 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -164,20 +164,23 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port);
-int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port,
+ u32 port_number, bool split,
+ u32 split_port_subnumber,
+ const unsigned char *switch_id,
+ unsigned char switch_id_len);
void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
- void *port_driver_priv, struct net_device *dev,
- u32 port_number, bool split,
- u32 split_port_subnumber);
+ void *port_driver_priv, struct net_device *dev);
void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
void *port_driver_priv);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
-int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
- u8 local_port, char *name, size_t len);
+struct devlink_port *
+mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
+ u8 local_port);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index 00c390024350..cf2114273b72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -51,33 +51,20 @@ static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
return 0;
}
-static int
-mlxsw_m_port_get_phys_port_name(struct net_device *dev, char *name, size_t len)
-{
- struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
- struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
- u8 local_port = mlxsw_m_port->local_port;
-
- return mlxsw_core_port_get_phys_port_name(core, local_port, name, len);
-}
-
-static int mlxsw_m_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_m_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- ppid->id_len = sizeof(mlxsw_m->base_mac);
- memcpy(&ppid->id, &mlxsw_m->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_m->core,
+ mlxsw_m_port->local_port);
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
.ndo_open = mlxsw_m_port_dummy_open_stop,
.ndo_stop = mlxsw_m_port_dummy_open_stop,
- .ndo_get_phys_port_name = mlxsw_m_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_m_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
static int mlxsw_m_get_module_info(struct net_device *netdev,
@@ -150,7 +137,10 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_m->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_m->core, local_port,
+ module + 1, false, 0,
+ mlxsw_m->base_mac,
+ sizeof(mlxsw_m->base_mac));
if (err) {
dev_err(mlxsw_m->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -190,7 +180,7 @@ mlxsw_m_port_create(struct mlxsw_m *mlxsw_m, u8 local_port, u8 module)
}
mlxsw_core_port_eth_set(mlxsw_m->core, mlxsw_m_port->local_port,
- mlxsw_m_port, dev, module + 1, false, 0);
+ mlxsw_m_port, dev);
return 0;
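
After the conversion, a driver exposes one callback and the core derives both phys_port_name and the parent switch ID from the devlink_port attributes registered at port init. The per-netdev side, sketched with hypothetical foo_* names:

#include <linux/netdevice.h>
#include <net/devlink.h>

struct foo_port {
	struct devlink_port devlink_port;	/* attrs set at registration */
};

static struct devlink_port *foo_get_devlink_port(struct net_device *dev)
{
	struct foo_port *port = netdev_priv(dev);

	return &port->devlink_port;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_devlink_port = foo_get_devlink_port,
};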
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9eb63300c1d3..fc325f1213fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1254,16 +1254,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
-static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sp_port->mlxsw_sp->core,
- mlxsw_sp_port->local_port,
- name, len);
-}
-
static struct mlxsw_sp_port_mall_tc_entry *
mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
unsigned long cookie) {
@@ -1714,16 +1704,14 @@ static int mlxsw_sp_set_features(struct net_device *dev,
mlxsw_sp_feature_hw_tc);
}
-static int mlxsw_sp_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sp_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- ppid->id_len = sizeof(mlxsw_sp->base_mac);
- memcpy(&ppid->id, &mlxsw_sp->base_mac, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
+ mlxsw_sp_port->local_port);
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
@@ -1739,9 +1727,8 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
- .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
.ndo_set_features = mlxsw_sp_set_features,
- .ndo_get_port_parent_id = mlxsw_sp_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -3391,7 +3378,10 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
struct net_device *dev;
int err;
- err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
+ module + 1, split, lane / width,
+ mlxsw_sp->base_mac,
+ sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -3573,8 +3563,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
- mlxsw_sp_port, dev, module + 1,
- mlxsw_sp_port->split, lane / width);
+ mlxsw_sp_port, dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 8811f6513e36..e993159e8e4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -216,7 +216,6 @@ struct mlxsw_sp_acl_tcam_vregion {
struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
} rehash;
struct mlxsw_sp *mlxsw_sp;
- bool failed_rollback; /* Indicates failed rollback during migration */
unsigned int ref_count;
};
@@ -1256,11 +1255,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
- if (IS_ERR(new_chunk)) {
- if (ctx->this_is_rollback)
- vchunk->vregion->failed_rollback = true;
+ if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
- }
vchunk->chunk2 = vchunk->chunk;
vchunk->chunk = new_chunk;
ctx->current_vchunk = vchunk;
@@ -1318,8 +1314,13 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
vchunk->chunk, credits);
if (err) {
- if (ctx->this_is_rollback)
+ if (ctx->this_is_rollback) {
+ /* Save the ventry which we ended with and try
+ * to continue later on.
+ */
+ ctx->start_ventry = ventry;
return err;
+ }
/* Swap the chunk and chunk2 pointers so the follow-up
* rollback call will see the original chunk pointer
* in vchunk->chunk.
@@ -1397,8 +1398,12 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
- if (err2)
- vregion->failed_rollback = true;
+ if (err2) {
+ trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
+ vregion);
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
+ /* Let the rollback be continued later on. */
+ }
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
@@ -1423,8 +1428,6 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
int err;
trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
- if (vregion->failed_rollback)
- return -EBUSY;
hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
if (IS_ERR(hints_priv))
@@ -1471,11 +1474,9 @@ mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
- if (!vregion->failed_rollback) {
- vregion->region2 = NULL;
- mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
- mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
- }
+ vregion->region2 = NULL;
+ mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
+ mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
ops->region_rehash_hints_put(ctx->hints_priv);
ctx->hints_priv = NULL;
}
@@ -1506,11 +1507,6 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
- if (vregion->failed_rollback) {
- trace_mlxsw_sp_acl_tcam_vregion_rehash_dis(mlxsw_sp,
- vregion);
- dev_err(mlxsw_sp->bus_info->dev, "Failed to rollback during vregion migration fail\n");
- }
}
if (*credits >= 0)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..0ba9daa05a52 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2873,12 +2873,13 @@ mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
return false;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct in6_addr *gw;
int ifindex, weight;
- ifindex = mlxsw_sp_rt6->rt->fib6_nh.nh_dev->ifindex;
- weight = mlxsw_sp_rt6->rt->fib6_nh.nh_weight;
- gw = &mlxsw_sp_rt6->rt->fib6_nh.nh_gw;
+ ifindex = fib6_nh->fib_nh_dev->ifindex;
+ weight = fib6_nh->fib_nh_weight;
+ gw = &fib6_nh->fib_nh_gw6;
if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
weight))
return false;
@@ -2944,7 +2945,7 @@ mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
struct net_device *dev;
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
- dev = mlxsw_sp_rt6->rt->fib6_nh.nh_dev;
+ dev = mlxsw_sp_rt6->rt->fib6_nh.fib_nh_dev;
val ^= dev->ifindex;
}
@@ -3610,7 +3611,7 @@ static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib_nh *fib_nh,
enum mlxsw_sp_ipip_type *p_ipipt)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
return dev &&
fib_nh->nh_parent->fib_type == RTN_UNICAST &&
@@ -3637,7 +3638,7 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
struct fib_nh *fib_nh)
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
@@ -3681,18 +3682,18 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct net_device *dev = fib_nh->nh_dev;
+ struct net_device *dev = fib_nh->fib_nh_dev;
struct in_device *in_dev;
int err;
nh->nh_grp = nh_grp;
nh->key.fib_nh = fib_nh;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- nh->nh_weight = fib_nh->nh_weight;
+ nh->nh_weight = fib_nh->fib_nh_weight;
#else
nh->nh_weight = 1;
#endif
- memcpy(&nh->gw_addr, &fib_nh->nh_gw, sizeof(fib_nh->nh_gw));
+ memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
if (err)
return err;
@@ -3705,7 +3706,7 @@ static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
in_dev = __in_dev_get_rtnl(dev);
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
- fib_nh->nh_flags & RTNH_F_LINKDOWN)
+ fib_nh->fib_nh_flags & RTNH_F_LINKDOWN)
return 0;
err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
@@ -3804,7 +3805,7 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib_info *fi)
{
- return fi->fib_nh->nh_scope == RT_SCOPE_LINK ||
+ return fi->fib_nh->fib_nh_scope == RT_SCOPE_LINK ||
mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fi->fib_nh, NULL);
}
@@ -3946,9 +3947,9 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh.nh_dev &&
+ if (nh->rif && nh->rif->dev == rt->fib6_nh.fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
- &rt->fib6_nh.nh_gw))
+ &rt->fib6_nh.fib_nh_gw6))
return nh;
continue;
}
@@ -3966,7 +3967,7 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP) {
- nh_grp->nexthops->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh_grp->nexthops->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
@@ -3974,9 +3975,9 @@ mlxsw_sp_fib4_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
if (nh->offloaded)
- nh->key.fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -3992,7 +3993,7 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
- nh->key.fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4008,19 +4009,20 @@ mlxsw_sp_fib6_entry_offload_set(struct mlxsw_sp_fib_entry *fib_entry)
if (fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_LOCAL ||
fib_entry->type == MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE) {
list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
- list)->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ list)->rt->fib6_nh.fib_nh_flags |= RTNH_F_OFFLOAD;
return;
}
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+ struct fib6_nh *fib6_nh = &mlxsw_sp_rt6->rt->fib6_nh;
struct mlxsw_sp_nexthop *nh;
nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
if (nh && nh->offloaded)
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags |= RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
else
- mlxsw_sp_rt6->rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4035,7 +4037,7 @@ mlxsw_sp_fib6_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- rt->fib6_nh.nh_flags &= ~RTNH_F_OFFLOAD;
+ rt->fib6_nh.fib_nh_flags &= ~RTNH_F_OFFLOAD;
}
}
@@ -4913,7 +4915,7 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
{
/* RTF_CACHE routes are ignored */
- return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+ return !(rt->fib6_flags & RTF_ADDRCONF) && rt->fib6_nh.fib_nh_has_gw;
}
static struct fib6_info *
@@ -4972,8 +4974,8 @@ static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt,
enum mlxsw_sp_ipip_type *ret)
{
- return rt->fib6_nh.nh_dev &&
- mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.nh_dev, ret);
+ return rt->fib6_nh.fib_nh_dev &&
+ mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh.fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
@@ -4983,7 +4985,7 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
struct mlxsw_sp_rif *rif;
int err;
@@ -5026,11 +5028,11 @@ static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
const struct fib6_info *rt)
{
- struct net_device *dev = rt->fib6_nh.nh_dev;
+ struct net_device *dev = rt->fib6_nh.fib_nh_dev;
nh->nh_grp = nh_grp;
- nh->nh_weight = rt->fib6_nh.nh_weight;
- memcpy(&nh->gw_addr, &rt->fib6_nh.nh_gw, sizeof(nh->gw_addr));
+ nh->nh_weight = rt->fib6_nh.fib_nh_weight;
+ memcpy(&nh->gw_addr, &rt->fib6_nh.fib_nh_gw6, sizeof(nh->gw_addr));
mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
@@ -5053,7 +5055,7 @@ static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
const struct fib6_info *rt)
{
- return rt->fib6_flags & RTF_GATEWAY ||
+ return rt->fib6_nh.fib_nh_has_gw ||
mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}
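
The renames here track the move of per-family nexthop members into the shared fib_nh_common, so IPv4 and IPv6 consumers now read identically named fields. A condensed sketch of both sides (hypothetical helpers):

#include <linux/rtnetlink.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>

static int foo_nh4_oif(const struct fib_nh *nh)
{
	return nh->fib_nh_dev ? nh->fib_nh_dev->ifindex : 0;	/* was nh_dev */
}

static bool foo_nh6_offloaded(const struct fib6_nh *nh)
{
	return nh->fib_nh_flags & RTNH_F_OFFLOAD;		/* was nh_flags */
}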
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchib.c b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
index bcf2e79a21c8..0d9356b3f65d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchib.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchib.c
@@ -30,6 +30,7 @@ struct mlxsw_sib {
struct mlxsw_sib_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
+ u8 hw_id[ETH_ALEN];
};
struct mlxsw_sib_port {
@@ -102,6 +103,18 @@ mlxsw_sib_tx_v1_hdr_construct(struct sk_buff *skb,
mlxsw_tx_v1_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
}
+static int mlxsw_sib_hw_id_get(struct mlxsw_sib *mlxsw_sib)
+{
+ char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
+ int err;
+
+ err = mlxsw_reg_query(mlxsw_sib->core, MLXSW_REG(spad), spad_pl);
+ if (err)
+ return err;
+ mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sib->hw_id);
+ return 0;
+}
+
static int
mlxsw_sib_port_admin_status_set(struct mlxsw_sib_port *mlxsw_sib_port,
bool is_up)
@@ -267,7 +280,9 @@ static int mlxsw_sib_port_create(struct mlxsw_sib *mlxsw_sib, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sib->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sib->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sib->hw_id, sizeof(mlxsw_sib->hw_id));
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
@@ -439,6 +454,12 @@ static int mlxsw_sib_init(struct mlxsw_core *mlxsw_core,
mlxsw_sib->core = mlxsw_core;
mlxsw_sib->bus_info = mlxsw_bus_info;
+ err = mlxsw_sib_hw_id_get(mlxsw_sib);
+ if (err) {
+ dev_err(mlxsw_sib->bus_info->dev, "Failed to get switch HW ID\n");
+ return err;
+ }
+
err = mlxsw_sib_ports_create(mlxsw_sib);
if (err) {
dev_err(mlxsw_sib->bus_info->dev, "Failed to create ports\n");
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 533fe6235b7c..fc4f19167262 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -379,26 +379,14 @@ mlxsw_sx_port_get_stats64(struct net_device *dev,
stats->tx_dropped = tx_dropped;
}
-static int mlxsw_sx_port_get_phys_port_name(struct net_device *dev, char *name,
- size_t len)
-{
- struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
-
- return mlxsw_core_port_get_phys_port_name(mlxsw_sx_port->mlxsw_sx->core,
- mlxsw_sx_port->local_port,
- name, len);
-}
-
-static int mlxsw_sx_port_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static struct devlink_port *
+mlxsw_sx_port_get_devlink_port(struct net_device *dev)
{
struct mlxsw_sx_port *mlxsw_sx_port = netdev_priv(dev);
struct mlxsw_sx *mlxsw_sx = mlxsw_sx_port->mlxsw_sx;
- ppid->id_len = sizeof(mlxsw_sx->hw_id);
- memcpy(&ppid->id, &mlxsw_sx->hw_id, ppid->id_len);
-
- return 0;
+ return mlxsw_core_port_devlink_port_get(mlxsw_sx->core,
+ mlxsw_sx_port->local_port);
}
static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
@@ -407,8 +395,7 @@ static const struct net_device_ops mlxsw_sx_port_netdev_ops = {
.ndo_start_xmit = mlxsw_sx_port_xmit,
.ndo_change_mtu = mlxsw_sx_port_change_mtu,
.ndo_get_stats64 = mlxsw_sx_port_get_stats64,
- .ndo_get_phys_port_name = mlxsw_sx_port_get_phys_port_name,
- .ndo_get_port_parent_id = mlxsw_sx_port_get_port_parent_id,
+ .ndo_get_devlink_port = mlxsw_sx_port_get_devlink_port,
};
static void mlxsw_sx_port_get_drvinfo(struct net_device *dev,
@@ -1102,7 +1089,7 @@ static int __mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
}
mlxsw_core_port_eth_set(mlxsw_sx->core, mlxsw_sx_port->local_port,
- mlxsw_sx_port, dev, module + 1, false, 0);
+ mlxsw_sx_port, dev);
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
@@ -1127,7 +1114,9 @@ static int mlxsw_sx_port_eth_create(struct mlxsw_sx *mlxsw_sx, u8 local_port,
{
int err;
- err = mlxsw_core_port_init(mlxsw_sx->core, local_port);
+ err = mlxsw_core_port_init(mlxsw_sx->core, local_port,
+ module + 1, false, 0,
+ mlxsw_sx->hw_id, sizeof(mlxsw_sx->hw_id));
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 8f72587b5a2c..0567e4f387a5 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Microchip ENC28J60 ethernet driver (MAC + PHY)
*
@@ -5,11 +6,6 @@
* Author: Claudio Lanconelli <lanconelli.claudio@eptar.com>
* based on enc28j60.c written by David Anders for 2.4 kernel version
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* $Id: enc28j60.c,v 1.22 2007/12/20 10:47:01 claudio Exp $
*/
@@ -18,9 +14,9 @@
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
+#include <linux/property.h>
#include <linux/string.h>
#include <linux/errno.h>
-#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
@@ -28,7 +24,6 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/of_net.h>
#include "enc28j60_hw.h"
@@ -41,10 +36,11 @@
(NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK)
/* Buffer size required for the largest SPI transfer (i.e., reading a
- * frame). */
+ * frame).
+ */
#define SPI_TRANSFER_BUF_LEN (4 + MAX_FRAMELEN)
-#define TX_TIMEOUT (4 * HZ)
+#define TX_TIMEOUT (4 * HZ)
/* Max TX retries in case of collision as suggested by errata datasheet */
#define MAX_TX_RETRYCOUNT 16
@@ -83,11 +79,12 @@ static struct {
/*
* SPI read buffer
- * wait for the SPI transfer and copy received data to destination
+ * Wait for the SPI transfer and copy received data to destination.
*/
static int
spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
+ struct device *dev = &priv->spi->dev;
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
struct spi_transfer tx = {
@@ -113,8 +110,8 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
@@ -122,9 +119,9 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
/*
* SPI write buffer
*/
-static int spi_write_buf(struct enc28j60_net *priv, int len,
- const u8 *data)
+static int spi_write_buf(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
int ret;
if (len > SPI_TRANSFER_BUF_LEN - 1 || len <= 0)
@@ -134,8 +131,8 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
memcpy(&priv->spi_transfer_buf[1], data, len);
ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
}
return ret;
}
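
Every logging change in this file follows the same pattern; side by side, with the surrounding function elided (priv and ret as in the hunks above):

/* before: hand-rolled prefix via the driver name macro */
printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", __func__, ret);

/* after: the driver core prefixes the device name automatically */
dev_printk(KERN_DEBUG, &priv->spi->dev, "%s() failed: ret = %d\n",
	   __func__, ret);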
@@ -143,9 +140,9 @@ static int spi_write_buf(struct enc28j60_net *priv, int len,
/*
* basic SPI read operation
*/
-static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
- u8 addr)
+static u8 spi_read_op(struct enc28j60_net *priv, u8 op, u8 addr)
{
+ struct device *dev = &priv->spi->dev;
u8 tx_buf[2];
u8 rx_buf[4];
u8 val = 0;
@@ -159,8 +156,8 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
tx_buf[0] = op | (addr & ADDR_MASK);
ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen);
if (ret)
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
else
val = rx_buf[slen - 1];
@@ -170,28 +167,25 @@ static u8 spi_read_op(struct enc28j60_net *priv, u8 op,
/*
* basic SPI write operation
*/
-static int spi_write_op(struct enc28j60_net *priv, u8 op,
- u8 addr, u8 val)
+static int spi_write_op(struct enc28j60_net *priv, u8 op, u8 addr, u8 val)
{
+ struct device *dev = &priv->spi->dev;
int ret;
priv->spi_transfer_buf[0] = op | (addr & ADDR_MASK);
priv->spi_transfer_buf[1] = val;
ret = spi_write(priv->spi, priv->spi_transfer_buf, 2);
if (ret && netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n",
- __func__, ret);
+ dev_printk(KERN_DEBUG, dev, "%s() failed: ret = %d\n",
+ __func__, ret);
return ret;
}
static void enc28j60_soft_reset(struct enc28j60_net *priv)
{
- if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET);
/* Errata workaround #1, CLKRDY check is unreliable,
- * delay at least 1 mS instead */
+ * delay at least 1 ms instead */
udelay(2000);
}
@@ -203,7 +197,7 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
u8 b = (addr & BANK_MASK) >> 5;
/* These registers (EIE, EIR, ESTAT, ECON2, ECON1)
- * are present in all banks, no need to switch bank
+ * are present in all banks, no need to switch bank.
*/
if (addr >= EIE && addr <= ECON1)
return;
@@ -242,15 +236,13 @@ static void enc28j60_set_bank(struct enc28j60_net *priv, u8 addr)
/*
* Register bit field Set
*/
-static void nolock_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_SET, addr, mask);
}
-static void locked_reg_bfset(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfset(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfset(priv, addr, mask);
@@ -260,15 +252,13 @@ static void locked_reg_bfset(struct enc28j60_net *priv,
/*
* Register bit field Clear
*/
-static void nolock_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void nolock_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
enc28j60_set_bank(priv, addr);
spi_write_op(priv, ENC28J60_BIT_FIELD_CLR, addr, mask);
}
-static void locked_reg_bfclr(struct enc28j60_net *priv,
- u8 addr, u8 mask)
+static void locked_reg_bfclr(struct enc28j60_net *priv, u8 addr, u8 mask)
{
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, addr, mask);
@@ -278,15 +268,13 @@ static void locked_reg_bfclr(struct enc28j60_net *priv,
/*
* Register byte read
*/
-static int nolock_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regb_read(struct enc28j60_net *priv, u8 address)
{
enc28j60_set_bank(priv, address);
return spi_read_op(priv, ENC28J60_READ_CTRL_REG, address);
}
-static int locked_regb_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regb_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -300,8 +288,7 @@ static int locked_regb_read(struct enc28j60_net *priv,
/*
* Register word read
*/
-static int nolock_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int nolock_regw_read(struct enc28j60_net *priv, u8 address)
{
int rl, rh;
@@ -312,8 +299,7 @@ static int nolock_regw_read(struct enc28j60_net *priv,
return (rh << 8) | rl;
}
-static int locked_regw_read(struct enc28j60_net *priv,
- u8 address)
+static int locked_regw_read(struct enc28j60_net *priv, u8 address)
{
int ret;
@@ -327,15 +313,13 @@ static int locked_regw_read(struct enc28j60_net *priv,
/*
* Register byte write
*/
-static void nolock_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void nolock_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, data);
}
-static void locked_regb_write(struct enc28j60_net *priv,
- u8 address, u8 data)
+static void locked_regb_write(struct enc28j60_net *priv, u8 address, u8 data)
{
mutex_lock(&priv->lock);
nolock_regb_write(priv, address, data);
@@ -345,8 +329,7 @@ static void locked_regb_write(struct enc28j60_net *priv,
/*
* Register word write
*/
-static void nolock_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void nolock_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
enc28j60_set_bank(priv, address);
spi_write_op(priv, ENC28J60_WRITE_CTRL_REG, address, (u8) data);
@@ -354,8 +337,7 @@ static void nolock_regw_write(struct enc28j60_net *priv,
(u8) (data >> 8));
}
-static void locked_regw_write(struct enc28j60_net *priv,
- u8 address, u16 data)
+static void locked_regw_write(struct enc28j60_net *priv, u8 address, u16 data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, address, data);
@@ -364,20 +346,23 @@ static void locked_regw_write(struct enc28j60_net *priv,
/*
* Buffer memory read
- * Select the starting address and execute a SPI buffer read
+ * Select the starting address and execute a SPI buffer read.
*/
-static void enc28j60_mem_read(struct enc28j60_net *priv,
- u16 addr, int len, u8 *data)
+static void enc28j60_mem_read(struct enc28j60_net *priv, u16 addr, int len,
+ u8 *data)
{
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERDPTL, addr);
#ifdef CONFIG_ENC28J60_WRITEVERIFY
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
u16 reg;
+
reg = nolock_regw_read(priv, ERDPTL);
if (reg != addr)
- printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT "
- "(0x%04x - 0x%04x)\n", __func__, reg, addr);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() error writing ERDPT (0x%04x - 0x%04x)\n",
+ __func__, reg, addr);
}
#endif
spi_read_buf(priv, len, data);
@@ -390,6 +375,8 @@ static void enc28j60_mem_read(struct enc28j60_net *priv,
static void
enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
/* Set the write pointer to start of transmit buffer area */
nolock_regw_write(priv, EWRPTL, TXSTART_INIT);
@@ -398,9 +385,9 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
u16 reg;
reg = nolock_regw_read(priv, EWRPTL);
if (reg != TXSTART_INIT)
- printk(KERN_DEBUG DRV_NAME
- ": %s() ERWPT:0x%04x != 0x%04x\n",
- __func__, reg, TXSTART_INIT);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERWPT:0x%04x != 0x%04x\n",
+ __func__, reg, TXSTART_INIT);
}
#endif
/* Set the TXND pointer to correspond to the packet size given */
@@ -408,30 +395,28 @@ enc28j60_packet_write(struct enc28j60_net *priv, int len, const u8 *data)
/* write per-packet control byte */
spi_write_op(priv, ENC28J60_WRITE_BUF_MEM, 0, 0x00);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after control byte ERWPT:0x%04x\n",
- __func__, nolock_regw_read(priv, EWRPTL));
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after control byte ERWPT:0x%04x\n",
+ __func__, nolock_regw_read(priv, EWRPTL));
/* copy the packet into the transmit buffer */
spi_write_buf(priv, len, data);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() after write packet ERWPT:0x%04x, len=%d\n",
- __func__, nolock_regw_read(priv, EWRPTL), len);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() after write packet ERWPT:0x%04x, len=%d\n",
+ __func__, nolock_regw_read(priv, EWRPTL), len);
mutex_unlock(&priv->lock);
}
-static unsigned long msec20_to_jiffies;
-
static int poll_ready(struct enc28j60_net *priv, u8 reg, u8 mask, u8 val)
{
- unsigned long timeout = jiffies + msec20_to_jiffies;
+ struct device *dev = &priv->spi->dev;
+ unsigned long timeout = jiffies + msecs_to_jiffies(20);
/* 20 ms read timeout */
while ((nolock_regb_read(priv, reg) & mask) != val) {
if (time_after(jiffies, timeout)) {
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev,
- "reg %02x ready timeout!\n", reg);
+ dev_dbg(dev, "reg %02x ready timeout!\n", reg);
return -ETIMEDOUT;
}
cpu_relax();
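This also retires the msec20_to_jiffies global that used to cache the converted value from module init (its setup code is deleted at the end of this patch); msecs_to_jiffies(20) is trivial arithmetic, so computing it per call is fine. The underlying idiom, with a hypothetical condition() standing in for the masked register read:

	unsigned long timeout = jiffies + msecs_to_jiffies(20);

	while (!condition()) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		cpu_relax();
	}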
@@ -449,7 +434,7 @@ static int wait_phy_ready(struct enc28j60_net *priv)
/*
* PHY register read
- * PHY registers are not accessed directly, but through the MII
+ * PHY registers are not accessed directly, but through the MII.
*/
static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
{
@@ -465,7 +450,7 @@ static u16 enc28j60_phy_read(struct enc28j60_net *priv, u8 address)
/* quit reading */
nolock_regb_write(priv, MICMD, 0x00);
/* return the data */
- ret = nolock_regw_read(priv, MIRDL);
+ ret = nolock_regw_read(priv, MIRDL);
mutex_unlock(&priv->lock);
return ret;
@@ -494,13 +479,13 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
{
int ret;
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
mutex_lock(&priv->lock);
if (!priv->hw_enable) {
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME
- ": %s: Setting MAC address to %pM\n",
- ndev->name, ndev->dev_addr);
+ dev_info(dev, "%s: Setting MAC address to %pM\n",
+ ndev->name, ndev->dev_addr);
/* NOTE: MAC address in ENC28J60 is byte-backward */
nolock_regb_write(priv, MAADR5, ndev->dev_addr[0]);
nolock_regb_write(priv, MAADR4, ndev->dev_addr[1]);
@@ -511,9 +496,9 @@ static int enc28j60_set_hw_macaddr(struct net_device *ndev)
ret = 0;
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME
- ": %s() Hardware must be disabled to set "
- "Mac address\n", __func__);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() Hardware must be disabled to set Mac address\n",
+ __func__);
ret = -EBUSY;
}
mutex_unlock(&priv->lock);
@@ -532,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ ether_addr_copy(dev->dev_addr, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -541,33 +526,36 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
*/
static void enc28j60_dump_regs(struct enc28j60_net *priv, const char *msg)
{
+ struct device *dev = &priv->spi->dev;
+
mutex_lock(&priv->lock);
- printk(KERN_DEBUG DRV_NAME " %s\n"
- "HwRevID: 0x%02x\n"
- "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
- " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
- "MAC : MACON1 MACON3 MACON4\n"
- " 0x%02x 0x%02x 0x%02x\n"
- "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
- " 0x%04x 0x%04x 0x%04x 0x%04x "
- "0x%02x 0x%02x 0x%04x\n"
- "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
- " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
- msg, nolock_regb_read(priv, EREVID),
- nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
- nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
- nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
- nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
- nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
- nolock_regw_read(priv, ERXWRPTL),
- nolock_regw_read(priv, ERXRDPTL),
- nolock_regb_read(priv, ERXFCON),
- nolock_regb_read(priv, EPKTCNT),
- nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
- nolock_regw_read(priv, ETXNDL),
- nolock_regb_read(priv, MACLCON1),
- nolock_regb_read(priv, MACLCON2),
- nolock_regb_read(priv, MAPHSUP));
+ dev_printk(KERN_DEBUG, dev,
+ " %s\n"
+ "HwRevID: 0x%02x\n"
+ "Cntrl: ECON1 ECON2 ESTAT EIR EIE\n"
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n"
+ "MAC : MACON1 MACON3 MACON4\n"
+ " 0x%02x 0x%02x 0x%02x\n"
+ "Rx : ERXST ERXND ERXWRPT ERXRDPT ERXFCON EPKTCNT MAMXFL\n"
+ " 0x%04x 0x%04x 0x%04x 0x%04x "
+ "0x%02x 0x%02x 0x%04x\n"
+ "Tx : ETXST ETXND MACLCON1 MACLCON2 MAPHSUP\n"
+ " 0x%04x 0x%04x 0x%02x 0x%02x 0x%02x\n",
+ msg, nolock_regb_read(priv, EREVID),
+ nolock_regb_read(priv, ECON1), nolock_regb_read(priv, ECON2),
+ nolock_regb_read(priv, ESTAT), nolock_regb_read(priv, EIR),
+ nolock_regb_read(priv, EIE), nolock_regb_read(priv, MACON1),
+ nolock_regb_read(priv, MACON3), nolock_regb_read(priv, MACON4),
+ nolock_regw_read(priv, ERXSTL), nolock_regw_read(priv, ERXNDL),
+ nolock_regw_read(priv, ERXWRPTL),
+ nolock_regw_read(priv, ERXRDPTL),
+ nolock_regb_read(priv, ERXFCON),
+ nolock_regb_read(priv, EPKTCNT),
+ nolock_regw_read(priv, MAMXFLL), nolock_regw_read(priv, ETXSTL),
+ nolock_regw_read(priv, ETXNDL),
+ nolock_regb_read(priv, MACLCON1),
+ nolock_regb_read(priv, MACLCON2),
+ nolock_regb_read(priv, MAPHSUP));
mutex_unlock(&priv->lock);
}
@@ -599,12 +587,13 @@ static u16 rx_packet_start(u16 ptr)
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
u16 erxrdpt;
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) RXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set receive buffer start + end */
@@ -617,10 +606,12 @@ static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
+ struct device *dev = &priv->spi->dev;
+
if (start > 0x1FFF || end > 0x1FFF || start > end) {
if (netif_msg_drv(priv))
- printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO "
- "bad parameters!\n", __func__, start, end);
+ dev_err(dev, "%s(%d, %d) TXFIFO bad parameters!\n",
+ __func__, start, end);
return;
}
/* set transmit buffer start + end */
@@ -630,14 +621,15 @@ static void nolock_txfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
/*
* Low power mode shrinks power consumption about 100x, so we'd like
- * the chip to be in that mode whenever it's inactive. (However, we
- * can't stay in lowpower mode during suspend with WOL active.)
+ * the chip to be in that mode whenever it's inactive. (However, we
+ * can't stay in low power mode during suspend with WOL active.)
*/
static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
{
+ struct device *dev = &priv->spi->dev;
+
if (netif_msg_drv(priv))
- dev_dbg(&priv->spi->dev, "%s power...\n",
- is_low ? "low" : "high");
+ dev_dbg(dev, "%s power...\n", is_low ? "low" : "high");
mutex_lock(&priv->lock);
if (is_low) {
@@ -656,11 +648,12 @@ static void enc28j60_lowpower(struct enc28j60_net *priv, bool is_low)
static int enc28j60_hw_init(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
u8 reg;
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__,
- priv->full_duplex ? "FullDuplex" : "HalfDuplex");
+ dev_printk(KERN_DEBUG, dev, "%s() - %s\n", __func__,
+ priv->full_duplex ? "FullDuplex" : "HalfDuplex");
mutex_lock(&priv->lock);
/* first reset the chip */
@@ -682,15 +675,15 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* Check the RevID.
* If it's 0x00 or 0xFF probably the enc28j60 is not mounted or
- * damaged
+ * damaged.
*/
reg = locked_regb_read(priv, EREVID);
if (netif_msg_drv(priv))
- printk(KERN_INFO DRV_NAME ": chip RevID: 0x%02x\n", reg);
+ dev_info(dev, "chip RevID: 0x%02x\n", reg);
if (reg == 0x00 || reg == 0xff) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n",
- __func__, reg);
+ dev_printk(KERN_DEBUG, dev, "%s() Invalid RevId %d\n",
+ __func__, reg);
return 0;
}
@@ -723,7 +716,7 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
/*
* MACLCON1 (default)
* MACLCON2 (default)
- * Set the maximum packet size which the controller will accept
+ * Set the maximum packet size which the controller will accept.
*/
locked_regw_write(priv, MAMXFLL, MAX_FRAMELEN);
@@ -750,10 +743,12 @@ static int enc28j60_hw_init(struct enc28j60_net *priv)
static void enc28j60_hw_enable(struct enc28j60_net *priv)
{
+ struct device *dev = &priv->spi->dev;
+
/* enable interrupts */
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n",
- __func__);
+ dev_printk(KERN_DEBUG, dev, "%s() enabling interrupts.\n",
+ __func__);
enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE);
@@ -772,7 +767,7 @@ static void enc28j60_hw_enable(struct enc28j60_net *priv)
static void enc28j60_hw_disable(struct enc28j60_net *priv)
{
mutex_lock(&priv->lock);
- /* disable interrutps and packet reception */
+ /* disable interrupts and packet reception */
nolock_regb_write(priv, EIE, 0x00);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
priv->hw_enable = false;
@@ -793,14 +788,12 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
priv->full_duplex = (duplex == DUPLEX_FULL);
else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev,
- "unsupported link setting\n");
+ netdev_warn(ndev, "unsupported link setting\n");
ret = -EOPNOTSUPP;
}
} else {
if (netif_msg_link(priv))
- dev_warn(&ndev->dev, "Warning: hw must be disabled "
- "to set link mode\n");
+ netdev_warn(ndev, "Warning: hw must be disabled to set link mode\n");
ret = -EBUSY;
}
return ret;
@@ -811,21 +804,23 @@ enc28j60_setlink(struct net_device *ndev, u8 autoneg, u16 speed, u8 duplex)
*/
static void enc28j60_read_tsv(struct enc28j60_net *priv, u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
int endptr;
endptr = locked_regw_read(priv, ETXNDL);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": reading TSV at addr:0x%04x\n",
- endptr + 1);
+ dev_printk(KERN_DEBUG, dev, "reading TSV at addr:0x%04x\n",
+ endptr + 1);
enc28j60_mem_read(priv, endptr + 1, TSV_SIZE, tsv);
}
static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
- u8 tsv[TSV_SIZE])
+ u8 tsv[TSV_SIZE])
{
+ struct device *dev = &priv->spi->dev;
u16 tmp1, tmp2;
- printk(KERN_DEBUG DRV_NAME ": %s - TSV:\n", msg);
+ dev_printk(KERN_DEBUG, dev, "%s - TSV:\n", msg);
tmp1 = tsv[1];
tmp1 <<= 8;
tmp1 |= tsv[0];
@@ -834,30 +829,32 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
tmp2 <<= 8;
tmp2 |= tsv[4];
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, CollisionCount: %d,"
- " TotByteOnWire: %d\n", tmp1, tsv[2] & 0x0f, tmp2);
- printk(KERN_DEBUG DRV_NAME ": TxDone: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", TSV_GETBIT(tsv, TSV_TXDONE),
- TSV_GETBIT(tsv, TSV_TXCRCERROR),
- TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
- TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "PacketDefer: %d, ExDefer: %d\n",
- TSV_GETBIT(tsv, TSV_TXMULTICAST),
- TSV_GETBIT(tsv, TSV_TXBROADCAST),
- TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
- TSV_GETBIT(tsv, TSV_TXEXDEFER));
- printk(KERN_DEBUG DRV_NAME ": ExCollision: %d, LateCollision: %d, "
- "Giant: %d, Underrun: %d\n",
- TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
- TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
- TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d, "
- "BackPressApp: %d, VLanTagFrame: %d\n",
- TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
- TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
- TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
- TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
+ dev_printk(KERN_DEBUG, dev,
+ "ByteCount: %d, CollisionCount: %d, TotByteOnWire: %d\n",
+ tmp1, tsv[2] & 0x0f, tmp2);
+ dev_printk(KERN_DEBUG, dev,
+ "TxDone: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ TSV_GETBIT(tsv, TSV_TXDONE),
+ TSV_GETBIT(tsv, TSV_TXCRCERROR),
+ TSV_GETBIT(tsv, TSV_TXLENCHKERROR),
+ TSV_GETBIT(tsv, TSV_TXLENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, PacketDefer: %d, ExDefer: %d\n",
+ TSV_GETBIT(tsv, TSV_TXMULTICAST),
+ TSV_GETBIT(tsv, TSV_TXBROADCAST),
+ TSV_GETBIT(tsv, TSV_TXPACKETDEFER),
+ TSV_GETBIT(tsv, TSV_TXEXDEFER));
+ dev_printk(KERN_DEBUG, dev,
+ "ExCollision: %d, LateCollision: %d, Giant: %d, Underrun: %d\n",
+ TSV_GETBIT(tsv, TSV_TXEXCOLLISION),
+ TSV_GETBIT(tsv, TSV_TXLATECOLLISION),
+ TSV_GETBIT(tsv, TSV_TXGIANT), TSV_GETBIT(tsv, TSV_TXUNDERRUN));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, BackPressApp: %d, VLanTagFrame: %d\n",
+ TSV_GETBIT(tsv, TSV_TXCONTROLFRAME),
+ TSV_GETBIT(tsv, TSV_TXPAUSEFRAME),
+ TSV_GETBIT(tsv, TSV_BACKPRESSUREAPP),
+ TSV_GETBIT(tsv, TSV_TXVLANTAGFRAME));
}
/*
@@ -866,27 +863,29 @@ static void enc28j60_dump_tsv(struct enc28j60_net *priv, const char *msg,
static void enc28j60_dump_rsv(struct enc28j60_net *priv, const char *msg,
u16 pk_ptr, int len, u16 sts)
{
- printk(KERN_DEBUG DRV_NAME ": %s - NextPk: 0x%04x - RSV:\n",
- msg, pk_ptr);
- printk(KERN_DEBUG DRV_NAME ": ByteCount: %d, DribbleNibble: %d\n", len,
- RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
- printk(KERN_DEBUG DRV_NAME ": RxOK: %d, CRCErr:%d, LenChkErr: %d,"
- " LenOutOfRange: %d\n", RSV_GETBIT(sts, RSV_RXOK),
- RSV_GETBIT(sts, RSV_CRCERROR),
- RSV_GETBIT(sts, RSV_LENCHECKERR),
- RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
- printk(KERN_DEBUG DRV_NAME ": Multicast: %d, Broadcast: %d, "
- "LongDropEvent: %d, CarrierEvent: %d\n",
- RSV_GETBIT(sts, RSV_RXMULTICAST),
- RSV_GETBIT(sts, RSV_RXBROADCAST),
- RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
- RSV_GETBIT(sts, RSV_CARRIEREV));
- printk(KERN_DEBUG DRV_NAME ": ControlFrame: %d, PauseFrame: %d,"
- " UnknownOp: %d, VLanTagFrame: %d\n",
- RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
- RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
- RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
- RSV_GETBIT(sts, RSV_RXTYPEVLAN));
+ struct device *dev = &priv->spi->dev;
+
+ dev_printk(KERN_DEBUG, dev, "%s - NextPk: 0x%04x - RSV:\n", msg, pk_ptr);
+ dev_printk(KERN_DEBUG, dev, "ByteCount: %d, DribbleNibble: %d\n",
+ len, RSV_GETBIT(sts, RSV_DRIBBLENIBBLE));
+ dev_printk(KERN_DEBUG, dev,
+ "RxOK: %d, CRCErr:%d, LenChkErr: %d, LenOutOfRange: %d\n",
+ RSV_GETBIT(sts, RSV_RXOK),
+ RSV_GETBIT(sts, RSV_CRCERROR),
+ RSV_GETBIT(sts, RSV_LENCHECKERR),
+ RSV_GETBIT(sts, RSV_LENOUTOFRANGE));
+ dev_printk(KERN_DEBUG, dev,
+ "Multicast: %d, Broadcast: %d, LongDropEvent: %d, CarrierEvent: %d\n",
+ RSV_GETBIT(sts, RSV_RXMULTICAST),
+ RSV_GETBIT(sts, RSV_RXBROADCAST),
+ RSV_GETBIT(sts, RSV_RXLONGEVDROPEV),
+ RSV_GETBIT(sts, RSV_CARRIEREV));
+ dev_printk(KERN_DEBUG, dev,
+ "ControlFrame: %d, PauseFrame: %d, UnknownOp: %d, VLanTagFrame: %d\n",
+ RSV_GETBIT(sts, RSV_RXCONTROLFRAME),
+ RSV_GETBIT(sts, RSV_RXPAUSEFRAME),
+ RSV_GETBIT(sts, RSV_RXUNKNOWNOPCODE),
+ RSV_GETBIT(sts, RSV_RXTYPEVLAN));
}
static void dump_packet(const char *msg, int len, const char *data)
@@ -904,20 +903,20 @@ static void dump_packet(const char *msg, int len, const char *data)
static void enc28j60_hw_rx(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
struct sk_buff *skb = NULL;
u16 erxrdpt, next_packet, rxstat;
u8 rsv[RSV_SIZE];
int len;
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": RX pk_addr:0x%04x\n",
- priv->next_pk_ptr);
+ netdev_printk(KERN_DEBUG, ndev, "RX pk_addr:0x%04x\n",
+ priv->next_pk_ptr);
if (unlikely(priv->next_pk_ptr > RXEND_INIT)) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "%s() Invalid packet address!! 0x%04x\n",
- __func__, priv->next_pk_ptr);
+ netdev_err(ndev, "%s() Invalid packet address!! 0x%04x\n",
+ __func__, priv->next_pk_ptr);
/* packet address corrupted: reset RX logic */
mutex_lock(&priv->lock);
nolock_reg_bfclr(priv, ECON1, ECON1_RXEN);
@@ -950,7 +949,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
if (!RSV_GETBIT(rxstat, RSV_RXOK) || len > MAX_FRAMELEN) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev, "Rx Error (%04x)\n", rxstat);
+ netdev_err(ndev, "Rx Error (%04x)\n", rxstat);
ndev->stats.rx_errors++;
if (RSV_GETBIT(rxstat, RSV_CRCERROR))
ndev->stats.rx_crc_errors++;
@@ -962,8 +961,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);
if (!skb) {
if (netif_msg_rx_err(priv))
- dev_err(&ndev->dev,
- "out of memory for Rx'd frame\n");
+ netdev_err(ndev, "out of memory for Rx'd frame\n");
ndev->stats.rx_dropped++;
} else {
skb_reserve(skb, NET_IP_ALIGN);
@@ -983,12 +981,12 @@ static void enc28j60_hw_rx(struct net_device *ndev)
/*
* Move the RX read pointer to the start of the next
* received packet.
- * This frees the memory we just read out
+ * This frees the memory we just read out.
*/
erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n",
- __func__, erxrdpt);
+ dev_printk(KERN_DEBUG, dev, "%s() ERXRDPT:0x%04x\n",
+ __func__, erxrdpt);
mutex_lock(&priv->lock);
nolock_regw_write(priv, ERXRDPTL, erxrdpt);
@@ -997,9 +995,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
u16 reg;
reg = nolock_regw_read(priv, ERXRDPTL);
if (reg != erxrdpt)
- printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT verify "
- "error (0x%04x - 0x%04x)\n", __func__,
- reg, erxrdpt);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() ERXRDPT verify error (0x%04x - 0x%04x)\n",
+ __func__, reg, erxrdpt);
}
#endif
priv->next_pk_ptr = next_packet;
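For context, erxrdpt_workaround() (defined earlier in this file) implements ENC28J60 errata #14: ERXRDPT must only ever be programmed with an odd value. The usual formulation, sketched under that assumption:

	static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
	{
		u16 erxrdpt;

		/* errata #14: ERXRDPT must always be written with an odd value */
		if ((next_packet_ptr - 1 < start) || (next_packet_ptr - 1 > end))
			erxrdpt = end;
		else
			erxrdpt = next_packet_ptr - 1;

		return erxrdpt;
	}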
@@ -1013,6 +1011,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
*/
static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
int epkcnt, erxst, erxnd, erxwr, erxrd;
int free_space;
@@ -1035,8 +1034,8 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
}
mutex_unlock(&priv->lock);
if (netif_msg_rx_status(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n",
- __func__, free_space);
+ netdev_printk(KERN_DEBUG, ndev, "%s() free_space = %d\n",
+ __func__, free_space);
return free_space;
}
@@ -1046,24 +1045,25 @@ static int enc28j60_get_free_rxfifo(struct enc28j60_net *priv)
static void enc28j60_check_link_status(struct net_device *ndev)
{
struct enc28j60_net *priv = netdev_priv(ndev);
+ struct device *dev = &priv->spi->dev;
u16 reg;
int duplex;
reg = enc28j60_phy_read(priv, PHSTAT2);
if (netif_msg_hw(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, "
- "PHSTAT2: %04x\n", __func__,
- enc28j60_phy_read(priv, PHSTAT1), reg);
+ dev_printk(KERN_DEBUG, dev,
+ "%s() PHSTAT1: %04x, PHSTAT2: %04x\n", __func__,
+ enc28j60_phy_read(priv, PHSTAT1), reg);
duplex = reg & PHSTAT2_DPXSTAT;
if (reg & PHSTAT2_LSTAT) {
netif_carrier_on(ndev);
if (netif_msg_ifup(priv))
- dev_info(&ndev->dev, "link up - %s\n",
- duplex ? "Full duplex" : "Half duplex");
+ netdev_info(ndev, "link up - %s\n",
+ duplex ? "Full duplex" : "Half duplex");
} else {
if (netif_msg_ifdown(priv))
- dev_info(&ndev->dev, "link down\n");
+ netdev_info(ndev, "link down\n");
netif_carrier_off(ndev);
}
}
@@ -1089,8 +1089,8 @@ static void enc28j60_tx_clear(struct net_device *ndev, bool err)
/*
* RX handler
- * ignore PKTIF because is unreliable! (look at the errata datasheet)
- * check EPKTCNT is the suggested workaround.
+ * Ignore PKTIF because it is unreliable (see the errata datasheet);
+ * checking EPKTCNT is the suggested workaround.
* We don't need to clear the interrupt flag; that happens automatically
* when enc28j60_hw_rx() decrements the packet counter.
* Returns how many packets were processed.
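Concretely, the handler below reads EPKTCNT once and drains that many frames; each enc28j60_hw_rx() pass finishes with

	nolock_reg_bfset(priv, ECON2, ECON2_PKTDEC);

which makes the hardware decrement EPKTCNT and retires the interrupt condition without ever consulting EIR.PKTIF.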
@@ -1102,13 +1102,14 @@ static int enc28j60_rx_interrupt(struct net_device *ndev)
pk_counter = locked_regb_read(priv, EPKTCNT);
if (pk_counter && netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": intRX, pk_cnt: %d\n", pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "intRX, pk_cnt: %d\n",
+ pk_counter);
if (pk_counter > priv->max_pk_counter) {
/* update statistics */
priv->max_pk_counter = pk_counter;
if (netif_msg_rx_status(priv) && priv->max_pk_counter > 1)
- printk(KERN_DEBUG DRV_NAME ": RX max_pk_cnt: %d\n",
- priv->max_pk_counter);
+ netdev_printk(KERN_DEBUG, ndev, "RX max_pk_cnt: %d\n",
+ priv->max_pk_counter);
}
ret = pk_counter;
while (pk_counter-- > 0)
@@ -1124,8 +1125,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
struct net_device *ndev = priv->netdev;
int intflags, loop;
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
/* disable further interrupts */
locked_reg_bfclr(priv, EIE, EIE_INTIE);
@@ -1136,16 +1135,16 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_DMAIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intDMA(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intDMA(%d)\n",
+ loop);
locked_reg_bfclr(priv, EIR, EIR_DMAIF);
}
/* LINK changed handler */
if ((intflags & EIR_LINKIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intLINK(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intLINK(%d)\n",
+ loop);
enc28j60_check_link_status(ndev);
/* read PHIR to clear the flag */
enc28j60_phy_read(priv, PHIR);
@@ -1156,13 +1155,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
bool err = false;
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTX(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTX(%d)\n",
+ loop);
priv->tx_retry_count = 0;
if (locked_regb_read(priv, ESTAT) & ESTAT_TXABRT) {
if (netif_msg_tx_err(priv))
- dev_err(&ndev->dev,
- "Tx Error (aborted)\n");
+ netdev_err(ndev, "Tx Error (aborted)\n");
err = true;
}
if (netif_msg_tx_done(priv)) {
@@ -1179,8 +1177,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intTXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intTXErr(%d)\n",
+ loop);
locked_reg_bfclr(priv, ECON1, ECON1_TXRTS);
enc28j60_read_tsv(priv, tsv);
if (netif_msg_tx_err(priv))
@@ -1194,9 +1192,9 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* Transmit Late collision check for retransmit */
if (TSV_GETBIT(tsv, TSV_TXLATECOLLISION)) {
if (netif_msg_tx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": LateCollision TXErr (%d)\n",
- priv->tx_retry_count);
+ netdev_printk(KERN_DEBUG, ndev,
+ "LateCollision TXErr (%d)\n",
+ priv->tx_retry_count);
if (priv->tx_retry_count++ < MAX_TX_RETRYCOUNT)
locked_reg_bfset(priv, ECON1,
ECON1_TXRTS);
@@ -1210,13 +1208,12 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
if ((intflags & EIR_RXERIF) != 0) {
loop++;
if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME
- ": intRXErr(%d)\n", loop);
+ netdev_printk(KERN_DEBUG, ndev, "intRXErr(%d)\n",
+ loop);
/* Check free FIFO space to flag RX overrun */
if (enc28j60_get_free_rxfifo(priv) <= 0) {
if (netif_msg_rx_err(priv))
- printk(KERN_DEBUG DRV_NAME
- ": RX Overrun\n");
+ netdev_printk(KERN_DEBUG, ndev, "RX Overrun\n");
ndev->stats.rx_dropped++;
}
locked_reg_bfclr(priv, EIR, EIR_RXERIF);
@@ -1228,8 +1225,6 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
/* re-enable interrupts */
locked_reg_bfset(priv, EIE, EIE_INTIE);
- if (netif_msg_intr(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__);
}
/*
@@ -1239,11 +1234,13 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ struct net_device *ndev = priv->netdev;
+
BUG_ON(!priv->tx_skb);
if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME
- ": Tx Packet Len:%d\n", priv->tx_skb->len);
+ netdev_printk(KERN_DEBUG, ndev, "Tx Packet Len:%d\n",
+ priv->tx_skb->len);
if (netif_msg_pktdata(priv))
dump_packet(__func__,
@@ -1253,6 +1250,7 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
#ifdef CONFIG_ENC28J60_WRITEVERIFY
/* readback and verify written data */
if (netif_msg_drv(priv)) {
+ struct device *dev = &priv->spi->dev;
int test_len, k;
u8 test_buf[64]; /* limit the test to the first 64 bytes */
int okflag;
@@ -1266,16 +1264,14 @@ static void enc28j60_hw_tx(struct enc28j60_net *priv)
okflag = 1;
for (k = 0; k < test_len; k++) {
if (priv->tx_skb->data[k] != test_buf[k]) {
- printk(KERN_DEBUG DRV_NAME
- ": Error, %d location differ: "
- "0x%02x-0x%02x\n", k,
- priv->tx_skb->data[k], test_buf[k]);
+ dev_printk(KERN_DEBUG, dev,
+ "Error, %d location differ: 0x%02x-0x%02x\n",
+ k, priv->tx_skb->data[k], test_buf[k]);
okflag = 0;
}
}
if (!okflag)
- printk(KERN_DEBUG DRV_NAME ": Tx write buffer, "
- "verify ERROR!\n");
+ dev_printk(KERN_DEBUG, dev, "Tx write buffer, verify ERROR!\n");
}
#endif
/* set TX request flag */
@@ -1287,14 +1283,11 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_tx_queued(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
/* If some error occurs while trying to transmit this
* packet, you should return '1' from this function.
* In such a case you _may not_ do anything to the
* SKB, it is still owned by the network queueing
- * layer when an error is returned. This means you
+ * layer when an error is returned. This means you
* may not modify any SKB fields, you may not free
* the SKB, etc.
*/
@@ -1337,7 +1330,7 @@ static void enc28j60_tx_timeout(struct net_device *ndev)
struct enc28j60_net *priv = netdev_priv(ndev);
if (netif_msg_timer(priv))
- dev_err(&ndev->dev, DRV_NAME " tx timeout\n");
+ netdev_err(ndev, "tx timeout\n");
ndev->stats.tx_errors++;
/* can't restart safely under softirq */
@@ -1356,13 +1349,9 @@ static int enc28j60_net_open(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
if (!is_valid_ether_addr(dev->dev_addr)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "invalid MAC address %pM\n",
- dev->dev_addr);
+ netdev_err(dev, "invalid MAC address %pM\n", dev->dev_addr);
return -EADDRNOTAVAIL;
}
/* Reset the hardware here (and take it out of low power mode) */
@@ -1370,7 +1359,7 @@ static int enc28j60_net_open(struct net_device *dev)
enc28j60_hw_disable(priv);
if (!enc28j60_hw_init(priv)) {
if (netif_msg_ifup(priv))
- dev_err(&dev->dev, "hw_reset() failed\n");
+ netdev_err(dev, "hw_reset() failed\n");
return -EINVAL;
}
/* Update the MAC address (in case user has changed it) */
@@ -1392,9 +1381,6 @@ static int enc28j60_net_close(struct net_device *dev)
{
struct enc28j60_net *priv = netdev_priv(dev);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__);
-
enc28j60_hw_disable(priv);
enc28j60_lowpower(priv, true);
netif_stop_queue(dev);
@@ -1415,16 +1401,16 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "promiscuous mode\n");
+ netdev_info(dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
} else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "%smulticast mode\n",
- (dev->flags & IFF_ALLMULTI) ? "all-" : "");
+ netdev_info(dev, "%smulticast mode\n",
+ (dev->flags & IFF_ALLMULTI) ? "all-" : "");
priv->rxfilter = RXFILTER_MULTI;
} else {
if (netif_msg_link(priv))
- dev_info(&dev->dev, "normal mode\n");
+ netdev_info(dev, "normal mode\n");
priv->rxfilter = RXFILTER_NORMAL;
}
@@ -1436,20 +1422,21 @@ static void enc28j60_setrx_work_handler(struct work_struct *work)
{
struct enc28j60_net *priv =
container_of(work, struct enc28j60_net, setrx_work);
+ struct device *dev = &priv->spi->dev;
if (priv->rxfilter == RXFILTER_PROMISC) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": promiscuous mode\n");
+ dev_printk(KERN_DEBUG, dev, "promiscuous mode\n");
locked_regb_write(priv, ERXFCON, 0x00);
} else if (priv->rxfilter == RXFILTER_MULTI) {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": multicast mode\n");
+ dev_printk(KERN_DEBUG, dev, "multicast mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN | ERXFCON_MCEN);
} else {
if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": normal mode\n");
+ dev_printk(KERN_DEBUG, dev, "normal mode\n");
locked_regb_write(priv, ERXFCON,
ERXFCON_UCEN | ERXFCON_CRCEN |
ERXFCON_BCEN);
@@ -1468,7 +1455,7 @@ static void enc28j60_restart_work_handler(struct work_struct *work)
enc28j60_net_close(ndev);
ret = enc28j60_net_open(ndev);
if (unlikely(ret)) {
- dev_info(&ndev->dev, " could not restart %d\n", ret);
+ netdev_info(ndev, "could not restart %d\n", ret);
dev_close(ndev);
}
}
@@ -1552,14 +1539,13 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
+ unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
- const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
- dev_info(&spi->dev, DRV_NAME " Ethernet driver %s loaded\n",
- DRV_VERSION);
+ dev_info(&spi->dev, "Ethernet driver %s loaded\n", DRV_VERSION);
dev = alloc_etherdev(sizeof(struct enc28j60_net));
if (!dev) {
@@ -1570,8 +1556,7 @@ static int enc28j60_probe(struct spi_device *spi)
priv->netdev = dev; /* priv to netdev reference */
priv->spi = spi; /* priv to spi reference */
- priv->msg_enable = netif_msg_init(debug.msg_enable,
- ENC28J60_MSG_DEFAULT);
+ priv->msg_enable = netif_msg_init(debug.msg_enable, ENC28J60_MSG_DEFAULT);
mutex_init(&priv->lock);
INIT_WORK(&priv->tx_work, enc28j60_tx_work_handler);
INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
@@ -1582,13 +1567,12 @@ static int enc28j60_probe(struct spi_device *spi)
if (!enc28j60_chipset_init(dev)) {
if (netif_msg_probe(priv))
- dev_info(&spi->dev, DRV_NAME " chip not found\n");
+ dev_info(&spi->dev, "chip not found\n");
ret = -EIO;
goto error_irq;
}
- macaddr = of_get_mac_address(spi->dev.of_node);
- if (macaddr)
+ if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
ether_addr_copy(dev->dev_addr, macaddr);
else
eth_hw_addr_random(dev);
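device_get_mac_address() resolves the usual firmware properties ("mac-address", "local-mac-address", "address") through the unified fwnode API, so the same code now covers both DT and ACPI nodes; it returns the destination buffer on success and NULL otherwise, which is what the truth test above relies on.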
@@ -1600,8 +1584,8 @@ static int enc28j60_probe(struct spi_device *spi)
ret = request_irq(spi->irq, enc28j60_irq, 0, DRV_NAME, priv);
if (ret < 0) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, DRV_NAME ": request irq %d failed "
- "(ret = %d)\n", spi->irq, ret);
+ dev_err(&spi->dev, "request irq %d failed (ret = %d)\n",
+ spi->irq, ret);
goto error_irq;
}
@@ -1616,11 +1600,10 @@ static int enc28j60_probe(struct spi_device *spi)
ret = register_netdev(dev);
if (ret) {
if (netif_msg_probe(priv))
- dev_err(&spi->dev, "register netdev " DRV_NAME
- " failed (ret = %d)\n", ret);
+ dev_err(&spi->dev, "register netdev failed (ret = %d)\n",
+ ret);
goto error_register;
}
- dev_info(&dev->dev, DRV_NAME " driver registered\n");
return 0;
@@ -1636,9 +1619,6 @@ static int enc28j60_remove(struct spi_device *spi)
{
struct enc28j60_net *priv = spi_get_drvdata(spi);
- if (netif_msg_drv(priv))
- printk(KERN_DEBUG DRV_NAME ": remove\n");
-
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
free_netdev(priv->netdev);
@@ -1660,22 +1640,7 @@ static struct spi_driver enc28j60_driver = {
.probe = enc28j60_probe,
.remove = enc28j60_remove,
};
-
-static int __init enc28j60_init(void)
-{
- msec20_to_jiffies = msecs_to_jiffies(20);
-
- return spi_register_driver(&enc28j60_driver);
-}
-
-module_init(enc28j60_init);
-
-static void __exit enc28j60_exit(void)
-{
- spi_unregister_driver(&enc28j60_driver);
-}
-
-module_exit(enc28j60_exit);
+module_spi_driver(enc28j60_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Claudio Lanconelli <lanconelli.claudio@eptar.com>");
diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig
index 549898d5d450..f0d0e09f60e2 100644
--- a/drivers/net/ethernet/netronome/Kconfig
+++ b/drivers/net/ethernet/netronome/Kconfig
@@ -19,6 +19,7 @@ config NFP
tristate "Netronome(R) NFP4000/NFP6000 NIC driver"
depends on PCI && PCI_MSI
depends on VXLAN || VXLAN=n
+ select NET_DEVLINK
---help---
This driver supports the Netronome(R) NFP4000/NFP6000 based
cards working as an advanced Ethernet NIC. It works with both
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index e336f6ee94f5..c56e31d9f8a4 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -160,9 +160,9 @@ nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
struct nfp_flower_priv *priv = app->priv;
switch (tun->key.tp_dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
return NFP_FL_TUNNEL_VXLAN;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
return NFP_FL_TUNNEL_GENEVE;
/* FALLTHROUGH */
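The private defines being replaced (removed from main.h below) carried the same well-known values that the shared headers already provide, so this is pure deduplication:

	/* include/net/vxlan.h */
	#define IANA_VXLAN_UDP_PORT	4789
	/* include/net/geneve.h */
	#define GENEVE_UDP_PORT		6081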
@@ -582,60 +582,23 @@ static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
}
}
-static int
-nfp_fl_pedit(const struct flow_action_entry *act,
- struct tc_cls_flower_offload *flow,
- char *nfp_action, int *a_len, u32 *csum_updated)
-{
- struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
+struct nfp_flower_pedit_acts {
struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
struct nfp_fl_set_ip4_addrs set_ip_addr;
- enum flow_action_mangle_base htype;
struct nfp_fl_set_tport set_tport;
struct nfp_fl_set_eth set_eth;
+};
+
+static int
+nfp_fl_commit_mangle(struct tc_cls_flower_offload *flow, char *nfp_action,
+ int *a_len, struct nfp_flower_pedit_acts *set_act,
+ u32 *csum_updated)
+{
+ struct flow_rule *rule = tc_cls_flower_offload_flow_rule(flow);
size_t act_size = 0;
u8 ip_proto = 0;
- u32 offset;
- int err;
-
- memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
- memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
- memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
- memset(&set_ip6_src, 0, sizeof(set_ip6_src));
- memset(&set_ip_addr, 0, sizeof(set_ip_addr));
- memset(&set_tport, 0, sizeof(set_tport));
- memset(&set_eth, 0, sizeof(set_eth));
-
- htype = act->mangle.htype;
- offset = act->mangle.offset;
-
- switch (htype) {
- case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
- err = nfp_fl_set_eth(act, offset, &set_eth);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
- err = nfp_fl_set_ip4(act, offset, &set_ip_addr,
- &set_ip_ttl_tos);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
- err = nfp_fl_set_ip6(act, offset, &set_ip6_dst,
- &set_ip6_src, &set_ip6_tc_hl_fl);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_TCP);
- break;
- case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
- err = nfp_fl_set_tport(act, offset, &set_tport,
- NFP_FL_ACTION_OPCODE_SET_UDP);
- break;
- default:
- return -EOPNOTSUPP;
- }
- if (err)
- return err;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -644,77 +607,82 @@ nfp_fl_pedit(const struct flow_action_entry *act,
ip_proto = match.key->ip_proto;
}
- if (set_eth.head.len_lw) {
- act_size = sizeof(set_eth);
- memcpy(nfp_action, &set_eth, act_size);
+ if (set_act->set_eth.head.len_lw) {
+ act_size = sizeof(set_act->set_eth);
+ memcpy(nfp_action, &set_act->set_eth, act_size);
*a_len += act_size;
}
- if (set_ip_ttl_tos.head.len_lw) {
+
+ if (set_act->set_ip_ttl_tos.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_ttl_tos);
- memcpy(nfp_action, &set_ip_ttl_tos, act_size);
+ act_size = sizeof(set_act->set_ip_ttl_tos);
+ memcpy(nfp_action, &set_act->set_ip_ttl_tos, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip_addr.head.len_lw) {
+
+ if (set_act->set_ip_addr.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip_addr);
- memcpy(nfp_action, &set_ip_addr, act_size);
+ act_size = sizeof(set_act->set_ip_addr);
+ memcpy(nfp_action, &set_act->set_ip_addr, act_size);
*a_len += act_size;
/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_tc_hl_fl.head.len_lw) {
+
+ if (set_act->set_ip6_tc_hl_fl.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_tc_hl_fl);
- memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
+ act_size = sizeof(set_act->set_ip6_tc_hl_fl);
+ memcpy(nfp_action, &set_act->set_ip6_tc_hl_fl, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
+
+ if (set_act->set_ip6_dst.head.len_lw &&
+ set_act->set_ip6_src.head.len_lw) {
/* TC compiles set src and dst IPv6 address as a single action,
* the hardware requires this to be 2 separate actions.
*/
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
- act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(&nfp_action[sizeof(set_act->set_ip6_src)],
+ &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_dst.head.len_lw) {
+ } else if (set_act->set_ip6_dst.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_dst);
- memcpy(nfp_action, &set_ip6_dst, act_size);
+ act_size = sizeof(set_act->set_ip6_dst);
+ memcpy(nfp_action, &set_act->set_ip6_dst, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
- } else if (set_ip6_src.head.len_lw) {
+ } else if (set_act->set_ip6_src.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_ip6_src);
- memcpy(nfp_action, &set_ip6_src, act_size);
+ act_size = sizeof(set_act->set_ip6_src);
+ memcpy(nfp_action, &set_act->set_ip6_src, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
}
- if (set_tport.head.len_lw) {
+ if (set_act->set_tport.head.len_lw) {
nfp_action += act_size;
- act_size = sizeof(set_tport);
- memcpy(nfp_action, &set_tport, act_size);
+ act_size = sizeof(set_act->set_tport);
+ memcpy(nfp_action, &set_act->set_tport, act_size);
*a_len += act_size;
/* Hardware will automatically fix TCP/UDP checksum. */
@@ -725,7 +693,40 @@ nfp_fl_pedit(const struct flow_action_entry *act,
}
static int
-nfp_flower_output_action(struct nfp_app *app, const struct flow_action_entry *act,
+nfp_fl_pedit(const struct flow_action_entry *act,
+ struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len,
+ u32 *csum_updated, struct nfp_flower_pedit_acts *set_act)
+{
+ enum flow_action_mangle_base htype;
+ u32 offset;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+
+ switch (htype) {
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
+ return nfp_fl_set_eth(act, offset, &set_act->set_eth);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
+ return nfp_fl_set_ip4(act, offset, &set_act->set_ip_addr,
+ &set_act->set_ip_ttl_tos);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
+ return nfp_fl_set_ip6(act, offset, &set_act->set_ip6_dst,
+ &set_act->set_ip6_src,
+ &set_act->set_ip6_tc_hl_fl);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_TCP);
+ case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
+ return nfp_fl_set_tport(act, offset, &set_act->set_tport,
+ NFP_FL_ACTION_OPCODE_SET_UDP);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nfp_flower_output_action(struct nfp_app *app,
+ const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev, bool last,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
@@ -775,7 +776,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
struct nfp_fl_payload *nfp_fl, int *a_len,
struct net_device *netdev,
enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
- int *out_cnt, u32 *csum_updated)
+ int *out_cnt, u32 *csum_updated,
+ struct nfp_flower_pedit_acts *set_act)
{
struct nfp_fl_set_ipv4_udp_tun *set_tun;
struct nfp_fl_pre_tunnel *pre_tun;
@@ -860,7 +862,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
case FLOW_ACTION_MANGLE:
if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len],
- a_len, csum_updated))
+ a_len, csum_updated, set_act))
return -EOPNOTSUPP;
break;
case FLOW_ACTION_CSUM:
@@ -880,12 +882,49 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act,
return 0;
}
+static bool nfp_fl_check_mangle_start(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry prev_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == 0)
+ return true;
+
+ prev_act = flow_act->entries[current_act_idx - 1];
+
+ return prev_act.id != FLOW_ACTION_MANGLE;
+}
+
+static bool nfp_fl_check_mangle_end(struct flow_action *flow_act,
+ int current_act_idx)
+{
+ struct flow_action_entry current_act;
+ struct flow_action_entry next_act;
+
+ current_act = flow_act->entries[current_act_idx];
+ if (current_act.id != FLOW_ACTION_MANGLE)
+ return false;
+
+ if (current_act_idx == flow_act->num_entries - 1)
+ return true;
+
+ next_act = flow_act->entries[current_act_idx + 1];
+
+ return next_act.id != FLOW_ACTION_MANGLE;
+}
+
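The batching contract of these helpers: zero the scratch struct at the first mangle entry of a run (nfp_fl_check_mangle_start), let nfp_fl_pedit() accumulate into it for each entry, then emit the firmware actions once at the last entry of the run (nfp_fl_check_mangle_end). A standalone illustration of the boundary predicates in plain C, not driver code; note the last-entry check compares against num_entries - 1:

	#include <stdbool.h>
	#include <stdio.h>

	enum act_id { ACT_MANGLE, ACT_OTHER };

	static bool mangle_start(const enum act_id *acts, int i)
	{
		return acts[i] == ACT_MANGLE &&
		       (i == 0 || acts[i - 1] != ACT_MANGLE);
	}

	static bool mangle_end(const enum act_id *acts, int n, int i)
	{
		return acts[i] == ACT_MANGLE &&
		       (i == n - 1 || acts[i + 1] != ACT_MANGLE);
	}

	int main(void)
	{
		enum act_id acts[] = { ACT_MANGLE, ACT_MANGLE, ACT_OTHER, ACT_MANGLE };
		int n = sizeof(acts) / sizeof(acts[0]);

		for (int i = 0; i < n; i++) {
			if (mangle_start(acts, i))
				printf("reset scratch at %d\n", i);
			if (mangle_end(acts, n, i))
				printf("commit batch at %d\n", i);
		}
		return 0;
	}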
int nfp_flower_compile_action(struct nfp_app *app,
struct tc_cls_flower_offload *flow,
struct net_device *netdev,
struct nfp_fl_payload *nfp_flow)
{
int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
+ struct nfp_flower_pedit_acts set_act;
enum nfp_flower_tun_type tun_type;
struct flow_action_entry *act;
u32 csum_updated = 0;
@@ -899,12 +938,18 @@ int nfp_flower_compile_action(struct nfp_app *app,
out_cnt = 0;
flow_action_for_each(i, act, &flow->rule->action) {
+ if (nfp_fl_check_mangle_start(&flow->rule->action, i))
+ memset(&set_act, 0, sizeof(set_act));
err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len,
netdev, &tun_type, &tun_out_cnt,
- &out_cnt, &csum_updated);
+ &out_cnt, &csum_updated, &set_act);
if (err)
return err;
act_cnt++;
+ if (nfp_fl_check_mangle_end(&flow->rule->action, i))
+ nfp_fl_commit_mangle(flow,
+ &nfp_flow->action_data[act_len],
+ &act_len, &set_act, &csum_updated);
}
/* We optimise when the action list is small, this can unfortunately
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index c0945a5fd1a4..f6ca8dc9cc92 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -34,9 +34,6 @@ struct nfp_app;
#define NFP_FL_MASK_REUSE_TIME_NS 40000
#define NFP_FL_MASK_ID_LOCATION 1
-#define NFP_FL_VXLAN_PORT 4789
-#define NFP_FL_GENEVE_PORT 6081
-
/* Extra features bitmap. */
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 450d7296fd57..9f16920da81d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -195,7 +195,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_opts(rule, &enc_op);
switch (enc_ports.key->dst) {
- case htons(NFP_FL_VXLAN_PORT):
+ case htons(IANA_VXLAN_UDP_PORT):
*tun_type = NFP_FL_TUNNEL_VXLAN;
key_layer |= NFP_FLOWER_LAYER_VXLAN;
key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
@@ -203,7 +203,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (enc_op.key)
return -EOPNOTSUPP;
break;
- case htons(NFP_FL_GENEVE_PORT):
+ case htons(GENEVE_UDP_PORT):
if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
return -EOPNOTSUPP;
*tun_type = NFP_FL_TUNNEL_GENEVE;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h
index f8d422713705..a6fda07fce43 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_app.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h
@@ -433,6 +433,6 @@ int nfp_app_nic_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
int nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id);
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev);
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index e9eca99cf493..8e7591241e7c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -354,6 +354,8 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
{
struct nfp_eth_table_port eth_port;
struct devlink *devlink;
+ const u8 *serial;
+ int serial_len;
int ret;
rtnl_lock();
@@ -362,10 +364,10 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
if (ret)
return ret;
- devlink_port_type_eth_set(&port->dl_port, port->netdev);
+ serial_len = nfp_cpp_serial(port->app->cpp, &serial);
devlink_port_attrs_set(&port->dl_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
eth_port.label_port, eth_port.is_split,
- eth_port.label_subport);
+ eth_port.label_subport, serial, serial_len);
devlink = priv_to_devlink(app->pf);
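Passing the CPP serial here sets it as the port's switch ID, which the devlink core then reports on the driver's behalf; that is also why .ndo_get_port_parent_id can be dropped from the netdev ops further down.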
@@ -377,13 +379,23 @@ void nfp_devlink_port_unregister(struct nfp_port *port)
devlink_port_unregister(&port->dl_port);
}
-struct devlink *nfp_devlink_get_devlink(struct net_device *netdev)
+void nfp_devlink_port_type_eth_set(struct nfp_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->netdev);
+}
+
+void nfp_devlink_port_type_clear(struct nfp_port *port)
{
- struct nfp_app *app;
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *nfp_devlink_get_devlink_port(struct net_device *netdev)
+{
+ struct nfp_port *port;
- app = nfp_app_from_netdev(netdev);
- if (!app)
+ port = nfp_port_from_netdev(netdev);
+ if (!port)
return NULL;
- return priv_to_devlink(app->pf);
+ return &port->dl_port;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6d1b8816552e..bde9695b9f8a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -909,7 +909,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1;
- if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more))
+ if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
nfp_net_tx_xmit_more_flush(tx_ring);
return NETDEV_TX_OK;
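This follows the core change that moved the xmit_more hint out of the skb and into per-CPU softnet data; netdev_xmit_more() is the new accessor, so the driver no longer touches skb state after queuing the descriptor. The call keeps the same shape:

	if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, netdev_xmit_more()))
		nfp_net_tx_xmit_more_flush(tx_ring);	/* ring the doorbell */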
@@ -3324,8 +3324,11 @@ nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
struct nfp_net *nn = netdev_priv(netdev);
int n;
+ /* If port is defined, the devlink_port is registered and the
+ * devlink core takes care of name formatting.
+ */
if (nn->port)
- return nfp_port_get_phys_port_name(netdev, name, len);
+ return -EOPNOTSUPP;
if (nn->dp.is_vf || nn->vnic_no_name)
return -EOPNOTSUPP;
@@ -3530,8 +3533,7 @@ const struct net_device_ops nfp_net_netdev_ops = {
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_bpf = nfp_net_xdp,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
/**
@@ -3548,7 +3550,7 @@ void nfp_net_info(struct nfp_net *nn)
nn->fw_ver.resv, nn->fw_ver.class,
nn->fw_ver.major, nn->fw_ver.minor,
nn->max_mtu);
- nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
nn->cap,
nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
@@ -3564,7 +3566,6 @@ void nfp_net_info(struct nfp_net *nn)
nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
- nn->cap & NFP_NET_CFG_CTRL_L2SWITCH ? "L2SWITCH " : "",
nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 372adea10e14..f5d564bbb55a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -104,8 +104,6 @@
#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
-#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
-#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
#define NFP_NET_CFG_CTRL_BPF (0x1 << 27) /* BPF offload capable */
@@ -130,7 +128,6 @@
#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
-#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 690b62718dbb..851e31e0ba8e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
+#include <linux/sfp.h>
#include "nfpcore/nfp.h"
#include "nfpcore/nfp_nsp.h"
@@ -152,6 +153,8 @@ static const struct nfp_et_stat nfp_mac_et_stats[] = {
#define NN_RVEC_GATHER_STATS 9
#define NN_RVEC_PER_Q_STATS 3
+#define SFP_SFF_REV_COMPLIANCE 1
+
static void nfp_net_get_nspinfo(struct nfp_app *app, char *version)
{
struct nfp_nsp *nsp;
@@ -1096,6 +1099,130 @@ nfp_app_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
buffer);
}
+static int
+nfp_port_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ unsigned int read_len;
+ struct nfp_nsp *nsp;
+ int err = 0;
+ u8 data;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ switch (eth_port->interface) {
+ case NFP_INTERFACE_SFP:
+ case NFP_INTERFACE_SFP28:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF8472_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (!data) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP:
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ SFP_SFF_REV_COMPLIANCE, &data,
+ 1, &read_len);
+ if (err < 0)
+ goto exit_close_nsp;
+
+ if (data < 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ }
+ break;
+ case NFP_INTERFACE_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Unsupported module 0x%x detected\n",
+ eth_port->interface);
+ err = -EINVAL;
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
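
For illustration, a minimal user-space sketch of the compliance-byte decision above. The offsets mirror SFP_SFF8472_COMPLIANCE from <linux/sfp.h> (byte 94 of the A0h page) and the driver-local SFP_SFF_REV_COMPLIANCE (byte 1 of the QSFP map); treat the exact values and EEPROM lengths here as assumptions for a standalone build, not the kernel definitions:

#include <stdio.h>

/* Assumed byte offsets, matching the identifiers used in the driver. */
#define SFP_SFF8472_COMPLIANCE	94
#define SFP_SFF_REV_COMPLIANCE	1

/* Mirror of the driver's decision: a zero SFF-8472 compliance byte means
 * the plain SFF-8079 (A0h-only, 256B) map, otherwise SFF-8472 (512B);
 * a QSFP revision below 0x03 means SFF-8436, anything newer SFF-8636.
 */
static const char *sfp_module_type(unsigned char compliance)
{
	return compliance ? "SFF-8472 (512B)" : "SFF-8079 (256B)";
}

static const char *qsfp_module_type(unsigned char rev)
{
	return rev < 0x3 ? "SFF-8436" : "SFF-8636";
}

int main(void)
{
	printf("SFP, compliance 0x00 -> %s\n", sfp_module_type(0x00));
	printf("QSFP, rev 0x03       -> %s\n", qsfp_module_type(0x03));
	return 0;
}
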
+
+static int
+nfp_port_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct nfp_eth_table_port *eth_port;
+ struct nfp_port *port;
+ struct nfp_nsp *nsp;
+ int err;
+
+ port = nfp_port_from_netdev(netdev);
+ eth_port = __nfp_port_get_eth_port(port);
+ if (!eth_port)
+ return -EOPNOTSUPP;
+
+ nsp = nfp_nsp_open(port->app->cpp);
+ if (IS_ERR(nsp)) {
+ err = PTR_ERR(nsp);
+ netdev_err(netdev, "Failed to access the NSP: %d\n", err);
+ return err;
+ }
+
+ if (!nfp_nsp_has_read_module_eeprom(nsp)) {
+ netdev_info(netdev, "reading module EEPROM not supported. Please update flash\n");
+ err = -EOPNOTSUPP;
+ goto exit_close_nsp;
+ }
+
+ err = nfp_nsp_read_module_eeprom(nsp, eth_port->eth_index,
+ eeprom->offset, data, eeprom->len,
+ &eeprom->len);
+ if (err < 0) {
+ if (eeprom->len) {
+ netdev_warn(netdev,
+ "Incomplete read from module EEPROM: %d\n",
+ err);
+ err = 0;
+ } else {
+ netdev_err(netdev,
+ "Reading from module EEPROM failed: %d\n",
+ err);
+ }
+ }
+
+exit_close_nsp:
+ nfp_nsp_close(nsp);
+ return err;
+}
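
A short sketch of the error policy above: a partial EEPROM read is downgraded to success with the shortened length, so ethtool still gets whatever data arrived, and only an empty read propagates the error. This is a standalone model of the policy, not the kernel interface:

#include <stdio.h>

/* A partial read is reported as success with the shrunken length so
 * callers can use what arrived; only a read that returned nothing
 * propagates the error code.
 */
static int eeprom_read_result(int err, unsigned int read_len)
{
	if (err < 0 && read_len) {
		fprintf(stderr, "Incomplete read from module EEPROM: %d\n", err);
		return 0;	/* partial data is still useful */
	}
	return err;
}

int main(void)
{
	printf("%d\n", eeprom_read_result(-5, 64));	/* 0: partial read */
	printf("%d\n", eeprom_read_result(-5, 0));	/* -5: hard failure */
	return 0;
}
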
+
static int nfp_net_set_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -1253,6 +1380,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
@@ -1272,6 +1401,8 @@ const struct ethtool_ops nfp_port_ethtool_ops = {
.set_dump = nfp_app_set_dump,
.get_dump_flag = nfp_app_get_dump_flag,
.get_dump_data = nfp_app_get_dump_data,
+ .get_module_info = nfp_port_get_module_info,
+ .get_module_eeprom = nfp_port_get_module_eeprom,
.get_link_ksettings = nfp_net_get_link_ksettings,
.set_link_ksettings = nfp_net_set_link_ksettings,
.get_fecparam = nfp_port_get_fecparam,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 08f5fdbd8e41..986464d4a206 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -150,34 +150,39 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nn->id = id;
+ if (nn->port) {
+ err = nfp_devlink_port_register(pf->app, nn->port);
+ if (err)
+ return err;
+ }
+
err = nfp_net_init(nn);
if (err)
- return err;
+ goto err_devlink_port_clean;
nfp_net_debugfs_vnic_add(nn, pf->ddir);
- if (nn->port) {
- err = nfp_devlink_port_register(pf->app, nn->port);
- if (err)
- goto err_dfs_clean;
- }
+ if (nn->port)
+ nfp_devlink_port_type_eth_set(nn->port);
nfp_net_info(nn);
if (nfp_net_is_data_vnic(nn)) {
err = nfp_app_vnic_init(pf->app, nn);
if (err)
- goto err_devlink_port_clean;
+ goto err_devlink_port_type_clean;
}
return 0;
-err_devlink_port_clean:
+err_devlink_port_type_clean:
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
-err_dfs_clean:
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+err_devlink_port_clean:
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
return err;
}
@@ -221,9 +226,11 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
if (nfp_net_is_data_vnic(nn))
nfp_app_vnic_clean(pf->app, nn);
if (nn->port)
- nfp_devlink_port_unregister(nn->port);
+ nfp_devlink_port_type_clear(nn->port);
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_clean(nn);
+ if (nn->port)
+ nfp_devlink_port_unregister(nn->port);
}
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 94d228c04496..08e9bfa95f9b 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -272,8 +272,7 @@ const struct net_device_ops nfp_repr_netdev_ops = {
.ndo_fix_features = nfp_repr_fix_features,
.ndo_set_features = nfp_port_set_features,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
- .ndo_get_devlink = nfp_devlink_get_devlink,
+ .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
};
void
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c
index 93c5bfc0510b..fcd16877e6e0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c
@@ -30,22 +30,6 @@ struct nfp_port *nfp_port_from_netdev(struct net_device *netdev)
return NULL;
}
-int nfp_port_get_port_parent_id(struct net_device *netdev,
- struct netdev_phys_item_id *ppid)
-{
- struct nfp_port *port;
- const u8 *serial;
-
- port = nfp_port_from_netdev(netdev);
- if (!port)
- return -EOPNOTSUPP;
-
- ppid->id_len = nfp_cpp_serial(port->app->cpp, &serial);
- memcpy(&ppid->id, serial, ppid->id_len);
-
- return 0;
-}
-
int nfp_port_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data)
{
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.h b/drivers/net/ethernet/netronome/nfp/nfp_port.h
index 90ae053f5c07..d7fd203bb180 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_port.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_port.h
@@ -131,6 +131,8 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
+void nfp_devlink_port_type_eth_set(struct nfp_port *port);
+void nfp_devlink_port_type_clear(struct nfp_port *port);
/**
* Mac stats (0x0000 - 0x0200)
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
index 3a4e224a64b7..42cf4fd875ea 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c
@@ -79,6 +79,8 @@
#define NFP_VERSIONS_NCSI_OFF 22
#define NFP_VERSIONS_CFGR_OFF 26
+#define NSP_SFF_EEPROM_BLOCK_LEN 8
+
enum nfp_nsp_cmd {
SPCODE_NOOP = 0, /* No operation */
SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */
@@ -95,6 +97,7 @@ enum nfp_nsp_cmd {
SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */
SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. */
SPCODE_VERSIONS = 21, /* Report FW versions */
+ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */
};
struct nfp_nsp_dma_buf {
@@ -965,3 +968,62 @@ const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
return (const char *)&buf[buf_off];
}
+
+static int
+__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
+{
+ struct nfp_nsp_command_buf_arg module_eeprom = {
+ {
+ .code = SPCODE_READ_SFF_EEPROM,
+ .option = size,
+ },
+ .in_buf = buf,
+ .in_size = size,
+ .out_buf = buf,
+ .out_size = size,
+ };
+
+ return nfp_nsp_command_buf(state, &module_eeprom);
+}
+
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len)
+{
+ struct eeprom_buf {
+ u8 metalen;
+ __le16 length;
+ __le16 offset;
+ __le16 readlen;
+ u8 eth_index;
+ u8 data[0];
+ } __packed *buf;
+ int bufsz, ret;
+
+ BUILD_BUG_ON(offsetof(struct eeprom_buf, data) % 8);
+
+ /* Buffer must be large enough and rounded to the next block size. */
+ bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN));
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf->metalen =
+ offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN;
+ buf->length = cpu_to_le16(len);
+ buf->offset = cpu_to_le16(offset);
+ buf->eth_index = eth_index;
+
+ ret = __nfp_nsp_module_eeprom(state, buf, bufsz);
+
+ *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen));
+ if (*read_len)
+ memcpy(data, buf->data, *read_len);
+
+ if (!ret && *read_len < len)
+ ret = -EIO;
+
+ kfree(buf);
+
+ return ret;
+}
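
The NSP message layout above can be checked with a small user-space model: the fixed header (metalen, length, offset, readlen, eth_index) packs to exactly one 8-byte block, so metalen is 1, and the data area is rounded up to whole blocks. round_up() and the flexible-array sizing are re-created here for illustration; the request length is made up:

#include <stdio.h>
#include <stddef.h>

#define NSP_SFF_EEPROM_BLOCK_LEN 8

/* User-space replica of the kernel helper used above. */
#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

struct eeprom_buf {
	unsigned char	metalen;
	unsigned short	length;
	unsigned short	offset;
	unsigned short	readlen;
	unsigned char	eth_index;
	unsigned char	data[];
} __attribute__((packed));

int main(void)
{
	unsigned int len = 100;		/* hypothetical request length */
	size_t hdr = offsetof(struct eeprom_buf, data);
	size_t bufsz = hdr + round_up(len, NSP_SFF_EEPROM_BLOCK_LEN);

	/* The packed header is 1+2+2+2+1 = 8 bytes: one block, metalen 1. */
	printf("header=%zu metalen=%zu bufsz=%zu\n",
	       hdr, hdr / NSP_SFF_EEPROM_BLOCK_LEN, bufsz);
	return 0;
}
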
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
index bd9c358c646f..22ee6985ee1c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h
@@ -22,6 +22,9 @@ int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
+int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
+ unsigned int offset, void *data,
+ unsigned int len, unsigned int *read_len);
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
@@ -43,6 +46,11 @@ static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
return nfp_nsp_get_abi_ver_minor(state) > 27;
}
+static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
+{
+ return nfp_nsp_get_abi_ver_minor(state) > 28;
+}
+
enum nfp_eth_interface {
NFP_INTERFACE_NONE = 0,
NFP_INTERFACE_SFP = 1,
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5bf46310f60..5ffaee9f53b1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1355,7 +1355,7 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
const int nh_off = skb_network_offset(skb);
const int nh_len = skb_network_header_len(skb);
const int nfrags = skb_shinfo(skb)->nr_frags;
- int cs_size, i, fill, hdr, cpyhdr, evt;
+ int cs_size, i, fill, hdr, evt;
dma_addr_t csdma;
fund = XCT_FUN_ST | XCT_FUN_RR_8BRES |
@@ -1396,7 +1396,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
fill++;
/* Copy the result into the TCP packet */
- cpyhdr = fill;
CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) |
XCT_FUN_LLEN(2) | XCT_FUN_SE;
CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T;
@@ -1839,7 +1838,7 @@ static void __exit pasemi_mac_cleanup_module(void)
pci_unregister_driver(&pasemi_mac_driver);
}
-int pasemi_mac_init_module(void)
+static int pasemi_mac_init_module(void)
{
int err;
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..512186adab00 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -492,6 +492,9 @@ enum qed_mf_mode_bit {
/* Allow DSCP to TC mapping */
QED_MF_DSCP_TO_TC_MAP,
+
+ /* Do not insert a vlan tag with id 0 */
+ QED_MF_DONT_ADD_VLAN0_TAG,
};
enum qed_ufp_mode {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 69966dfc6e3d..5c6a276f69ac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -204,9 +204,7 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
else
p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
- /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
- if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
- test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
+ if (test_bit(QED_MF_DONT_ADD_VLAN0_TAG, &p_hwfn->cdev->mf_bits))
p_data->arr[type].dont_add_vlan0 = true;
/* QM reconf data */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..195573793352 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3157,12 +3157,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
BIT(QED_MF_UFP_SPECIFIC) |
- BIT(QED_MF_8021Q_TAGGING);
+ BIT(QED_MF_8021Q_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_BD:
cdev->mf_bits = BIT(QED_MF_OVLAN_CLSS) |
BIT(QED_MF_LLH_PROTO_CLSS) |
- BIT(QED_MF_8021AD_TAGGING);
+ BIT(QED_MF_8021AD_TAGGING) |
+ BIT(QED_MF_DONT_ADD_VLAN0_TAG);
break;
case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 63a78162cfaf..92fe226980fd 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -498,8 +498,7 @@ struct qede_reload_args {
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback);
+ struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index b4c8949933f1..f0a2ca23f63a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -652,9 +652,9 @@ static void qede_get_drvinfo(struct net_device *ndev,
{
char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
struct qede_dev *edev = netdev_priv(ndev);
+ char mbi[ETHTOOL_FWVERS_LEN];
strlcpy(info->driver, "qede", sizeof(info->driver));
- strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
edev->dev_info.common.fw_major,
@@ -668,13 +668,27 @@ static void qede_get_drvinfo(struct net_device *ndev,
(edev->dev_info.common.mfw_rev >> 8) & 0xFF,
edev->dev_info.common.mfw_rev & 0xFF);
- if ((strlen(storm) + strlen(mfw) + strlen("mfw storm ")) <
- sizeof(info->fw_version)) {
+ if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) <
+ sizeof(info->version))
+ snprintf(info->version, sizeof(info->version),
+ "%s [storm %s]", DRV_MODULE_VERSION, storm);
+ else
+ snprintf(info->version, sizeof(info->version),
+ "%s %s", DRV_MODULE_VERSION, storm);
+
+ if (edev->dev_info.common.mbi_version) {
+ snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
+ (edev->dev_info.common.mbi_version &
+ QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
snprintf(info->fw_version, sizeof(info->fw_version),
- "mfw %s storm %s", mfw, storm);
+ "mbi %s [mfw %s]", mbi, mfw);
} else {
snprintf(info->fw_version, sizeof(info->fw_version),
- "%s %s", mfw, storm);
+ "mfw %s", mfw);
}
strlcpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
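
The mbi decode above pulls three fields out of one packed version word. A sketch of the same arithmetic; the masks and offsets below are assumptions for illustration (three byte-wide fields), and the real QED_MBI_VERSION_{0,1,2}_{MASK,OFFSET} values live in the qed headers:

#include <stdio.h>

/* Hypothetical field layout; need not match the real qed definitions. */
#define MBI_VERSION_0_MASK	0x000000ff
#define MBI_VERSION_0_OFFSET	0
#define MBI_VERSION_1_MASK	0x0000ff00
#define MBI_VERSION_1_OFFSET	8
#define MBI_VERSION_2_MASK	0x00ff0000
#define MBI_VERSION_2_OFFSET	16

int main(void)
{
	unsigned int mbi = 0x00010203;	/* sample packed version word */

	/* Extract major.minor.patch exactly as the driver does. */
	printf("mbi %u.%u.%u\n",
	       (mbi & MBI_VERSION_2_MASK) >> MBI_VERSION_2_OFFSET,
	       (mbi & MBI_VERSION_1_MASK) >> MBI_VERSION_1_OFFSET,
	       (mbi & MBI_VERSION_0_MASK) >> MBI_VERSION_0_OFFSET);
	return 0;
}
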
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 31b046e24565..954015d2011a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1665,12 +1665,12 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
- if (skb->xmit_more)
+ if (netdev_xmit_more())
qede_update_tx_producer(txq);
netif_tx_stop_queue(netdev_txq);
@@ -1696,8 +1696,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;
@@ -1705,7 +1704,7 @@ u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
return QEDE_TSS_COUNT(edev) ?
- fallback(dev, skb, NULL) % total_txq : 0;
+ netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
}
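
Across this series, ndo_select_queue() loses its select_queue_fallback_t argument; drivers now call netdev_pick_tx() directly and clamp the result themselves. A compilable sketch of qede's clamp, with the core's pick modeled by a plain hash and the queue counts made up for the example:

#include <stdio.h>

/* Stand-in for netdev_pick_tx(): the core's default queue choice. */
static unsigned short pick_tx(unsigned int hash, unsigned short real_num_txq)
{
	return hash % real_num_txq;
}

/* qede clamps the core's pick into its TSS x traffic-class range, as in
 * "netdev_pick_tx(dev, skb, NULL) % total_txq" above.
 */
static unsigned short qede_like_select(unsigned int hash,
				       unsigned short tss_count,
				       unsigned short num_tc)
{
	unsigned short total_txq = tss_count * num_tc;

	/* 64 is an arbitrary device queue count for the model. */
	return total_txq ? pick_tx(hash, 64) % total_txq : 0;
}

int main(void)
{
	printf("txq=%u\n", qede_like_select(0xdeadbeefu, 8, 4));	/* 15 */
	return 0;
}
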
/* 8B udp header + 8B base tunnel header + 32B option length */
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 04aa592f35c3..ad335bca3273 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -840,7 +840,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- if (!skb->xmit_more || netif_queue_stopped(dev))
+ if (!netdev_xmit_more() || netif_queue_stopped(dev))
iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 19efa88f3f02..88eb9e05d2a1 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6267,7 +6267,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
*/
smp_mb();
if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS))
- netif_wake_queue(dev);
+ netif_start_queue(dev);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8154b38c08f7..4f648394e645 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1615,8 +1615,7 @@ drop:
}
static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/* If the skb needs a TX timestamp, it is handled in the network control queue */
return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC :
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index fa296a7c255d..30a49802fb51 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2288,11 +2288,11 @@ static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, __be32 dst,
nh = fi->fib_nh;
nh_on_port = (fi->fib_dev == ofdpa_port->dev);
- has_gw = !!nh->nh_gw;
+ has_gw = !!nh->fib_nh_gw4;
if (has_gw && nh_on_port) {
err = ofdpa_port_ipv4_nh(ofdpa_port, flags,
- nh->nh_gw, &index);
+ nh->fib_nh_gw4, &index);
if (err)
return err;
@@ -2749,7 +2749,7 @@ static int ofdpa_fib4_add(struct rocker *rocker,
fen_info->tb_id, 0);
if (err)
return err;
- fen_info->fi->fib_nh->nh_flags |= RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
return 0;
}
@@ -2764,7 +2764,7 @@ static int ofdpa_fib4_del(struct rocker *rocker,
ofdpa_port = ofdpa_port_dev_lower_find(fen_info->fi->fib_dev, rocker);
if (!ofdpa_port)
return 0;
- fen_info->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ fen_info->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
return ofdpa_port_fib_ipv4(ofdpa_port, htonl(fen_info->dst),
fen_info->dst_len, fen_info->fi,
fen_info->tb_id, OFDPA_OP_FLAG_REMOVE);
@@ -2791,7 +2791,7 @@ static void ofdpa_fib4_abort(struct rocker *rocker)
rocker);
if (!ofdpa_port)
continue;
- flow_entry->fi->fib_nh->nh_flags &= ~RTNH_F_OFFLOAD;
+ flow_entry->fi->fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
ofdpa_flow_tbl_del(ofdpa_port, OFDPA_OP_FLAG_REMOVE,
flow_entry);
}
diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c
index 3409bbf5b19f..c5059f456f37 100644
--- a/drivers/net/ethernet/sfc/falcon/tx.c
+++ b/drivers/net/ethernet/sfc/falcon/tx.c
@@ -321,7 +321,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
netdev_tx_sent_queue(tx_queue->core_txq, skb_len);
/* Pass off to hardware */
- if (!skb->xmit_more || netif_xmit_stopped(tx_queue->core_txq)) {
+ if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);
/* There could be packets left on the partner queue if those
@@ -333,7 +333,7 @@ netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
ef4_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = netdev_xmit_more();
}
tx_queue->tx_packets++;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 06c8f282263f..e182055ec2eb 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -478,8 +478,6 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
next = skb->next;
skb->next = NULL;
- if (next)
- skb->xmit_more = true;
efx_enqueue_skb(tx_queue, skb);
skb = next;
}
@@ -506,7 +504,7 @@ static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue,
netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
unsigned int old_insert_count = tx_queue->insert_count;
- bool xmit_more = skb->xmit_more;
+ bool xmit_more = netdev_xmit_more();
bool data_mapped = false;
unsigned int segments;
unsigned int skb_len;
@@ -533,7 +531,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (rc)
goto err;
#ifdef EFX_USE_PIO
- } else if (skb_len <= efx_piobuf_size && !skb->xmit_more &&
+ } else if (skb_len <= efx_piobuf_size && !xmit_more &&
efx_nic_may_tx_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
@@ -559,8 +557,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
- /* There could be packets left on the partner queue if those
- * SKBs had skb->xmit_more set. If we do not push those they
+ /* There could be packets left on the partner queue if
+ * xmit_more was set. If we do not push those they
* could be left for a long time and cause a netdev watchdog.
*/
if (txq2->xmit_more_available)
@@ -568,7 +566,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
efx_nic_push_buffers(tx_queue);
} else {
- tx_queue->xmit_more_available = skb->xmit_more;
+ tx_queue->xmit_more_available = xmit_more;
}
if (segments) {
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 644e42c181ee..01ea0d6f8819 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -101,8 +101,7 @@ static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb,
}
static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet_port *port = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 590172818b92..96b883f965f6 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -234,8 +234,7 @@ static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb,
}
static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct vnet *vp = netdev_priv(dev);
struct vnet_port *port = __tx_port_find(vp, skb);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index 99d86e39ff54..bf6c1c6779ff 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -995,7 +995,7 @@ static void xlgmac_dev_xmit(struct xlgmac_channel *channel)
smp_wmb();
ring->cur = cur_index + 1;
- if (!pkt_info->skb->xmit_more ||
+ if (!netdev_xmit_more() ||
netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
channel->queue_index)))
xlgmac_tx_start_xmit(channel, ring);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index a98aedae1b41..c2740dbe9154 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -140,7 +140,7 @@ static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
static void davinci_mdio_enable(struct davinci_mdio_data *data)
{
/* set enable and clock divider */
- __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+ writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
}
static int davinci_mdio_reset(struct mii_bus *bus)
@@ -159,7 +159,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
msleep(PHY_MAX_ADDR * data->access_time);
/* dump hardware version info */
- ver = __raw_readl(&data->regs->version);
+ ver = readl(&data->regs->version);
dev_info(data->dev,
"davinci mdio revision %d.%d, bus freq %ld\n",
(ver >> 8) & 0xff, ver & 0xff,
@@ -169,7 +169,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
goto done;
/* get phy mask from the alive register */
- phy_mask = __raw_readl(&data->regs->alive);
+ phy_mask = readl(&data->regs->alive);
if (phy_mask) {
/* restrict mdio bus to live phys only */
dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
@@ -196,11 +196,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
u32 reg;
while (time_after(timeout, jiffies)) {
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
- reg = __raw_readl(&regs->control);
+ reg = readl(&regs->control);
if ((reg & CONTROL_IDLE) == 0) {
usleep_range(100, 200);
continue;
@@ -216,7 +216,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
return -EAGAIN;
}
- reg = __raw_readl(&regs->user[0].access);
+ reg = readl(&regs->user[0].access);
if ((reg & USERACCESS_GO) == 0)
return 0;
@@ -263,7 +263,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -271,7 +271,7 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (ret < 0)
break;
- reg = __raw_readl(&data->regs->user[0].access);
+ reg = readl(&data->regs->user[0].access);
ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
break;
}
@@ -307,7 +307,7 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (ret < 0)
break;
- __raw_writel(reg, &data->regs->user[0].access);
+ writel(reg, &data->regs->user[0].access);
ret = wait_for_user_access(data);
if (ret == -EAGAIN)
@@ -472,9 +472,9 @@ static int davinci_mdio_runtime_suspend(struct device *dev)
u32 ctrl;
/* shutdown the scan state machine */
- ctrl = __raw_readl(&data->regs->control);
+ ctrl = readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
- __raw_writel(ctrl, &data->regs->control);
+ writel(ctrl, &data->regs->control);
wait_for_idle(data);
return 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 5583d993480d..98d1a45c0606 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
+#include <net/ipv6_stubs.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
@@ -22,8 +23,6 @@
#define GENEVE_NETDEV_VER "0.6"
-#define GENEVE_UDP_PORT 6081
-
#define GENEVE_N_VID (1u << 24)
#define GENEVE_VID_MASK (GENEVE_N_VID - 1)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 7a145172d503..c06e31747288 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1271,20 +1271,17 @@ static const struct genl_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
.doit = gtp_genl_new_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_DELPDP,
.doit = gtp_genl_del_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = GTP_CMD_GETPDP,
.doit = gtp_genl_get_pdp,
.dumpit = gtp_genl_dump_pdp,
- .policy = gtp_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -1294,6 +1291,7 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.version = 0,
.hdrsize = 0,
.maxattr = GTPA_MAX,
+ .policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = gtp_genl_ops,
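
The same move from per-op .policy pointers to a single family-wide .policy repeats in the mac802154_hwsim and macsec hunks below: the generic netlink core now validates every op against the family's policy. A shape-only sketch (the MY_* identifiers, my_get() and my_policy contents are hypothetical):

#include <net/genetlink.h>

/* Ops now carry just cmd/doit/flags; validation is family-wide. */
static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = { /* ... */ };

static const struct genl_ops my_ops[] = {
	{ .cmd = MY_CMD_GET, .doit = my_get, .flags = GENL_ADMIN_PERM },
};

static struct genl_family my_family __ro_after_init = {
	.name		= "my_family",
	.maxattr	= MY_ATTR_MAX,
	.policy		= my_policy,	/* one policy for every op */
	.module		= THIS_MODULE,
	.ops		= my_ops,
	.n_ops		= ARRAY_SIZE(my_ops),
};
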
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index e0dce373cdd9..fdbeb7070d42 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -966,7 +966,7 @@ int netvsc_send(struct net_device *ndev,
/* Keep aggregating only if stack says more data is coming
* and not doing mixed-mode sends and not flow blocked
*/
- xmit_more = skb->xmit_more &&
+ xmit_more = netdev_xmit_more() &&
!packet->cp_partial &&
!netif_xmit_stopped(netdev_get_tx_queue(ndev, packet->q_idx));
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b20fb0fb595b..06393b215102 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -328,7 +328,7 @@ static inline int netvsc_get_tx_queue(struct net_device *ndev,
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
- * This is basically similar to default (__netdev_pick_tx) with the added step
+ * This is basically similar to the default (netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
@@ -351,8 +351,7 @@ static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
}
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_device_context *ndc = netdev_priv(ndev);
struct net_device *vf_netdev;
@@ -364,10 +363,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
- txq = vf_ops->ndo_select_queue(vf_netdev, skb,
- sb_dev, fallback);
+ txq = vf_ops->ndo_select_queue(vf_netdev, skb, sb_dev);
else
- txq = fallback(vf_netdev, skb, NULL);
+ txq = netdev_pick_tx(vf_netdev, skb, NULL);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
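
A user-space model of the send-table step described in the comment above: hash the flow, then let the host-provided indirection table map the hash to a hardware queue. The table size and contents are made up for the example:

#include <stdio.h>

#define SEND_TAB_SIZE 16	/* assumption; the real size is host-provided */

/* Hash the flow, then map hash -> queue through the host's table. */
static unsigned short pick_via_send_table(unsigned int hash,
					  const unsigned short *send_table)
{
	return send_table[hash % SEND_TAB_SIZE];
}

int main(void)
{
	unsigned short table[SEND_TAB_SIZE] = { 0, 1, 2, 3 };	/* rest zero */

	printf("queue=%u\n", pick_via_send_table(0x12345u, table));
	return 0;
}
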
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 3b88846de31b..707285953750 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -598,37 +598,31 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] =
static const struct genl_ops hwsim_nl_ops[] = {
{
.cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
{
.cmd = MAC802154_HWSIM_CMD_NEW_EDGE,
- .policy = hwsim_genl_policy,
.doit = hwsim_new_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_DEL_EDGE,
- .policy = hwsim_genl_policy,
.doit = hwsim_del_edge_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = MAC802154_HWSIM_CMD_SET_EDGE,
- .policy = hwsim_genl_policy,
.doit = hwsim_set_edge_lqi,
.flags = GENL_UNS_ADMIN_PERM,
},
@@ -638,6 +632,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC802154_HWSIM",
.version = 1,
.maxattr = MAC802154_HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.module = THIS_MODULE,
.ops = hwsim_nl_ops,
.n_ops = ARRAY_SIZE(hwsim_nl_ops),
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 64a982563d59..263bfafdb004 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2175,8 +2175,9 @@ static int copy_tx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sa_stats(struct sk_buff *skb,
- struct macsec_rx_sa_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sa_stats(struct sk_buff *skb,
+ struct macsec_rx_sa_stats __percpu *pstats)
{
struct macsec_rx_sa_stats sum = {0, };
int cpu;
@@ -2201,8 +2202,8 @@ static int copy_rx_sa_stats(struct sk_buff *skb,
return 0;
}
-static int copy_rx_sc_stats(struct sk_buff *skb,
- struct pcpu_rx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
{
struct macsec_rx_sc_stats sum = {0, };
int cpu;
@@ -2265,8 +2266,8 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_tx_sc_stats(struct sk_buff *skb,
- struct pcpu_tx_sc_stats __percpu *pstats)
+static noinline_for_stack int
+copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
{
struct macsec_tx_sc_stats sum = {0, };
int cpu;
@@ -2305,8 +2306,8 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
return 0;
}
-static int copy_secy_stats(struct sk_buff *skb,
- struct pcpu_secy_stats __percpu *pstats)
+static noinline_for_stack int
+copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
{
struct macsec_dev_stats sum = {0, };
int cpu;
@@ -2410,8 +2411,9 @@ cancel:
return 1;
}
-static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
- struct sk_buff *skb, struct netlink_callback *cb)
+static noinline_for_stack int
+dump_secy(struct macsec_secy *secy, struct net_device *dev,
+ struct sk_buff *skb, struct netlink_callback *cb)
{
struct macsec_rx_sc *rx_sc;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
@@ -2637,60 +2639,50 @@ static const struct genl_ops macsec_genl_ops[] = {
{
.cmd = MACSEC_CMD_GET_TXSC,
.dumpit = macsec_dump_txsc,
- .policy = macsec_genl_policy,
},
{
.cmd = MACSEC_CMD_ADD_RXSC,
.doit = macsec_add_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSC,
.doit = macsec_del_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSC,
.doit = macsec_upd_rxsc,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_TXSA,
.doit = macsec_add_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_TXSA,
.doit = macsec_del_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_TXSA,
.doit = macsec_upd_txsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_ADD_RXSA,
.doit = macsec_add_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_DEL_RXSA,
.doit = macsec_del_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = MACSEC_CMD_UPD_RXSA,
.doit = macsec_upd_rxsa,
- .policy = macsec_genl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2700,6 +2692,7 @@ static struct genl_family macsec_fam __ro_after_init = {
.hdrsize = 0,
.version = MACSEC_GENL_VERSION,
.maxattr = MACSEC_ATTR_MAX,
+ .policy = macsec_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = macsec_genl_ops,
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0c0f105657d3..4a6be8fab884 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -24,6 +24,7 @@
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
@@ -34,6 +35,7 @@
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <linux/netpoll.h>
+#include <linux/phy.h>
#define MACVLAN_HASH_BITS 8
#define MACVLAN_HASH_SIZE (1<<MACVLAN_HASH_BITS)
@@ -822,6 +824,30 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ struct ifreq ifrr;
+ int err = -EOPNOTSUPP;
+
+ strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
+ ifrr.ifr_ifru = ifr->ifr_ifru;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ case SIOCGHWTSTAMP:
+ if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
+ err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
+ break;
+ }
+
+ if (!err)
+ ifr->ifr_ifru = ifrr.ifr_ifru;
+
+ return err;
+}
+
/*
* macvlan network devices have devices nesting below it and are a special
* "super class" of normal network devices; split their locks off into a
@@ -1020,6 +1046,26 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
return __ethtool_get_link_ksettings(vlan->lowerdev, cmd);
}
+static int macvlan_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct net_device *real_dev = macvlan_dev_real_dev(dev);
+ const struct ethtool_ops *ops = real_dev->ethtool_ops;
+ struct phy_device *phydev = real_dev->phydev;
+
+ if (phydev && phydev->drv && phydev->drv->ts_info) {
+ return phydev->drv->ts_info(phydev, info);
+ } else if (ops->get_ts_info) {
+ return ops->get_ts_info(real_dev, info);
+ } else {
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ }
+
+ return 0;
+}
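
The fallback order above — the PHY driver's ts_info, then the lower device's ethtool op, then software-only defaults — reduces to a simple delegation chain. A sketch with stand-in function pointers, not the kernel types:

#include <stdio.h>

typedef int (*ts_info_fn)(void);

/* Prefer the PHY's timestamping capabilities, then the lower device's
 * ethtool op, else report software timestamps only.
 */
static int get_ts_info(ts_info_fn phy_ts_info, ts_info_fn lower_ts_info)
{
	if (phy_ts_info)
		return phy_ts_info();
	if (lower_ts_info)
		return lower_ts_info();
	printf("software timestamps only, phc_index=-1\n");
	return 0;
}

static int lower_op(void)
{
	printf("lowerdev get_ts_info\n");
	return 0;
}

int main(void)
{
	get_ts_info(NULL, lower_op);	/* delegates to the lower device */
	get_ts_info(NULL, NULL);	/* software-only defaults */
	return 0;
}
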
+
static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -1094,6 +1140,7 @@ static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_link_ksettings = macvlan_ethtool_get_link_ksettings,
.get_drvinfo = macvlan_ethtool_get_drvinfo,
+ .get_ts_info = macvlan_ethtool_get_ts_info,
};
static const struct net_device_ops macvlan_netdev_ops = {
@@ -1103,6 +1150,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
.ndo_stop = macvlan_stop,
.ndo_start_xmit = macvlan_start_xmit,
.ndo_change_mtu = macvlan_change_mtu,
+ .ndo_do_ioctl = macvlan_do_ioctl,
.ndo_fix_features = macvlan_fix_features,
.ndo_change_rx_flags = macvlan_change_rx_flags,
.ndo_set_mac_address = macvlan_set_mac_address,
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index ed1166adaa2f..b16a1221d19b 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -115,8 +115,7 @@ static netdev_tx_t net_failover_start_xmit(struct sk_buff *skb,
static u16 net_failover_select_queue(struct net_device *dev,
struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct net_failover_info *nfo_info = netdev_priv(dev);
struct net_device *primary_dev;
@@ -127,10 +126,9 @@ static u16 net_failover_select_queue(struct net_device *dev,
const struct net_device_ops *ops = primary_dev->netdev_ops;
if (ops->ndo_select_queue)
- txq = ops->ndo_select_queue(primary_dev, skb,
- sb_dev, fallback);
+ txq = ops->ndo_select_queue(primary_dev, skb, sb_dev);
else
- txq = fallback(primary_dev, skb, NULL);
+ txq = netdev_pick_tx(primary_dev, skb, NULL);
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 520657945b82..1c66e92c717c 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -273,13 +273,13 @@ config BCM87XX_PHY
Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs.
config BCM_CYGNUS_PHY
- tristate "Broadcom Cygnus SoC internal PHY"
- depends on ARCH_BCM_CYGNUS || COMPILE_TEST
+ tristate "Broadcom Cygnus/Omega SoC internal PHY"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on MDIO_BCM_IPROC
select BCM_NET_PHYLIB
---help---
This PHY driver is for the 1G internal PHYs of the Broadcom
- Cygnus Family SoC.
+ Cygnus and Omega family SoCs.
Currently supports internal PHYs used in the BCM11300,
BCM11320, BCM11350, BCM11360, BCM58300, BCM58302,
diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c
index 37218e5d7cc9..b7133c3f7437 100644
--- a/drivers/net/phy/aquantia_main.c
+++ b/drivers/net/phy/aquantia_main.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/bitfield.h>
#include <linux/phy.h>
#include "aquantia.h"
@@ -22,20 +23,33 @@
#define PHY_ID_AQCS109 0x03a1b5c2
#define PHY_ID_AQR405 0x03a1b4b0
+#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK GENMASK(7, 3)
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR 0
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI 2
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII 6
+#define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII 10
+
#define MDIO_AN_VEND_PROV 0xc400
#define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15)
#define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0)
+#define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4
#define MDIO_AN_TX_VEND_STATUS1 0xc800
-#define MDIO_AN_TX_VEND_STATUS1_10BASET (0x0 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_100BASETX (0x1 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_1000BASET (0x2 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_10GBASET (0x3 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_2500BASET (0x4 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_5000BASET (0x5 << 1)
-#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK (0x7 << 1)
+#define MDIO_AN_TX_VEND_STATUS1_RATE_MASK GENMASK(3, 1)
+#define MDIO_AN_TX_VEND_STATUS1_10BASET 0
+#define MDIO_AN_TX_VEND_STATUS1_100BASETX 1
+#define MDIO_AN_TX_VEND_STATUS1_1000BASET 2
+#define MDIO_AN_TX_VEND_STATUS1_10GBASET 3
+#define MDIO_AN_TX_VEND_STATUS1_2500BASET 4
+#define MDIO_AN_TX_VEND_STATUS1_5000BASET 5
#define MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX BIT(0)
+#define MDIO_AN_TX_VEND_INT_STATUS1 0xcc00
+#define MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT BIT(1)
+
#define MDIO_AN_TX_VEND_INT_STATUS2 0xcc01
#define MDIO_AN_TX_VEND_INT_MASK2 0xd401
@@ -44,8 +58,42 @@
#define MDIO_AN_RX_LP_STAT1 0xe820
#define MDIO_AN_RX_LP_STAT1_1000BASET_FULL BIT(15)
#define MDIO_AN_RX_LP_STAT1_1000BASET_HALF BIT(14)
+#define MDIO_AN_RX_LP_STAT1_SHORT_REACH BIT(13)
+#define MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT BIT(12)
+#define MDIO_AN_RX_LP_STAT1_AQ_PHY BIT(2)
+
+#define MDIO_AN_RX_LP_STAT4 0xe823
+#define MDIO_AN_RX_LP_STAT4_FW_MAJOR GENMASK(15, 8)
+#define MDIO_AN_RX_LP_STAT4_FW_MINOR GENMASK(7, 0)
+
+#define MDIO_AN_RX_VEND_STAT3 0xe832
+#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0)
+
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_FW_ID 0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR GENMASK(7, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT1 0xc885
+#define VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID GENMASK(7, 4)
+#define VEND1_GLOBAL_RSVD_STAT1_PROV_ID GENMASK(3, 0)
+
+#define VEND1_GLOBAL_RSVD_STAT9 0xc88d
+#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
+#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+
#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
@@ -72,6 +120,88 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+ int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+ SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
+ SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
+};
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+ u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+};
+
+static int aqr107_get_sset_count(struct phy_device *phydev)
+{
+ return AQR107_SGMII_STAT_SZ;
+}
+
+static void aqr107_get_strings(struct phy_device *phydev, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++)
+ strscpy(data + i * ETH_GSTRING_LEN, aqr107_hw_stats[i].name,
+ ETH_GSTRING_LEN);
+}
+
+static u64 aqr107_get_stat(struct phy_device *phydev, int index)
+{
+ const struct aqr107_hw_stat *stat = aqr107_hw_stats + index;
+ int len_l = min(stat->size, 16);
+ int len_h = stat->size - len_l;
+ u64 ret;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
+ if (val < 0)
+ return U64_MAX;
+
+ ret = val & GENMASK(len_l - 1, 0);
+ if (len_h) {
+ val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
+ if (val < 0)
+ return U64_MAX;
+
+ ret += (val & GENMASK(len_h - 1, 0)) << 16;
+ }
+
+ return ret;
+}
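
A worked example of the counter reassembly in aqr107_get_stat(): for a 26-bit statistic, 16 bits come from the first register and the remaining 10 from the next, shifted into place. This uses user-space stand-ins for GENMASK() and the MMD reads:

#include <stdio.h>
#include <stdint.h>

/* len_l bits (at most 16) come from the first register, len_h from the
 * next one; ((1u << n) - 1) plays the role of GENMASK(n - 1, 0).
 */
static uint64_t combine(uint16_t lo_reg, uint16_t hi_reg, int size)
{
	int len_l = size < 16 ? size : 16;
	int len_h = size - len_l;
	uint64_t ret = lo_reg & ((1u << len_l) - 1);

	if (len_h)
		ret += (uint64_t)(hi_reg & ((1u << len_h) - 1)) << 16;
	return ret;
}

int main(void)
{
	/* 26-bit counter: 16 low bits, 10 bits kept from the high word */
	printf("0x%llx\n", (unsigned long long)combine(0xbeef, 0x3ff, 26));
	return 0;
}
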
+
+static void aqr107_get_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ u64 val;
+ int i;
+
+ for (i = 0; i < AQR107_SGMII_STAT_SZ; i++) {
+ val = aqr107_get_stat(phydev, i);
+ if (val == U64_MAX)
+ phydev_err(phydev, "Reading HW Statistics failed for %s\n",
+ aqr107_hw_stats[i].name);
+ else
+ priv->sgmii_stats[i] += val;
+
+ data[i] = priv->sgmii_stats[i];
+ }
+}
+
static int aqr_config_aneg(struct phy_device *phydev)
{
bool changed = false;
@@ -112,41 +242,22 @@ static int aqr_config_aneg(struct phy_device *phydev)
static int aqr_config_intr(struct phy_device *phydev)
{
+ bool en = phydev->interrupts == PHY_INTERRUPT_ENABLED;
int err;
- if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2,
- MDIO_AN_TX_VEND_INT_MASK2_LINK);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK,
- VEND1_GLOBAL_INT_STD_MASK_ALL);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK,
- VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
- VEND1_GLOBAL_INT_VEND_MASK_AN);
- } else {
- err = phy_write_mmd(phydev, MDIO_MMD_AN,
- MDIO_AN_TX_VEND_INT_MASK2, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_STD_MASK, 0);
- if (err < 0)
- return err;
-
- err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
- VEND1_GLOBAL_INT_VEND_MASK, 0);
- }
+ err = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_MASK2,
+ en ? MDIO_AN_TX_VEND_INT_MASK2_LINK : 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_STD_MASK,
+ en ? VEND1_GLOBAL_INT_STD_MASK_ALL : 0);
+ if (err < 0)
+ return err;
- return err;
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_INT_VEND_MASK,
+ en ? VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 |
+ VEND1_GLOBAL_INT_VEND_MASK_AN : 0);
}
static int aqr_ack_interrupt(struct phy_device *phydev)
@@ -178,13 +289,309 @@ static int aqr_read_status(struct phy_device *phydev)
return genphy_c45_read_status(phydev);
}
+static int aqr107_read_downshift_event(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_INT_STATUS1);
+ if (val < 0)
+ return val;
+
+ return !!(val & MDIO_AN_TX_VEND_INT_STATUS1_DOWNSHIFT);
+}
+
+static int aqr107_read_rate(struct phy_device *phydev)
+{
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_TX_VEND_STATUS1);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_AN_TX_VEND_STATUS1_RATE_MASK, val)) {
+ case MDIO_AN_TX_VEND_STATUS1_10BASET:
+ phydev->speed = SPEED_10;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_100BASETX:
+ phydev->speed = SPEED_100;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_1000BASET:
+ phydev->speed = SPEED_1000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_2500BASET:
+ phydev->speed = SPEED_2500;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_5000BASET:
+ phydev->speed = SPEED_5000;
+ break;
+ case MDIO_AN_TX_VEND_STATUS1_10GBASET:
+ phydev->speed = SPEED_10000;
+ break;
+ default:
+ phydev->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ if (val & MDIO_AN_TX_VEND_STATUS1_FULL_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ return 0;
+}
+
+static int aqr107_read_status(struct phy_device *phydev)
+{
+ int val, ret;
+
+ ret = aqr_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+ return 0;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_IF_STATUS);
+ if (val < 0)
+ return val;
+
+ switch (FIELD_GET(MDIO_PHYXS_VEND_IF_STATUS_TYPE_MASK, val)) {
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_KR:
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_XFI:
+ phydev->interface = PHY_INTERFACE_MODE_10GKR;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ default:
+ phydev->interface = PHY_INTERFACE_MODE_NA;
+ break;
+ }
+
+ val = aqr107_read_downshift_event(phydev);
+ if (val <= 0)
+ return val;
+
+ phydev_warn(phydev, "Downshift occurred! Cabling may be defective.\n");
+
+ /* Read downshifted rate from vendor register */
+ return aqr107_read_rate(phydev);
+}
+
+static int aqr107_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+
+ *data = enable && cnt ? cnt : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int aqr107_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val = 0;
+
+ if (!FIELD_FIT(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt))
+ return -E2BIG;
+
+ if (cnt != DOWNSHIFT_DEV_DISABLE) {
+ val = MDIO_AN_VEND_PROV_DOWNSHIFT_EN;
+ val |= FIELD_PREP(MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, cnt);
+ }
+
+ return phy_modify_mmd(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
+ MDIO_AN_VEND_PROV_DOWNSHIFT_EN |
+ MDIO_AN_VEND_PROV_DOWNSHIFT_MASK, val);
+}
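
The downshift provisioning field can be modeled in user space: bit 4 enables downshift and bits 3:0 hold the retry count, matching the FIELD_FIT()/FIELD_PREP() logic above. DOWNSHIFT_DEV_DISABLE is assumed to be 0 here:

#include <stdio.h>

/* Assumed bit layout, mirroring the MDIO_AN_VEND_PROV defines above. */
#define DOWNSHIFT_EN	(1u << 4)
#define DOWNSHIFT_MASK	0xfu

static int downshift_bits(unsigned int cnt)
{
	if (cnt > DOWNSHIFT_MASK)	/* FIELD_FIT() failure -> -E2BIG */
		return -1;
	return cnt ? (int)(DOWNSHIFT_EN | cnt) : 0;	/* 0 disables */
}

int main(void)
{
	printf("cnt=4 -> 0x%x\n", downshift_bits(4));	/* 0x14 */
	printf("cnt=0 -> 0x%x\n", downshift_bits(0));	/* 0x0  */
	return 0;
}
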
+
+static int aqr107_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int aqr107_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return aqr107_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/* If we configure settings while the firmware is still initializing the chip,
+ * those settings may be overwritten. Therefore make sure chip initialization
+ * has completed first. Use the presence of the firmware ID as an indicator
+ * that initialization has completed.
+ * The chip also provides a "reset completed" bit, but it is cleared after
+ * being read, so calling this function a second time would time out.
+ */
+static int aqr107_wait_reset_complete(struct phy_device *phydev)
+{
+ int val, retries = 100;
+
+ do {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return val;
+ msleep(20);
+ } while (!val && --retries);
+
+ return val ? 0 : -ETIMEDOUT;
+}
+
+static void aqr107_chip_info(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor, build_id, prov_id;
+ int val;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(VEND1_GLOBAL_FW_ID_MAJOR, val);
+ fw_minor = FIELD_GET(VEND1_GLOBAL_FW_ID_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT1);
+ if (val < 0)
+ return;
+
+ build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
+ prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
+
+ phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+ fw_major, fw_minor, build_id, prov_id);
+}
+
+static int aqr107_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ phydev->interface != PHY_INTERFACE_MODE_10GKR)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
static int aqcs109_config_init(struct phy_device *phydev)
{
+ int ret;
+
+ /* Check that the PHY interface type is compatible */
+ if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+ phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
+ return -ENODEV;
+
+ ret = aqr107_wait_reset_complete(phydev);
+ if (!ret)
+ aqr107_chip_info(phydev);
+
/* AQCS109 belongs to a chip family partially supporting 10G and 5G.
* PMA speed ability bits are the same for all members of the family;
* the AQCS109, however, supports speeds up to 2.5G only.
*/
- return phy_set_max_speed(phydev, SPEED_2500);
+ ret = phy_set_max_speed(phydev, SPEED_2500);
+ if (ret)
+ return ret;
+
+ /* ensure that a latched downshift event is cleared */
+ aqr107_read_downshift_event(phydev);
+
+ return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
+static void aqr107_link_change_notify(struct phy_device *phydev)
+{
+ u8 fw_major, fw_minor;
+ bool downshift, short_reach, afr;
+ int mode, val;
+
+ if (phydev->state != PHY_RUNNING || phydev->autoneg == AUTONEG_DISABLE)
+ return;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT1);
+ /* call failed or link partner is not an Aquantia PHY */
+ if (val < 0 || !(val & MDIO_AN_RX_LP_STAT1_AQ_PHY))
+ return;
+
+ short_reach = val & MDIO_AN_RX_LP_STAT1_SHORT_REACH;
+ downshift = val & MDIO_AN_RX_LP_STAT1_AQRATE_DOWNSHIFT;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_LP_STAT4);
+ if (val < 0)
+ return;
+
+ fw_major = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MAJOR, val);
+ fw_minor = FIELD_GET(MDIO_AN_RX_LP_STAT4_FW_MINOR, val);
+
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_RX_VEND_STAT3);
+ if (val < 0)
+ return;
+
+ afr = val & MDIO_AN_RX_VEND_STAT3_AFR;
+
+ phydev_dbg(phydev, "Link partner is Aquantia PHY, FW %u.%u%s%s%s\n",
+ fw_major, fw_minor,
+ short_reach ? ", short reach mode" : "",
+ downshift ? ", fast-retrain downshift advertised" : "",
+ afr ? ", fast reframe advertised" : "");
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_RSVD_STAT9);
+ if (val < 0)
+ return;
+
+ mode = FIELD_GET(VEND1_GLOBAL_RSVD_STAT9_MODE, val);
+ if (mode == VEND1_GLOBAL_RSVD_STAT9_1000BT2)
+ phydev_info(phydev, "Aquantia 1000Base-T2 mode active\n");
+}
+
+static int aqr107_suspend(struct phy_device *phydev)
+{
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_resume(struct phy_device *phydev)
+{
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, MDIO_CTRL1,
+ MDIO_CTRL1_LPOWER);
+}
+
+static int aqr107_probe(struct phy_device *phydev)
+{
+ phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+ sizeof(struct aqr107_priv), GFP_KERNEL);
+ if (!phydev->priv)
+ return -ENOMEM;
+
+ return aqr_hwmon_probe(phydev);
}
static struct phy_driver aqr_driver[] = {
@@ -233,23 +640,40 @@ static struct phy_driver aqr_driver[] = {
.name = "Aquantia AQR107",
.aneg_done = genphy_c45_aneg_done,
.get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
+ .config_init = aqr107_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
.name = "Aquantia AQCS109",
.aneg_done = genphy_c45_aneg_done,
.get_features = genphy_c45_pma_read_abilities,
- .probe = aqr_hwmon_probe,
+ .probe = aqr107_probe,
.config_init = aqcs109_config_init,
.config_aneg = aqr_config_aneg,
.config_intr = aqr_config_intr,
.ack_interrupt = aqr_ack_interrupt,
- .read_status = aqr_read_status,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
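
The FIELD_GET()/FIELD_PREP() helpers used throughout the Aquantia
additions above extract and place register bitfields; the shift is
derived from the mask constant at compile time. A minimal sketch,
assuming a made-up 16-bit layout rather than the real
MDIO_AN_RX_LP_STAT4 definitions:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    /* hypothetical layout: FW major in bits 15:8, minor in bits 7:0 */
    #define FW_MAJOR GENMASK(15, 8)
    #define FW_MINOR GENMASK(7, 0)

    static void decode_fw(u16 val, u8 *major, u8 *minor)
    {
            *major = FIELD_GET(FW_MAJOR, val);      /* (val & mask) >> 8 */
            *minor = FIELD_GET(FW_MINOR, val);      /* (val & mask) >> 0 */
    }
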
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index f3e96191eb6f..f315ab468a0d 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -324,8 +324,6 @@ static int at803x_config_intr(struct phy_device *phydev)
static void at803x_link_change_notify(struct phy_device *phydev)
{
- struct at803x_priv *priv = phydev->priv;
-
/*
* Conduct a hardware reset for AT8030 every time a link loss is
* signalled. This is necessary to circumvent a hardware bug that
@@ -333,25 +331,19 @@ static void at803x_link_change_notify(struct phy_device *phydev)
* occurs when the cable is unplugged while TX packets are pending
* in the FIFO. In such cases, the FIFO enters an error mode it
* cannot recover from by software.
*/
- if (phydev->state == PHY_NOLINK) {
- if (phydev->mdio.reset && !priv->phy_reset) {
- struct at803x_context context;
+ if (phydev->state == PHY_NOLINK && phydev->mdio.reset) {
+ struct at803x_context context;
- at803x_context_save(phydev, &context);
+ at803x_context_save(phydev, &context);
- phy_device_reset(phydev, 1);
- msleep(1);
- phy_device_reset(phydev, 0);
- msleep(1);
+ phy_device_reset(phydev, 1);
+ msleep(1);
+ phy_device_reset(phydev, 0);
+ msleep(1);
- at803x_context_restore(phydev, &context);
+ at803x_context_restore(phydev, &context);
- phydev_dbg(phydev, "%s(): phy was reset\n",
- __func__);
- priv->phy_reset = true;
- }
- } else {
- priv->phy_reset = false;
+ phydev_dbg(phydev, "%s(): phy was reset\n", __func__);
}
}
diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
index ab8e12922bf9..625b7cb76285 100644
--- a/drivers/net/phy/bcm-cygnus.c
+++ b/drivers/net/phy/bcm-cygnus.c
@@ -10,6 +10,10 @@
#include <linux/netdevice.h>
#include <linux/phy.h>
+struct bcm_omega_phy_priv {
+ u64 *stats;
+};
+
/* Broadcom Cygnus Phy specific registers */
#define MII_BCM_CYGNUS_AFE_VDAC_ICTRL_0 0x91E5 /* VDAL Control register */
@@ -121,6 +125,130 @@ static int bcm_cygnus_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
+static int bcm_omega_config_init(struct phy_device *phydev)
+{
+ u8 count, rev;
+ int ret = 0;
+
+ rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
+ pr_info_once("%s: %s PHY revision: 0x%02x\n",
+ phydev_name(phydev), phydev->drv->name, rev);
+
+ /* Dummy read to a register to work around an issue upon reset where the
+ * internal inverter may not allow the first MDIO transaction to pass
+ * the MDIO management controller and make us return 0xffff for such
+ * reads.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ switch (rev) {
+ case 0x00:
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_downshift_get(phydev, &count);
+ if (ret)
+ return ret;
+
+ /* Only enable EEE if Wirespeed/downshift is disabled */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return bcm_phy_enable_apd(phydev, true);
+}
+
+static int bcm_omega_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds coming out of suspend/resume */
+ ret = bcm_omega_config_init(phydev);
+ if (ret)
+ return ret;
+
+ /* 28nm Gigabit PHYs come out of reset without any half-duplex
+ * or "hub" compliant advertised mode, fix that. This does not
+ * cause any problems with the PHY library since genphy_config_aneg()
+ * gracefully handles auto-negotiated and forced modes.
+ */
+ return genphy_config_aneg(phydev);
+}
+
+static int bcm_omega_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return bcm_phy_downshift_get(phydev, (u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int bcm_omega_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna,
+ const void *data)
+{
+ u8 count = *(u8 *)data;
+ int ret;
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ ret = bcm_phy_downshift_set(phydev, count);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ return ret;
+
+ /* Disable EEE advertisement since this prevents the PHY
+ * from successfully linking up; trigger auto-negotiation restart
+ * to let the MAC decide what to do.
+ */
+ ret = bcm_phy_set_eee(phydev, count == DOWNSHIFT_DEV_DISABLE);
+ if (ret)
+ return ret;
+
+ return genphy_restart_aneg(phydev);
+}
+
+static void bcm_omega_get_phy_stats(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_omega_phy_priv *priv = phydev->priv;
+
+ bcm_phy_get_stats(phydev, priv->stats, stats, data);
+}
+
+static int bcm_omega_probe(struct phy_device *phydev)
+{
+ struct bcm_omega_phy_priv *priv;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ phydev->priv = priv;
+
+ priv->stats = devm_kcalloc(&phydev->mdio.dev,
+ bcm_phy_get_sset_count(phydev), sizeof(u64),
+ GFP_KERNEL);
+ if (!priv->stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
static struct phy_driver bcm_cygnus_phy_driver[] = {
{
.phy_id = PHY_ID_BCM_CYGNUS,
@@ -132,10 +260,27 @@ static struct phy_driver bcm_cygnus_phy_driver[] = {
.config_intr = bcm_phy_config_intr,
.suspend = genphy_suspend,
.resume = bcm_cygnus_resume,
-} };
+}, {
+ .phy_id = PHY_ID_BCM_OMEGA,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom Omega Combo GPHY",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm_omega_config_init,
+ .suspend = genphy_suspend,
+ .resume = bcm_omega_resume,
+ .get_tunable = bcm_omega_get_tunable,
+ .set_tunable = bcm_omega_set_tunable,
+ .get_sset_count = bcm_phy_get_sset_count,
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm_omega_get_phy_stats,
+ .probe = bcm_omega_probe,
+}
+};
static struct mdio_device_id __maybe_unused bcm_cygnus_phy_tbl[] = {
{ PHY_ID_BCM_CYGNUS, 0xfffffff0, },
+ { PHY_ID_BCM_OMEGA, 0xfffffff0, },
{ }
};
MODULE_DEVICE_TABLE(mdio, bcm_cygnus_phy_tbl);
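
bcm_omega_config_init() above derives the silicon revision by masking
the model bits out of the PHY ID; with a phy_id_mask of 0xfffffff0 only
the low nibble survives. A sketch with an invented ID value:

    /* rev = low nibble of the ID; 0xae025201 is a made-up example */
    static u8 omega_rev(u32 phy_id)
    {
            return phy_id & ~0xfffffff0;    /* 0xae025201 -> 0x01 */
    }
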
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index a75642051b8b..e0d3310957ff 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -371,6 +371,58 @@ void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
+{
+ /* Reset R_CAL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+}
+EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset);
+
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
+{
+ /* Increase VCO range to prevent unlocking problem of PLL at low
+ * temp
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
+
+ /* Change Ki to 011 */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
+
+ bcm_phy_r_rc_cal_reset(phydev);
+
+ /* write AFE_RXCONFIG_0 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
+
+ /* write AFE_TX_CONFIG */
+ bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index 17faaefcfd60..5ecacb4e64f0 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -9,6 +9,24 @@
#include <linux/brcmphy.h>
#include <linux/phy.h>
+/* 28nm only register definitions */
+#define MISC_ADDR(base, channel) base, channel
+
+#define DSP_TAP10 MISC_ADDR(0x0a, 0)
+#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
+#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
+#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
+
+#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
+#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
+#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
+#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
+#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
+#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
+#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
+#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
+
+
int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
@@ -45,5 +63,7 @@ int bcm_phy_get_sset_count(struct phy_device *phydev);
void bcm_phy_get_strings(struct phy_device *phydev, u8 *data);
void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
struct ethtool_stats *stats, u64 *data);
+void bcm_phy_r_rc_cal_reset(struct phy_device *phydev);
+int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev);
#endif /* _LINUX_BCM_PHY_LIB_H */
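
The MISC_ADDR(base, channel) macro moved into this header expands to two
comma-separated arguments, so a single register define fills both the
reg and chl parameters of bcm_phy_write_misc(). A sketch, assuming the
helper's signature bcm_phy_write_misc(phydev, reg, chl, val):

    /* DSP_TAP10 is MISC_ADDR(0x0a, 0), so the call below expands to
     * bcm_phy_write_misc(phydev, 0x0a, 0, 0x690b)
     */
    static int example_dsp_write(struct phy_device *phydev)
    {
            return bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
    }
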
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index b8415f8fae14..a75e1b283541 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -37,77 +37,10 @@
#define MII_BCM7XXX_SHD_3_TL4 0x23
#define MII_BCM7XXX_TL4_RST_MSK (BIT(2) | BIT(1))
-/* 28nm only register definitions */
-#define MISC_ADDR(base, channel) base, channel
-
-#define DSP_TAP10 MISC_ADDR(0x0a, 0)
-#define PLL_PLLCTRL_1 MISC_ADDR(0x32, 1)
-#define PLL_PLLCTRL_2 MISC_ADDR(0x32, 2)
-#define PLL_PLLCTRL_4 MISC_ADDR(0x33, 0)
-
-#define AFE_RXCONFIG_0 MISC_ADDR(0x38, 0)
-#define AFE_RXCONFIG_1 MISC_ADDR(0x38, 1)
-#define AFE_RXCONFIG_2 MISC_ADDR(0x38, 2)
-#define AFE_RX_LP_COUNTER MISC_ADDR(0x38, 3)
-#define AFE_TX_CONFIG MISC_ADDR(0x39, 0)
-#define AFE_VDCA_ICTRL_0 MISC_ADDR(0x39, 1)
-#define AFE_VDAC_OTHERS_0 MISC_ADDR(0x39, 3)
-#define AFE_HPF_TRIM_OTHERS MISC_ADDR(0x3a, 0)
-
struct bcm7xxx_phy_priv {
u64 *stats;
};
-static void r_rc_cal_reset(struct phy_device *phydev)
-{
- /* Reset R_CAL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
-
- /* Disable Reset R_AL/RC_CAL Engine */
- bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
-}
-
-static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
-{
- /* Increase VCO range to prevent unlocking problem of PLL at low
- * temp
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);
-
- /* Change Ki to 011 */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);
-
- /* Disable loading of TVCO buffer to bandgap, set bandgap trim
- * to 111
- */
- bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);
-
- /* Adjust bias current trim by -3 */
- bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);
-
- /* Switch to CORE_BASE1E */
- phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);
-
- r_rc_cal_reset(phydev);
-
- /* write AFE_RXCONFIG_0 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);
-
- /* write AFE_RXCONFIG_1 */
- bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);
-
- /* write AFE_RX_LP_COUNTER */
- bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);
-
- /* write AFE_HPF_TRIM_OTHERS */
- bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);
-
- /* write AFTE_TX_CONFIG */
- bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);
-
- return 0;
-}
-
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
{
/* AFE_RXCONFIG_0 */
@@ -143,7 +76,7 @@ static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -171,7 +104,7 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
bcm_phy_write_misc(phydev, DSP_TAP10, 0x011b);
/* Reset R_CAL/RC_CAL engine */
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -196,7 +129,7 @@ static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
/* Enable ffe zero detection for Vitesse interoperability */
bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
- r_rc_cal_reset(phydev);
+ bcm_phy_r_rc_cal_reset(phydev);
return 0;
}
@@ -227,7 +160,7 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
switch (rev) {
case 0xa0:
case 0xb0:
- ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+ ret = bcm_phy_28nm_a0b0_afe_config_init(phydev);
break;
case 0xd0:
ret = bcm7xxx_28nm_d0_afe_config_init(phydev);
@@ -657,7 +590,6 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
- BCM7XXX_28NM_GPHY(PHY_ID_BCM_OMEGA, "Broadcom Omega Combo GPHY"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 3ccba37bd6dd..65350186d514 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -29,6 +29,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/marvell_phy.h>
+#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/io.h>
@@ -91,6 +92,14 @@
#define MII_88E1510_TEMP_SENSOR 0x1b
#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+#define MII_88E1540_COPPER_CTRL3 0x1a
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK GENMASK(11, 10)
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS 0
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS 1
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS 2
+#define MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS 3
+#define MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN BIT(9)
+
#define MII_88E6390_MISC_TEST 0x1b
#define MII_88E6390_MISC_TEST_SAMPLE_1S 0
#define MII_88E6390_MISC_TEST_SAMPLE_10MS BIT(14)
@@ -1025,6 +1034,101 @@ static int m88e1145_config_init(struct phy_device *phydev)
return 0;
}
+static int m88e1540_get_fld(struct phy_device *phydev, u8 *msecs)
+{
+ int val;
+
+ val = phy_read(phydev, MII_88E1540_COPPER_CTRL3);
+ if (val < 0)
+ return val;
+
+ if (!(val & MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN)) {
+ *msecs = ETHTOOL_PHY_FAST_LINK_DOWN_OFF;
+ return 0;
+ }
+
+ val = FIELD_GET(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ switch (val) {
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS:
+ *msecs = 0;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS:
+ *msecs = 10;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS:
+ *msecs = 20;
+ break;
+ case MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS:
+ *msecs = 40;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m88e1540_set_fld(struct phy_device *phydev, const u8 *msecs)
+{
+ struct ethtool_eee eee;
+ int val, ret;
+
+ if (*msecs == ETHTOOL_PHY_FAST_LINK_DOWN_OFF)
+ return phy_clear_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+
+ /* According to the Marvell data sheet, EEE must be disabled for
+ * Fast Link Down detection to work properly
+ */
+ ret = phy_ethtool_get_eee(phydev, &eee);
+ if (!ret && eee.eee_enabled) {
+ phydev_warn(phydev, "Fast Link Down detection requires EEE to be disabled!\n");
+ return -EBUSY;
+ }
+
+ if (*msecs <= 5)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_00MS;
+ else if (*msecs <= 15)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_10MS;
+ else if (*msecs <= 30)
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_20MS;
+ else
+ val = MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_40MS;
+
+ val = FIELD_PREP(MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+
+ ret = phy_modify(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_LINK_DOWN_DELAY_MASK, val);
+ if (ret)
+ return ret;
+
+ return phy_set_bits(phydev, MII_88E1540_COPPER_CTRL3,
+ MII_88E1540_COPPER_CTRL3_FAST_LINK_DOWN);
+}
+
+static int m88e1540_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_get_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int m88e1540_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_FAST_LINK_DOWN:
+ return m88e1540_set_fld(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
/* The VOD can be out of specification on link up. Poke an
* undocumented register, in an undocumented page, with a magic value
* to fix this.
@@ -2247,6 +2351,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
{
.phy_id = MARVELL_PHY_ID_88E1545,
@@ -2307,6 +2413,8 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
+ .get_tunable = m88e1540_get_tunable,
+ .set_tunable = m88e1540_set_tunable,
},
};
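
The new Fast Link Down tunable travels over the standard PHY tunable
ioctl path; newer ethtool releases also expose it on the command line.
A userspace sketch of the GET side, assuming uapi headers that define
ETHTOOL_PHY_FAST_LINK_DOWN and an interface named eth0 (the u8 payload
sits directly after struct ethtool_tunable):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct {
                    struct ethtool_tunable hdr;
                    __u8 msecs;             /* payload follows header */
            } req = { .hdr = {
                    .cmd     = ETHTOOL_PHY_GTUNABLE,
                    .id      = ETHTOOL_PHY_FAST_LINK_DOWN,
                    .type_id = ETHTOOL_TUNABLE_U8,
                    .len     = 1,
            } };
            struct ifreq ifr = { 0 };
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&req;
            if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("fast link down: %u msecs\n", req.msecs);
            return 0;
    }
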
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 100b401b1f4a..fc06e8c9a64b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -48,6 +48,8 @@ enum {
MV_AN_STAT1000 = 0x8001, /* 1000base-T status register */
/* Vendor2 MMD registers */
+ MV_V2_PORT_CTRL = 0xf001,
+ MV_V2_PORT_CTRL_PWRDOWN = 0x0800,
MV_V2_TEMP_CTRL = 0xf08a,
MV_V2_TEMP_CTRL_MASK = 0xc000,
MV_V2_TEMP_CTRL_SAMPLE = 0x0000,
@@ -226,11 +228,19 @@ static int mv3310_probe(struct phy_device *phydev)
static int mv3310_suspend(struct phy_device *phydev)
{
- return 0;
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
}
static int mv3310_resume(struct phy_device *phydev)
{
+ int ret;
+
+ ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL,
+ MV_V2_PORT_CTRL_PWRDOWN);
+ if (ret)
+ return ret;
+
return mv3310_hwmon_config(phydev, true);
}
@@ -474,6 +484,8 @@ static struct phy_driver mv3310_drivers[] = {
.name = "mv88x2110",
.get_features = genphy_c45_pma_read_abilities,
.probe = mv3310_probe,
+ .suspend = mv3310_suspend,
+ .resume = mv3310_resume,
.soft_reset = genphy_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
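
phy_set_bits_mmd()/phy_clear_bits_mmd(), used above to toggle the
power-down bit, are read-modify-write wrappers. Conceptually (locking
elided), the set variant behaves like this sketch:

    static int sketch_set_bits_mmd(struct phy_device *phydev, int devad,
                                   u32 reg, u16 bits)
    {
            int val = phy_read_mmd(phydev, devad, reg);

            if (val < 0)
                    return val;

            return phy_write_mmd(phydev, devad, reg, val | bits);
    }
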
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 8295bc7c8c20..4a28fb29adaa 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -92,10 +92,7 @@ static int unimac_mdio_poll(void *wait_func_data)
usleep_range(1000, 2000);
} while (--timeout);
- if (!timeout)
- return -ETIMEDOUT;
-
- return 0;
+ return -ETIMEDOUT;
}
static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -292,7 +289,7 @@ static int unimac_mdio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
- dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus at 0x%p\n", priv->base);
+ dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus\n");
return 0;
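
The unimac_mdio_poll() simplification works because the success path
returns 0 from inside the loop; falling out of the do/while is only
possible once timeout reaches zero, so the removed "if (!timeout)" test
could never be false there. The resulting idiom, with assumed names:

    static int poll_done(void __iomem *reg, u32 busy_bit)
    {
            unsigned int timeout = 1000;

            do {
                    if (!(readl(reg) & busy_bit))
                            return 0;       /* done: exit inside loop */
                    usleep_range(1000, 2000);
            } while (--timeout);

            return -ETIMEDOUT;              /* only reached on timeout */
    }
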
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index 9e24d9569424..05c3e87ffc2d 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -262,12 +262,30 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
+ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
+ if (val < 0)
+ return val;
+
+ if (!(val & MDIO_AN_STAT1_COMPLETE)) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->lp_advertising);
+ mii_10gbt_stat_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, 0);
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ return 0;
+ }
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->lp_advertising,
+ val & MDIO_AN_STAT1_LPABLE);
+
/* Read the link partner's base page advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
if (val < 0)
return val;
- mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ mii_adv_mod_linkmode_adv_t(phydev->lp_advertising, val);
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
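
genphy_c45_read_lpa() now wipes stale link-partner data when
autonegotiation has not completed, and records the partner's autoneg
ability with linkmode_mod_bit(), which sets or clears a bit depending
on its third argument; roughly:

    static inline void sketch_mod_bit(int nr, unsigned long *addr, int set)
    {
            if (set)
                    linkmode_set_bit(nr, addr);
            else
                    linkmode_clear_bit(nr, addr);
    }
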
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3745220c5c98..5938c5acf3b3 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -891,9 +891,6 @@ void phy_state_machine(struct work_struct *work)
old_state = phydev->state;
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
-
switch (phydev->state) {
case PHY_DOWN:
case PHY_READY:
@@ -940,10 +937,13 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);
- if (old_state != phydev->state)
+ if (old_state != phydev->state) {
phydev_dbg(phydev, "PHY state change %s -> %s\n",
phy_state_to_str(old_state),
phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 77068c545de0..f7a6d0ffb1ac 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1723,10 +1723,8 @@ int genphy_update_link(struct phy_device *phydev)
if (status < 0)
return status;
- if ((status & BMSR_LSTATUS) == 0)
- phydev->link = 0;
- else
- phydev->link = 1;
+ phydev->link = status & BMSR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
return 0;
}
@@ -1743,19 +1741,21 @@ EXPORT_SYMBOL(genphy_update_link);
*/
int genphy_read_status(struct phy_device *phydev)
{
- int adv;
- int err;
- int lpa;
- int lpagb = 0;
+ int adv, lpa, lpagb, err;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
linkmode_zero(phydev->lp_advertising);
- if (AUTONEG_ENABLE == phydev->autoneg) {
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
phydev->supported) ||
linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
@@ -1785,14 +1785,8 @@ int genphy_read_status(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
phy_resolve_aneg_linkmode(phydev);
- } else {
+ } else if (phydev->autoneg == AUTONEG_DISABLE) {
int bmcr = phy_read(phydev, MII_BMCR);
if (bmcr < 0)
@@ -1809,9 +1803,6 @@ int genphy_read_status(struct phy_device *phydev)
phydev->speed = SPEED_100;
else
phydev->speed = SPEED_10;
-
- phydev->pause = 0;
- phydev->asym_pause = 0;
}
return 0;
@@ -1887,6 +1878,54 @@ int genphy_config_init(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_init);
+/**
+ * genphy_read_abilities - read PHY abilities from Clause 22 registers
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates
+ * phydev->supported accordingly.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_read_abilities(struct phy_device *phydev)
+{
+ int val;
+
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ val = phy_read(phydev, MII_BMSR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported,
+ val & BMSR_ANEGCAPABLE);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, phydev->supported,
+ val & BMSR_100FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, phydev->supported,
+ val & BMSR_100HALF);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, phydev->supported,
+ val & BMSR_10FULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, phydev->supported,
+ val & BMSR_10HALF);
+
+ if (val & BMSR_ESTATEN) {
+ val = phy_read(phydev, MII_ESTATUS);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ phydev->supported, val & ESTATUS_1000_TFULL);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ phydev->supported, val & ESTATUS_1000_THALF);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(genphy_read_abilities);
+
/* This is used for the phy device which doesn't support the MMD extended
* register access, but it does have side effect when we are trying to access
* the MMD register via indirect method.
@@ -2104,12 +2143,17 @@ static int phy_probe(struct device *dev)
*/
if (phydrv->features) {
linkmode_copy(phydev->supported, phydrv->features);
- } else {
+ } else if (phydrv->get_features) {
err = phydrv->get_features(phydev);
- if (err)
- goto out;
+ } else if (phydev->is_c45) {
+ err = genphy_c45_pma_read_abilities(phydev);
+ } else {
+ err = genphy_read_abilities(phydev);
}
+ if (err)
+ goto out;
+
of_set_phy_supported(phydev);
linkmode_copy(phydev->advertising, phydev->supported);
@@ -2177,11 +2221,11 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
int retval;
/* Either the features are hard coded, or dynamically
- * determine. It cannot be both or neither
+ * determined. It cannot be both.
*/
- if (WARN_ON((!new_driver->features && !new_driver->get_features) ||
- (new_driver->features && new_driver->get_features))) {
- pr_err("%s: Driver features are missing\n", new_driver->name);
+ if (WARN_ON(new_driver->features && new_driver->get_features)) {
+ pr_err("%s: features and get_features must not both be set\n",
+ new_driver->name);
return -EINVAL;
}
@@ -2243,8 +2287,7 @@ static struct phy_driver genphy_driver = {
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
.soft_reset = genphy_no_soft_reset,
- .config_init = genphy_config_init,
- .features = PHY_GBIT_ALL_PORTS_FEATURES,
+ .get_features = genphy_read_abilities,
.aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
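
With the probe fallback above, a Clause 22 driver may omit both
.features and .get_features and let the core call
genphy_read_abilities() for it, as the Realtek driver below now does. A
minimal sketch with an invented ID:

    static struct phy_driver example_drv[] = { {
            PHY_ID_MATCH_EXACT(0x00112233), /* made-up ID */
            .name           = "Example PHY",
            .suspend        = genphy_suspend,
            .resume         = genphy_resume,
    } };
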
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 10df52ccddfe..d6a10f323117 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -151,21 +151,14 @@ static int rtl8211_config_aneg(struct phy_device *phydev)
static int rtl8211c_config_init(struct phy_device *phydev)
{
/* RTL8211C has an issue when operating in Gigabit slave mode */
- phy_set_bits(phydev, MII_CTRL1000,
- CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
-
- return genphy_config_init(phydev);
+ return phy_set_bits(phydev, MII_CTRL1000,
+ CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
}
static int rtl8211f_config_init(struct phy_device *phydev)
{
- int ret;
u16 val = 0;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
/* enable TX-delay for rgmii-id and rgmii-txid, otherwise disable it */
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -192,10 +185,6 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
{
int ret;
- ret = genphy_config_init(phydev);
- if (ret < 0)
- return ret;
-
ret = phy_set_bits(phydev, RTL8366RB_POWER_SAVE,
RTL8366RB_POWER_SAVE_ON);
if (ret) {
@@ -210,11 +199,9 @@ static struct phy_driver realtek_drvs[] = {
{
PHY_ID_MATCH_EXACT(0x00008201),
.name = "RTL8201CP Ethernet",
- .features = PHY_BASIC_FEATURES,
}, {
PHY_ID_MATCH_EXACT(0x001cc816),
.name = "RTL8201F Fast Ethernet",
- .features = PHY_BASIC_FEATURES,
.ack_interrupt = &rtl8201_ack_interrupt,
.config_intr = &rtl8201_config_intr,
.suspend = genphy_suspend,
@@ -224,14 +211,12 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_aneg = rtl8211_config_aneg,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x001cc912),
.name = "RTL8211B Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211b_config_intr,
.read_mmd = &genphy_read_mmd_unsupported,
@@ -241,14 +226,12 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc913),
.name = "RTL8211C Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = rtl8211c_config_init,
.read_mmd = &genphy_read_mmd_unsupported,
.write_mmd = &genphy_write_mmd_unsupported,
}, {
PHY_ID_MATCH_EXACT(0x001cc914),
.name = "RTL8211DN Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = rtl821x_ack_interrupt,
.config_intr = rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -256,7 +239,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc915),
.name = "RTL8211E Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.ack_interrupt = &rtl821x_ack_interrupt,
.config_intr = &rtl8211e_config_intr,
.suspend = genphy_suspend,
@@ -264,7 +246,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc916),
.name = "RTL8211F Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8211f_config_init,
.ack_interrupt = &rtl8211f_ack_interrupt,
.config_intr = &rtl8211f_config_intr,
@@ -275,8 +256,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc800),
.name = "Generic Realtek PHY",
- .features = PHY_GBIT_FEATURES,
- .config_init = genphy_config_init,
.suspend = genphy_suspend,
.resume = genphy_resume,
.read_page = rtl821x_read_page,
@@ -284,7 +263,6 @@ static struct phy_driver realtek_drvs[] = {
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
- .features = PHY_GBIT_FEATURES,
.config_init = &rtl8366rb_config_init,
/* These interrupts are handled by the irq controller
* embedded inside the RTL8366RB, they get unmasked when the
diff --git a/drivers/net/phy/rockchip.c b/drivers/net/phy/rockchip.c
index 95abf7072f32..9053b1d01906 100644
--- a/drivers/net/phy/rockchip.c
+++ b/drivers/net/phy/rockchip.c
@@ -104,41 +104,14 @@ static int rockchip_integrated_phy_config_init(struct phy_device *phydev)
static void rockchip_link_change_notify(struct phy_device *phydev)
{
- int speed = SPEED_10;
-
- if (phydev->autoneg == AUTONEG_ENABLE) {
- int reg = phy_read(phydev, MII_SPECIAL_CONTROL_STATUS);
-
- if (reg < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", reg);
- return;
- }
-
- if (reg & MII_SPEED_100)
- speed = SPEED_100;
- else if (reg & MII_SPEED_10)
- speed = SPEED_10;
- } else {
- int bmcr = phy_read(phydev, MII_BMCR);
-
- if (bmcr < 0) {
- phydev_err(phydev, "phy_read err: %d.\n", bmcr);
- return;
- }
-
- if (bmcr & BMCR_SPEED100)
- speed = SPEED_100;
- else
- speed = SPEED_10;
- }
-
/*
* If mode switch happens from 10BT to 100BT, all DSP/AFE
* registers are set to default values. So any AFE/DSP
* registers have to be re-initialized in this case.
*/
- if ((phydev->speed == SPEED_10) && (speed == SPEED_100)) {
+ if (phydev->state == PHY_RUNNING && phydev->speed == SPEED_100) {
int ret = rockchip_integrated_phy_analog_init(phydev);
+
if (ret)
phydev_err(phydev, "rockchip_integrated_phy_analog_init err: %d.\n",
ret);
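
Because phy.c now calls .link_change_notify only on actual state
transitions, a handler can key off the new state directly instead of
re-reading speed registers, which is what the rockchip change above
does. A skeleton:

    static void example_link_change_notify(struct phy_device *phydev)
    {
            if (phydev->state != PHY_RUNNING)
                    return;

            /* link just reached RUNNING: re-apply AFE/DSP fixups here */
    }
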
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6ed96fdfd96d..a622ec33453a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -38,13 +38,11 @@
* Helpers
**********/
-#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
-
static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
struct team_port *port = rtnl_dereference(dev->rx_handler_data);
- return team_port_exists(dev) ? port : NULL;
+ return netif_is_team_port(dev) ? port : NULL;
}
/*
@@ -1143,7 +1141,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return -EINVAL;
}
- if (team_port_exists(port_dev)) {
+ if (netif_is_team_port(port_dev)) {
NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
netdev_err(dev, "Device %s is already a port "
"of a team device\n", portname);
@@ -1691,8 +1689,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
/*
* This helper function exists to help dev_pick_tx get the correct
@@ -2726,24 +2723,20 @@ static const struct genl_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.doit = team_nl_cmd_noop,
- .policy = team_nl_policy,
},
{
.cmd = TEAM_CMD_OPTIONS_SET,
.doit = team_nl_cmd_options_set,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_OPTIONS_GET,
.doit = team_nl_cmd_options_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
{
.cmd = TEAM_CMD_PORT_LIST_GET,
.doit = team_nl_cmd_port_list_get,
- .policy = team_nl_policy,
.flags = GENL_ADMIN_PERM,
},
};
@@ -2756,6 +2749,7 @@ static struct genl_family team_nl_family __ro_after_init = {
.name = TEAM_GENL_NAME,
.version = TEAM_GENL_VERSION,
.maxattr = TEAM_ATTR_MAX,
+ .policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = team_nl_ops,
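
team (like mac80211_hwsim below) moves netlink attribute validation
from per-op .policy fields to a single family-wide .policy, letting the
genetlink core validate every command. A minimal sketch with invented
names:

    static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
            [EXAMPLE_ATTR_ID] = { .type = NLA_U32 },
    };

    static struct genl_family example_family __ro_after_init = {
            .name           = "EXAMPLE",
            .version        = 1,
            .maxattr        = EXAMPLE_ATTR_MAX,
            .policy         = example_policy,
            .module         = THIS_MODULE,
    };
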
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e9ca1c088d0b..24d0220b9ba0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -606,8 +606,7 @@ static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
}
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct tun_struct *tun = netdev_priv(dev);
u16 ret;
@@ -1043,7 +1042,7 @@ static int tun_net_close(struct net_device *dev)
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
- if (tun->numqueues == 1 && static_key_false(&rps_needed)) {
+ if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -2873,8 +2872,7 @@ err_free_dev:
return err;
}
-static void tun_get_iff(struct net *net, struct tun_struct *tun,
- struct ifreq *ifr)
+static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
tun_debug(KERN_INFO, tun, "tun_get_iff\n");
@@ -3103,10 +3101,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
+ net = dev_net(tun->dev);
ret = 0;
switch (cmd) {
case TUNGETIFF:
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
if (tfile->detached)
ifr.ifr_flags |= IFF_DETACH_QUEUE;
@@ -3328,6 +3327,13 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = tun_net_change_carrier(tun->dev, (bool)carrier);
break;
+ case TUNGETDEVNETNS:
+ ret = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ goto unlock;
+ ret = open_related_ns(&net->ns, get_net_ns);
+ break;
+
default:
ret = -EINVAL;
break;
@@ -3457,7 +3463,7 @@ static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
rtnl_lock();
tun = tun_get(tfile);
if (tun)
- tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ tun_get_iff(tun, &ifr);
rtnl_unlock();
if (tun)
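
The new TUNGETDEVNETNS ioctl hands userspace an open file descriptor
for the tun device's network namespace via open_related_ns(); the fd
can be passed straight to setns(). A hedged sketch, assuming uapi
headers new enough to define the ioctl:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/if_tun.h>

    static int enter_tun_netns(int tunfd)
    {
            int ret, nsfd = ioctl(tunfd, TUNGETDEVNETNS);

            if (nsfd < 0)
                    return nsfd;

            ret = setns(nsfd, CLONE_NEWNET);  /* needs CAP_SYS_ADMIN */
            close(nsfd);
            return ret;
    }
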
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 0362acd5cdca..28321aca48fe 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -23,6 +23,7 @@
#include <linux/usb/cdc_ncm.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/ipv6_stubs.h>
/* alternative VLAN for IP session 0 if not untagged */
#define MBIM_IPS0_VID 4094
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7eb38ea9ba56..ba246fc475ae 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1568,7 +1568,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
- bool kick = !skb->xmit_more;
+ bool kick = !netdev_xmit_more();
bool use_napi = sq->napi.weight;
/* Free up any pending old buffers before queueing new ones. */
@@ -1925,7 +1925,7 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
+static void virtnet_clean_affinity(struct virtnet_info *vi)
{
int i;
@@ -1949,7 +1949,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
int stride;
if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
return;
}
@@ -1999,7 +1999,7 @@ static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
node);
- virtnet_clean_affinity(vi, cpu);
+ virtnet_clean_affinity(vi);
return 0;
}
@@ -2735,7 +2735,7 @@ static void virtnet_del_vqs(struct virtnet_info *vi)
{
struct virtio_device *vdev = vi->vdev;
- virtnet_clean_affinity(vi, -1);
+ virtnet_clean_affinity(vi);
vdev->config->del_vqs(vdev);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index d76dfed8d9bb..5994d5415a03 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -20,6 +20,7 @@
#include <linux/ethtool.h>
#include <net/arp.h>
#include <net/ndisc.h>
+#include <net/ipv6_stubs.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/rtnetlink.h>
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0838af04d681..4cc7b222859c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3620,35 +3620,29 @@ done:
static const struct genl_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
- .policy = hwsim_genl_policy,
.doit = hwsim_register_received_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_FRAME,
- .policy = hwsim_genl_policy,
.doit = hwsim_cloned_frame_received_nl,
},
{
.cmd = HWSIM_CMD_TX_INFO_FRAME,
- .policy = hwsim_genl_policy,
.doit = hwsim_tx_info_frame_received_nl,
},
{
.cmd = HWSIM_CMD_NEW_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_new_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_DEL_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_del_radio_nl,
.flags = GENL_UNS_ADMIN_PERM,
},
{
.cmd = HWSIM_CMD_GET_RADIO,
- .policy = hwsim_genl_policy,
.doit = hwsim_get_radio_nl,
.dumpit = hwsim_dump_radio_nl,
},
@@ -3658,6 +3652,7 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.name = "MAC80211_HWSIM",
.version = 1,
.maxattr = HWSIM_ATTR_MAX,
+ .policy = hwsim_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
.ops = hwsim_ops,
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 20cee5c397fb..f6da8edab7f1 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -1282,8 +1282,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
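
Every .ndo_select_queue implementation in this series loses the
fallback callback; drivers that relied on it call the now-public
netdev_pick_tx() themselves, as xen-netback does below. Sketch:

    static u16 example_select_queue(struct net_device *dev,
                                    struct sk_buff *skb,
                                    struct net_device *sb_dev)
    {
            return netdev_pick_tx(dev, skb, sb_dev) %
                   dev->real_num_tx_queues;
    }
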
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 6da12518e693..783198844dd7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -148,8 +148,7 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
}
static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
@@ -162,7 +161,8 @@ static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
- return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+ return netdev_pick_tx(dev, skb, NULL) %
+ dev->real_num_tx_queues;
xenvif_set_skb_hash(vif, skb);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c914c24f880b..80c30321de41 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -543,8 +543,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
}
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
- struct net_device *sb_dev,
- select_queue_fallback_t fallback)
+ struct net_device *sb_dev)
{
unsigned int num_queues = dev->real_num_tx_queues;
u32 hash;