Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 159
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 40
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 23
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 56
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 349
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 105
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 20
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_trace.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 233
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 138
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 80
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 472
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 135
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 49
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 16
83 files changed, 1654 insertions(+), 1236 deletions(-)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36da059388dc..8cc651d37a7f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -384,7 +384,7 @@ enum cb_status {
cb_ok = 0x2000,
};
-/**
+/*
* cb_command - Command Block flags
* @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
*/
@@ -1531,7 +1531,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
- if (!in_interrupt() && (err = e100_self_test(nic)))
+ if ((err = e100_self_test(nic)))
return err;
if ((err = e100_phy_init(nic)))
@@ -2155,7 +2155,7 @@ static int e100_rx_alloc_list(struct nic *nic)
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
- if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
+ if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
return -ENOMEM;
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
@@ -2593,7 +2593,7 @@ static void e100_diag_test(struct net_device *netdev,
{
struct ethtool_cmd cmd;
struct nic *nic = netdev_priv(netdev);
- int i, err;
+ int i;
memset(data, 0, E100_TEST_LEN * sizeof(u64));
data[0] = !mii_link_ok(&nic->mii);
@@ -2601,7 +2601,7 @@ static void e100_diag_test(struct net_device *netdev,
if (test->flags & ETH_TEST_FL_OFFLINE) {
/* save speed, duplex & autoneg settings */
- err = mii_ethtool_gset(&nic->mii, &cmd);
+ mii_ethtool_gset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_down(nic);
@@ -2610,7 +2610,7 @@ static void e100_diag_test(struct net_device *netdev,
data[4] = e100_loopback_test(nic, lb_phy);
/* restore speed, duplex & autoneg settings */
- err = mii_ethtool_sset(&nic->mii, &cmd);
+ mii_ethtool_sset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_up(nic);
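
Note: both e100 hunks above rest on the same rule. e100_hw_init() and the rx
list allocation are only reached from process context (probe, open, ethtool
self-test), so the in_interrupt() guard was dead code, and the allocation may
use GFP_KERNEL, which is allowed to sleep, instead of the scarcer atomic pool.
A minimal kernel-style sketch of the pattern, with a hypothetical
alloc_rx_shadow() helper, not the driver's exact code:

    #include <linux/slab.h>

    struct rx_shadow { void *page; };  /* stand-in for e100's struct rx */

    /* Called from process context (probe/open/ethtool), so a sleeping
     * allocation is safe; GFP_ATOMIC is only needed where sleeping is
     * forbidden (hard IRQ, softirq, or under a spinlock).
     */
    static struct rx_shadow *alloc_rx_shadow(unsigned int count)
    {
            return kcalloc(count, sizeof(struct rx_shadow), GFP_KERNEL);
    }
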
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4e7a0810eaeb..4c0c9433bd60 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -129,7 +129,6 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
*/
static void e1000_phy_init_script(struct e1000_hw *hw)
{
- u32 ret_val;
u16 phy_saved_data;
if (hw->phy_init_script) {
@@ -138,7 +137,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
/* Save off the current value of register 0x2F5B to be restored
* at the end of this routine.
*/
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
@@ -377,7 +376,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
{
u32 ctrl;
u32 ctrl_ext;
- u32 icr;
u32 manc;
u32 led_ctrl;
s32 ret_val;
@@ -502,7 +500,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr = er32(ICR);
+ er32(ICR);
/* If MWI was previously enabled, reenable it. */
if (hw->mac_type == e1000_82542_rev2_0) {
@@ -1897,7 +1895,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
/**
* e1000_config_mac_to_phy - sync phy and mac settings
* @hw: Struct containing variables accessed by shared code
- * @mii_reg: data to write to the MII control register
*
* Sets MAC speed and duplex settings to reflect the those in the PHY
* The contents of the PHY register containing the needed information need to
@@ -2370,16 +2367,13 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
*/
s32 e1000_check_for_link(struct e1000_hw *hw)
{
- u32 rxcw = 0;
- u32 ctrl;
u32 status;
u32 rctl;
u32 icr;
- u32 signal = 0;
s32 ret_val;
u16 phy_data;
- ctrl = er32(CTRL);
+ er32(CTRL);
status = er32(STATUS);
/* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
@@ -2388,12 +2382,9 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
if ((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) {
- rxcw = er32(RXCW);
+ er32(RXCW);
if (hw->media_type == e1000_media_type_fiber) {
- signal =
- (hw->mac_type >
- e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
if (status & E1000_STATUS_LU)
hw->get_link_status = false;
}
@@ -2922,7 +2913,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
- * @data: data to write to the PHY
+ * @phy_data: data to write to the PHY
*
* Writes a value to a PHY register
*/
@@ -4410,17 +4401,9 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
static void e1000_clear_vfta(struct e1000_hw *hw)
{
u32 offset;
- u32 vfta_value = 0;
- u32 vfta_offset = 0;
- u32 vfta_bit_in_reg = 0;
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /* If the offset we want to clear is the same offset of the
- * manageability VLAN ID, then clear all bits except that of the
- * manageability unit
- */
- vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
E1000_WRITE_FLUSH();
}
}
@@ -4675,78 +4658,76 @@ s32 e1000_led_off(struct e1000_hw *hw)
*/
static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
- volatile u32 temp;
-
- temp = er32(CRCERRS);
- temp = er32(SYMERRS);
- temp = er32(MPC);
- temp = er32(SCC);
- temp = er32(ECOL);
- temp = er32(MCC);
- temp = er32(LATECOL);
- temp = er32(COLC);
- temp = er32(DC);
- temp = er32(SEC);
- temp = er32(RLEC);
- temp = er32(XONRXC);
- temp = er32(XONTXC);
- temp = er32(XOFFRXC);
- temp = er32(XOFFTXC);
- temp = er32(FCRUC);
-
- temp = er32(PRC64);
- temp = er32(PRC127);
- temp = er32(PRC255);
- temp = er32(PRC511);
- temp = er32(PRC1023);
- temp = er32(PRC1522);
-
- temp = er32(GPRC);
- temp = er32(BPRC);
- temp = er32(MPRC);
- temp = er32(GPTC);
- temp = er32(GORCL);
- temp = er32(GORCH);
- temp = er32(GOTCL);
- temp = er32(GOTCH);
- temp = er32(RNBC);
- temp = er32(RUC);
- temp = er32(RFC);
- temp = er32(ROC);
- temp = er32(RJC);
- temp = er32(TORL);
- temp = er32(TORH);
- temp = er32(TOTL);
- temp = er32(TOTH);
- temp = er32(TPR);
- temp = er32(TPT);
-
- temp = er32(PTC64);
- temp = er32(PTC127);
- temp = er32(PTC255);
- temp = er32(PTC511);
- temp = er32(PTC1023);
- temp = er32(PTC1522);
-
- temp = er32(MPTC);
- temp = er32(BPTC);
+ er32(CRCERRS);
+ er32(SYMERRS);
+ er32(MPC);
+ er32(SCC);
+ er32(ECOL);
+ er32(MCC);
+ er32(LATECOL);
+ er32(COLC);
+ er32(DC);
+ er32(SEC);
+ er32(RLEC);
+ er32(XONRXC);
+ er32(XONTXC);
+ er32(XOFFRXC);
+ er32(XOFFTXC);
+ er32(FCRUC);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+
+ er32(GPRC);
+ er32(BPRC);
+ er32(MPRC);
+ er32(GPTC);
+ er32(GORCL);
+ er32(GORCH);
+ er32(GOTCL);
+ er32(GOTCH);
+ er32(RNBC);
+ er32(RUC);
+ er32(RFC);
+ er32(ROC);
+ er32(RJC);
+ er32(TORL);
+ er32(TORH);
+ er32(TOTL);
+ er32(TOTH);
+ er32(TPR);
+ er32(TPT);
+
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(MPTC);
+ er32(BPTC);
if (hw->mac_type < e1000_82543)
return;
- temp = er32(ALGNERRC);
- temp = er32(RXERRC);
- temp = er32(TNCRS);
- temp = er32(CEXTERR);
- temp = er32(TSCTC);
- temp = er32(TSCTFC);
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
if (hw->mac_type <= e1000_82544)
return;
- temp = er32(MGTPRC);
- temp = er32(MGTPDC);
- temp = er32(MGTPTC);
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
}
/**
@@ -4778,8 +4759,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
/**
* e1000_update_adaptive - update adaptive IFS
* @hw: Struct containing variables accessed by shared code
- * @tx_packets: Number of transmits since last callback
- * @total_collisions: Number of collisions since last callback
*
* Called during the callback/watchdog routine to update IFS value based on
* the ratio of transmits to collisions.
@@ -5064,8 +5043,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
/**
* e1000_check_downshift - Check if Downshift occurred
* @hw: Struct containing variables accessed by shared code
- * @downshift: output parameter : 0 - No Downshift occurred.
- * 1 - Downshift occurred.
*
* returns: - E1000_ERR_XXX
* E1000_SUCCESS
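
Note: the e1000_clear_hw_cntrs() rewrite above drops the write-only temp
variable because these statistics registers are read-to-clear: the MMIO read
itself is the side effect, and the value is deliberately discarded. er32() in
this driver wraps readl(), which operates on a volatile mapping, so the bare
er32(...) statements cannot be optimized away. A hedged sketch of the
underlying idiom:

    #include <linux/io.h>

    /* Read-to-clear: reading the counter register resets it; the
     * returned value is intentionally unused. readl() accesses are
     * volatile, so the compiler must keep them.
     */
    static void clear_counter(void __iomem *reg)
    {
            (void)readl(reg);
    }
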
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1e6ec081fd9d..5e28cf4fa2cd 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -199,8 +199,10 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
- * e1000_get_hw_dev - return device
- * used by hardware layer to print debugging information
+ * e1000_get_hw_dev - helper function for getting netdev
+ * @hw: pointer to HW struct
+ *
+ * return device used by hardware layer to print debugging information
*
**/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
@@ -354,7 +356,7 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
/**
* e1000_configure - configure the hardware for RX and TX
- * @adapter = private board structure
+ * @adapter: private board structure
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
@@ -534,7 +536,6 @@ void e1000_down(struct e1000_adapter *adapter)
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -3489,8 +3490,9 @@ exit:
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3787,7 +3789,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
/**
* e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct containing references to driver info
+ * @budget: budget given to driver for receive packets
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
@@ -3818,6 +3821,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ * @tx_ring: ring to clean
**/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
@@ -3933,7 +3937,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
@@ -3970,6 +3974,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
/**
* e1000_consume_page - helper function for jumbo Rx path
+ * @bi: software descriptor shadow data
+ * @skb: skb being modified
+ * @length: length of data being added
**/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
@@ -4003,6 +4010,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
/**
* e1000_tbi_adjust_stats
* @hw: Struct containing variables accessed by shared code
+ * @stats: point to stats struct
* @frame_len: The length of the frame in question
* @mac_addr: The Ethernet destination address of the frame in question
*
@@ -4548,6 +4556,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
+ * @rx_ring: pointer to ring struct
+ * @cleaned_count: number of new Rx buffers to try to allocate
**/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@@ -4662,7 +4672,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
- * @adapter:
+ * @adapter: address of board private structure
**/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
@@ -4718,10 +4728,10 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
}
/**
- * e1000_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * e1000_ioctl - handle ioctl calls
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -4737,9 +4747,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* e1000_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
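
Note: most of the e1000_main.c hunks are kernel-doc repairs. Every parameter
needs an "@name:" line whose name matches the C parameter exactly; stray
forms like "@adapter =" or empty "@netdev:" entries trigger warnings from
scripts/kernel-doc under W=1. The shape the patch converges on, shown here
with hypothetical function and struct names rather than one lifted from the
diff:

    #include <linux/types.h>

    struct example_adapter;
    struct example_ring;

    /**
     * example_clean_ring - reclaim completed descriptors
     * @adapter: board private structure
     * @ring: ring to clean
     *
     * Return: true if the ring was fully cleaned.
     */
    bool example_clean_ring(struct example_adapter *adapter,
                            struct example_ring *ring);
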
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4b103cca8a39..be9c695dde12 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1072,7 +1072,6 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
/**
* e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
* @hw: pointer to the HW structure
- * @duplex: current duplex setting
*
* Configure the KMRN interface by applying last minute quirks for
* 10/100 operation.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8fc9208382c..03215b0aee4b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -895,6 +895,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
mask |= BIT(18);
break;
default:
@@ -1560,6 +1561,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b1447221669e..69a2329ea463 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -102,6 +102,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
+#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
+#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
+#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
#define E1000_REVISION_4 4
@@ -127,6 +131,7 @@ enum e1000_mac_type {
e1000_pch_cnp,
e1000_pch_tgp,
e1000_pch_adp,
+ e1000_pch_mtp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b2f2fcfdf732..9aa6fad8ed47 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -320,6 +320,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -464,6 +465,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -708,6 +710,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -743,7 +746,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/**
* __e1000_access_emi_reg_locked - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
*
@@ -1648,6 +1651,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2102,6 +2106,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -2266,7 +2271,7 @@ release:
/**
* e1000_configure_k1_ich8lan - Configure K1 power state
* @hw: pointer to the HW structure
- * @enable: K1 state to configure
+ * @k1_enable: K1 state to configure
*
* Configure the K1 power state based on the provided parameter.
* Assumes semaphore already acquired.
@@ -2405,8 +2410,10 @@ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
}
/**
- * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -2694,8 +2701,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
}
/**
- * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_lv_phy_workarounds_ich8lan - apply ich8 specific workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -3141,6 +3150,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4086,6 +4096,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 664e8ccc88d2..b30f00891c03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -501,6 +501,7 @@ rx_ring_summary:
/**
* e1000_desc_unused - calculate if we have unused descriptors
+ * @ring: pointer to ring struct to perform calculation on
**/
static int e1000_desc_unused(struct e1000_ring *ring)
{
@@ -577,6 +578,7 @@ static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
* @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
@@ -601,8 +603,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
- * @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct sk_buff *skb)
@@ -673,6 +674,8 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
/**
* e1000_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -741,6 +744,8 @@ map_skb:
/**
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -844,6 +849,7 @@ no_buffers:
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
* @rx_ring: Rx descriptor ring
* @cleaned_count: number of buffers to allocate this pass
+ * @gfp: flags for allocation
**/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
@@ -933,6 +939,8 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
/**
* e1000_clean_rx_irq - Send received data up the network stack
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1327,6 +1335,8 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1517,9 +1527,6 @@ next_desc:
return cleaned;
}
-/**
- * e1000_consume_page - helper function
- **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
u16 length)
{
@@ -1531,7 +1538,9 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1994,6 +2003,7 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
/**
* e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
*
* e1000_configure_msix sets up the hardware to properly
* generate MSI-X interrupts.
@@ -2072,6 +2082,7 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
*
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2127,6 +2138,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
*
* e1000_request_msix allocates MSI-X vectors and requests interrupts from the
* kernel.
@@ -2180,6 +2192,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
/**
* e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
*
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2240,6 +2253,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
@@ -2262,6 +2276,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
/**
* e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
@@ -2332,6 +2347,8 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
/**
* e1000_alloc_ring_dma - allocate memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring struct for which to allocate dma
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2507,7 +2524,6 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
/**
* e1000_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
* @itr_setting: current adapter->itr
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
@@ -3049,12 +3065,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
}
}
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
/**
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -3570,6 +3587,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3605,6 +3623,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
/**
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
+ * @config: timestamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3808,6 +3827,7 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
/**
* e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
*
* Power down the PHY so no link is implied when interface is down.
* The PHY cannot be powered down if management or WoL is active.
@@ -3820,6 +3840,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
/**
* e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ * @adapter: board private structure
*
* We want to clear all pending descriptors from the TX ring.
* zeroing happens when the HW reads the regs. We assign the ring itself as
@@ -3854,6 +3875,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ * @adapter: board private structure
*
* Mark all descriptors in the RX ring as consumed and disable the rx ring
*/
@@ -3886,6 +3908,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ * @adapter: board private structure
*
* In i219, the descriptor rings must be emptied before resetting the HW
* or before changing the device state to D3 during runtime (runtime PM).
@@ -3968,6 +3991,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
/**
* e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
*
* This function boots the hardware and enables some settings that
* require a configuration cycle of the hardware - those cannot be
@@ -4081,6 +4105,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4847,7 +4872,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
/**
* e1000_update_phy_info - timre call-back to update PHY info
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list containing private info adapter
*
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
@@ -5187,7 +5212,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
/**
* e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list containing private info adapter
**/
static void e1000_watchdog(struct timer_list *t)
{
@@ -5972,8 +5997,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hung queue (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6174,7 +6200,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_ioctl - control hardware time stamping
* @netdev: network interface device structure
- * @ifreq: interface request
+ * @ifr: interface request
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -7853,6 +7879,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
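
Note: the PAGE_USE_COUNT() hunk only moves the macro above the kernel-doc so
the comment sits directly on e1000_setup_rctl(); the macro itself is an
unchanged ceiling division, pages = DIV_ROUND_UP(S, PAGE_SIZE). A standalone
worked example (assuming 4 KiB pages, which the kernel macro does not
hard-code):

    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* assume 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
                               (((S) & (PAGE_SIZE - 1)) ? 1 : 0))

    int main(void)
    {
            /* a 9018-byte jumbo buffer spans 2 full pages + 826 bytes */
            printf("%lu\n", PAGE_USE_COUNT(9018UL));   /* prints 3 */
            return 0;
    }
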
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index e11c877595fb..bdd9dc163f15 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2311,6 +2311,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
/**
* e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
* @page: page to access
+ * @reg: register to check
*
* Returns the phy address for the page requested.
**/
@@ -2728,6 +2729,7 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
* @offset: register offset to be read
* @data: pointer to the read data
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
* and stores the retrieved information in data. Release any acquired
@@ -2836,6 +2838,7 @@ s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
* @offset: register offset to write to
* @data: data to write at register offset
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 34b988d70488..f3f671311855 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -144,7 +144,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
/**
* e1000e_phc_getsynctime - Reads the current system/device cross timestamp
* @ptp: ptp clock structure
- * @cts: structure containing timestamp
+ * @xtstamp: structure containing timestamp
*
* Read device and system (ART) clock simultaneously and return the scaled
* clock values in ns.
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index d88dd41a9442..99b8252eb969 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi,
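
Note: the fm10k hunk replaces the open-coded prefetch pair with the
net_prefetch() helper; the change is behavior-preserving. The helper, in
simplified form matching its definition in <linux/netdevice.h>, issues a
second prefetch when cache lines are shorter than 128 bytes so a full
Ethernet+IP+TCP header block is pulled in regardless of L1_CACHE_BYTES:

    #include <linux/cache.h>
    #include <linux/prefetch.h>
    #include <linux/types.h>

    /* simplified form of net_prefetch(): always cover ~128 bytes of
     * packet headers, even on CPUs with 32- or 64-byte cache lines
     */
    static inline void net_prefetch_sketch(void *p)
    {
            prefetch(p);
    #if L1_CACHE_BYTES < 128
            prefetch((u8 *)p + L1_CACHE_BYTES);
    #endif
    }
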
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 140212bfe08b..9e3103fae723 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -221,8 +221,6 @@ static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
- WARN_ON(in_interrupt());
-
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a7e212d1caa2..537300e762f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -35,6 +35,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
@@ -90,7 +91,7 @@
#define I40E_OEM_RELEASE_MASK 0x0000ffff
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -133,7 +134,6 @@ enum i40e_state_t {
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
__I40E_MACVLAN_SYNC_PENDING,
- __I40E_UDP_FILTER_SYNC_PENDING,
__I40E_TEMP_LINK_POLLING,
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
@@ -478,8 +478,8 @@ struct i40e_pf {
struct list_head l3_flex_pit_list;
struct list_head l4_flex_pit_list;
- struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
- u16 pending_udp_bitmap;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
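
Note: i40e drops its hand-rolled udp_ports[]/pending-bitmap tracking in favor
of the core udp_tunnel_nic infrastructure (see the large i40e_main.c removal
below). The driver now only describes its hardware port table and supplies
set/unset callbacks; the core handles syncing and locking. A hedged sketch of
the registration shape, with illustrative field values rather than ones
lifted from the patch:

    #include <net/udp_tunnel.h>

    static const struct udp_tunnel_nic_info i40e_udp_tunnels_sketch = {
            .set_port   = i40e_udp_tunnel_set_port,   /* added below */
            .unset_port = i40e_udp_tunnel_unset_port, /* counterpart, not in this excerpt */
            .flags      = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
            .tables     = {
                    {
                            .n_entries    = 16, /* one shared HW table */
                            .tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
                                            UDP_TUNNEL_TYPE_GENEVE,
                    },
            },
    };
    /* at probe: netdev->udp_tunnel_nic_info = &i40e_udp_tunnels_sketch;
     * the driver also wires .shared to &pf->udp_tunnel_shared, since the
     * port table is device-global rather than per-netdev.
     */
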
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c897a2863e4f..593912b17609 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -541,6 +541,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+
fallthrough;
default:
break;
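
Note: the new flag follows the same pattern as the GET_LINK_INFO check above
it: the feature is enabled when the AdminQ API version is at least 1.10
(I40E_MINOR_VER_FW_REQUEST_FEC_X722 = 0x000A, i.e. minor 10). The comparison
must treat major/minor as a pair, as this standalone model shows:

    #include <stdbool.h>
    #include <stdio.h>

    /* API >= 1.10 unlocks FEC-request support on X722 */
    static bool fw_has_fec_request(unsigned int maj, unsigned int min)
    {
            return maj > 1 || (maj == 1 && min >= 0x000A);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   fw_has_fec_request(1, 0x0009),   /* 0: 1.9 too old  */
                   fw_has_fec_request(1, 0x000A),   /* 1: exactly 1.10 */
                   fw_has_fec_request(2, 0x0000));  /* 1: any 2.x      */
            return 0;
    }
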
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index edec3df78971..ee394aacef4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -85,8 +85,8 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c0c8efe42fce..1e960c3c7ef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -24,6 +24,8 @@
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+/* API version 1.10 for X722 devices adds ability to request FEC encoding */
+#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index befd3018183f..a2dba32383f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -278,8 +278,6 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
- * @client: pointer to a client struct in the client list.
- * @existing: if there was already an existing instance
*
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6ab52cbd697a..adc9e4fa4789 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3766,9 +3766,7 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
- * @buff: buffer for result
* @persist: True if start of LLDP should be persistent across power cycles
- * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -5395,6 +5393,7 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
@@ -5439,6 +5438,7 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d3ad2e3aa838..d7c13ca9be7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -604,10 +604,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
- rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.hdr_addr);
}
}
} else if (cnt == 3) {
@@ -625,10 +624,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
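
Note: the debugfs hunks follow from the switch to 16-byte Rx descriptors
elsewhere in this series (the I40E_RX_DESC() change in i40e.h and
rx_ctx.dsize = 0 in i40e_main.c): a 16-byte read descriptor carries only
pkt_addr and hdr_addr, so the rsvd1/rsvd2 quadwords of the 32-byte layout no
longer exist to print. Roughly, the two layouts' read halves look like:

    #include <stdint.h>

    /* 16-byte Rx read descriptor: two quadwords (dsize = 0) */
    struct rx_desc_16 {
            uint64_t pkt_addr;      /* DMA address for packet data  */
            uint64_t hdr_addr;      /* DMA address for header split */
    };

    /* 32-byte layout (dsize = 1) adds two reserved quadwords */
    struct rx_desc_32 {
            uint64_t pkt_addr;
            uint64_t hdr_addr;
            uint64_t rsvd1;
            uint64_t rsvd2;
    };

Halving the descriptor size doubles how many descriptors fit per cache line
and per 4 KiB of ring memory.
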
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 825c104ecba1..26ba1f3eb2d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -891,6 +891,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
break;
case I40E_PHY_TYPE_SGMII:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -1481,12 +1482,16 @@ static int i40e_set_fec_param(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 fec_cfg = 0;
- int err = 0;
if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
- hw->device_id != I40E_DEV_ID_25G_B) {
- err = -EPERM;
- goto done;
+ hw->device_id != I40E_DEV_ID_25G_B &&
+ hw->device_id != I40E_DEV_ID_KX_X722)
+ return -EPERM;
+
+ if (hw->mac.type == I40E_MAC_X722 &&
+ !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
+ return -EOPNOTSUPP;
}
switch (fecparam->fec) {
@@ -1508,14 +1513,10 @@ static int i40e_set_fec_param(struct net_device *netdev,
default:
dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
fecparam->fec);
- err = -EINVAL;
- goto done;
+ return -EINVAL;
}
- err = i40e_set_fec_cfg(netdev, fec_cfg);
-
-done:
- return err;
+ return i40e_set_fec_cfg(netdev, fec_cfg);
}
static int i40e_nway_reset(struct net_device *netdev)
@@ -1967,7 +1968,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- /* If there is a AF_XDP UMEM attached to any of Rx rings,
+ /* If there is a AF_XDP page pool attached to any of Rx rings,
* disallow changing the number of descriptors -- regardless
* if the netdev is running or not.
*/
@@ -4951,8 +4952,7 @@ flags_complete:
}
}
- if (((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ if (changed_flags & I40E_FLAG_RS_FEC &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -4960,6 +4960,15 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B &&
+ pf->hw.device_id != I40E_DEV_ID_KX_X722) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
* changed in the code above.
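
Note: i40e_set_fec_param() is refactored from a goto-done tail into direct
returns, and BASE-R FEC gains its own allow-list that also admits
I40E_DEV_ID_KX_X722 for the new backplane support, while RS-FEC stays
25G-only; per the link-message hunk in i40e_main.c below, the X722 KX link
only negotiates CL74 FC-FEC/BASE-R. A hedged model of the split checks, using
stand-in enum values instead of real device IDs:

    #include <stdbool.h>

    enum { DEV_25G_SFP28 = 1, DEV_25G_B, DEV_KX_X722, DEV_OTHER };

    static bool supports_rs_fec(int dev)       /* 25G parts only */
    {
            return dev == DEV_25G_SFP28 || dev == DEV_25G_B;
    }

    static bool supports_baser_fec(int dev)    /* also KX/X722 */
    {
            return supports_rs_fec(dev) || dev == DEV_KX_X722;
    }
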
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e433fdbf2c3..4f8a2154b93f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -287,6 +287,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
/**
* i40e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number timing out
*
* If any port has noticed a Tx timeout, it is likely that the whole
* device is munged, not just the one netdev port, so go for the full
@@ -1609,6 +1610,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
* i40e_config_rss_aq - Prepare for RSS using AQ commands
* @vsi: vsi structure
* @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
**/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
@@ -3122,12 +3125,12 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
* @ring: The Tx or Rx ring
*
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
**/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
int qid = ring->queue_index;
@@ -3138,7 +3141,7 @@ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
@@ -3157,7 +3160,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
if (ring_is_xdp(ring))
- ring->xsk_umem = i40e_xsk_umem(ring);
+ ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3280,12 +3283,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
kfree(ring->rx_bi);
- ring->xsk_umem = i40e_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
ret = i40e_alloc_rx_bi_zc(ring);
if (ret)
return ret;
- ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3320,8 +3324,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- /* use 32 byte descriptors */
- rx_ctx.dsize = 1;
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
/* descriptor type is always zero
* rx_ctx.dtype = 0;
@@ -3368,8 +3372,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
@@ -3380,7 +3384,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
*/
dev_info(&vsi->back->pdev->dev,
"Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
ring->queue_index, pf_q);
}
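
Note: the umem-to-pool renames above track the kernel-wide AF_XDP conversion
in which the buffer management of struct xdp_umem moved into a per-queue
struct xsk_buff_pool; the helpers map one-to-one (xdp_get_umem_from_qid to
xsk_get_pool_from_qid, xsk_umem_get_rx_frame_size to
xsk_pool_get_rx_frame_size, xsk_buff_set_rxq_info to xsk_pool_set_rxq_info),
so the hunks are mechanical. A hedged sketch of the lookup as used here,
mirroring i40e_xsk_pool() above rather than reproducing it:

    #include <net/xdp_sock_drv.h>

    /* Return the AF_XDP buffer pool bound to this queue, or NULL when
     * zero-copy is not enabled on it.
     */
    static struct xsk_buff_pool *example_xsk_pool(struct net_device *dev,
                                                  u16 qid, bool zc_enabled)
    {
            if (!zc_enabled)
                    return NULL;
            return xsk_get_pool_from_qid(dev, qid);
    }
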
@@ -5814,7 +5818,6 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
/**
* i40e_channel_setup_queue_map - Setup a channel queue map
* @pf: ptr to PF device
- * @vsi: the VSI being setup
* @ctxt: VSI context structure
* @ch: ptr to channel structure
*
@@ -6057,8 +6060,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
/**
* i40e_setup_channel - setup new channel using uplink element
* @pf: ptr to PF device
- * @type: type of channel to be created (VMDq2/VF)
- * @uplink_seid: underlying HW switching element (VEB) ID
+ * @vsi: pointer to the VSI to set up the channel within
* @ch: ptr to channel structure
*
* Setup new channel (VSI) based on specified type (VMDq2/VF)
@@ -6623,6 +6625,25 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
+ } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
+ req_fec = "None";
+ fec = "None";
+ an = "False";
+
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = "True";
+
+ if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = "CL74 FC-FEC/BASE-R";
+
+ if (pf->hw.phy.link_info.req_fec_info &
+ I40E_AQ_REQUEST_FEC_KR)
+ req_fec = "CL74 FC-FEC/BASE-R";
+
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
} else {
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
@@ -6689,7 +6710,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- WARN_ON(in_interrupt());
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
@@ -7779,7 +7799,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct flow_cls_offload
+ * @f: Pointer to struct flow_cls_offload
* @filter: Pointer to cloud filter structure
*
**/
@@ -8160,8 +8180,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
/**
* i40e_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @np: net device to configure
+ * @cls_flower: offload data
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
struct flow_cls_offload *cls_flower)
@@ -8462,9 +8482,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
- WARN_ON(in_interrupt());
-
-
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9585,6 +9602,7 @@ end_reconstitute:
/**
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
+ * @list_type: AQ capability to be queried
**/
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
@@ -10383,106 +10401,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(u8 type)
-{
- switch (type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- return "vxlan";
- case UDP_TUNNEL_TYPE_GENEVE:
- return "geneve";
- default:
- return "unknown";
- }
-}
-
-/**
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
-{
- int i;
-
- /* loop through and set pending bit for all active UDP filters */
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].port)
- pf->pending_udp_bitmap |= BIT_ULL(i);
- }
-
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-}
-
-/**
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 filter_index, type;
- u16 port;
- int i;
-
- if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
- return;
-
- /* acquire RTNL to maintain state of flags and port requests */
- rtnl_lock();
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
- struct i40e_udp_port_config *udp_port;
- i40e_status ret = 0;
-
- udp_port = &pf->udp_ports[i];
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
-
- port = READ_ONCE(udp_port->port);
- type = READ_ONCE(udp_port->type);
- filter_index = READ_ONCE(udp_port->filter_index);
-
- /* release RTNL while we wait on AQ command */
- rtnl_unlock();
-
- if (port)
- ret = i40e_aq_add_udp_tunnel(hw, port,
- type,
- &filter_index,
- NULL);
- else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
- ret = i40e_aq_del_udp_tunnel(hw, filter_index,
- NULL);
-
- /* reacquire RTNL so we can update filter_index */
- rtnl_lock();
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(type),
- port ? "add" : "delete",
- port,
- filter_index,
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- if (port) {
- /* failed to add, just reset port,
- * drop pending bit for any deletion
- */
- udp_port->port = 0;
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
- }
- } else if (port) {
- /* record filter index on success */
- udp_port->filter_index = filter_index;
- }
- }
- }
-
- rtnl_unlock();
-}
-
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -10522,7 +10440,6 @@ static void i40e_service_task(struct work_struct *work)
pf->vsi[pf->lan_vsi]);
}
i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
} else {
i40e_reset_subtask(pf);
}
@@ -10546,7 +10463,7 @@ static void i40e_service_task(struct work_struct *work)
/**
* i40e_service_timer - timer callback
- * @data: pointer to PF struct
+ * @t: timer list pointer
**/
static void i40e_service_timer(struct timer_list *t)
{
@@ -11185,11 +11102,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -11222,7 +11138,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int err, v_idx, num_q_vectors, current_cpu;
+ int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -11232,15 +11148,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- current_cpu = cpumask_first(cpu_online_mask);
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
- if (unlikely(current_cpu >= nr_cpu_ids))
- current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -12228,131 +12139,48 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-/**
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
- * @pf: board private structure
- * @port: The UDP port to look up
- *
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
- **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
-{
- u8 i;
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- /* Do not report ports with pending deletions as
- * being available.
- */
- if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
- continue;
- if (pf->udp_ports[i].port == port)
- return i;
- }
-
- return i;
-}
-
-/**
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 next_idx;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n", port);
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u8 type, filter_index;
+ i40e_status ret;
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- port);
- return;
- }
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ I40E_AQC_TUNNEL_TYPE_NGE;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
- return;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
- break;
- default:
- return;
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].port = port;
- pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
+ return 0;
}
-/**
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
- goto not_found;
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ i40e_status ret;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
- goto not_found;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
- goto not_found;
- break;
- default:
- goto not_found;
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+ netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].port = 0;
-
- /* Toggle pending bit instead of setting it. This way if we are
- * deleting a port that has yet to be added we just clear the pending
- * bit and don't have to worry about it.
- */
- pf->pending_udp_bitmap ^= BIT_ULL(idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-
- return;
-not_found:
- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- port);
+ return 0;
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -12379,6 +12207,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -12644,7 +12473,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
*/
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->xdp_rings[i]->xsk_umem)
+ if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
@@ -12923,8 +12752,8 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -12957,8 +12786,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
- .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -13022,6 +12851,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
+
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= hw_enc_features;
@@ -14422,7 +14253,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
/* repopulate tunnel port filters */
- i40e_sync_udp_filters(pf);
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
return ret;
}
@@ -15151,6 +14982,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_switch_setup;
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ UDP_TUNNEL_TYPE_GENEVE;
+
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
* the other PFs. We allocate space for more than the guarantee with
@@ -15160,6 +14999,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
else
pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
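For context, the registration pattern the udp_tunnel_nic conversion above relies on, as a minimal sketch. The my_* names are hypothetical; i40e fills its table at probe time in a writable struct instead of a static const one, since it also sets the .shared pointer.

	static const struct udp_tunnel_nic_info my_udp_tunnels = {
		.set_port	= my_udp_tunnel_set_port,   /* program one port into HW */
		.unset_port	= my_udp_tunnel_unset_port, /* remove one port from HW */
		.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
		.tables		= {
			{
				.n_entries	= 16,
				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
						  UDP_TUNNEL_TYPE_GENEVE,
			},
		},
	};

	/* hooked up at netdev creation time, as in i40e_config_netdev() above: */
	netdev->udp_tunnel_nic_info = &my_udp_tunnels;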
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ff7b19c6bc73..7a879614ca55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -259,7 +259,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @pf: The PF private data structure
- * @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
* dropped an Rx packet that was timestamped when the ring is full. The
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 424f02077e2e..b5b12299931f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* i40e_trace() macro enables shared code to refer to trace points
* like:
*
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e5c566ceb01..d43ce13a93c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -533,11 +533,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
- struct i40e_32b_rx_wb_qw0 *qw0;
+ struct i40e_16b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
@@ -636,7 +636,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
i40e_xsk_clean_tx_ring(tx_ring);
} else {
/* ring already cleared, nothing to do */
@@ -1335,7 +1335,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -1369,7 +1369,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
i40e_clear_rx_bi_zc(rx_ring);
else
i40e_clear_rx_bi(rx_ring);
@@ -1418,7 +1418,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1755,7 +1755,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -1953,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- prefetchw(rx_buffer->page);
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1992,10 +1991,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2075,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
+
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -2300,6 +2295,19 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -2579,7 +2587,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
@@ -2607,7 +2615,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
i40e_clean_rx_irq(ring, budget_per_ring);
@@ -3503,7 +3511,7 @@ dma_error:
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
- * @xdp: data to transmit
+ * @xdpf: data to transmit
* @xdp_ring: XDP Tx ring
**/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
@@ -3698,7 +3706,9 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* i40e_xdp_xmit - Implements ndo_xdp_xmit
* @dev: netdev
- * @xdp: XDP buffer
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
*
* Returns number of frames successfully sent. Frames that fail are
* free'ed via XDP return API.
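For reference, the net_prefetch() helper used above replaces the open-coded pair of prefetches; per include/linux/netdevice.h of this era it is, roughly:

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}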
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4036893d6825..2feed920ef8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -110,7 +110,7 @@ enum i40e_dyn_idx_t {
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define i40e_rx_desc i40e_16byte_rx_desc
#define I40E_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -388,7 +388,7 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -482,7 +482,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
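Switching the i40e_rx_desc alias to the 16-byte layout halves the coherent DMA memory per Rx ring; a worked example against the i40e_setup_rx_descriptors() sizing shown earlier, assuming the default ring size of 512 descriptors:

	/* rx_ring->count = 512:
	 *   512 * sizeof(union i40e_16byte_rx_desc) = 512 * 16 =  8192 bytes
	 *   512 * sizeof(union i40e_32byte_rx_desc) = 512 * 32 = 16384 bytes
	 * both then rounded up by ALIGN(size, 4096).
	 */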
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 667c4dc4b39f..19da3b22160f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -21,9 +21,9 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
-/**
+/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
- **/
+ */
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
@@ -37,7 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
/**
* i40e_update_tx_stats - Update the egress statistics for the Tx ring
* @tx_ring: Tx ring to update
- * @total_packet: total packets sent
+ * @total_packets: total packets sent
* @total_bytes: total bytes sent
**/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
@@ -99,19 +99,6 @@ static inline bool i40e_rx_is_programming_status(u64 qword1)
return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
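The build_ctob() body is elided by the hunk above; for context, it packs the command, offset, buffer size and L2 tag into the Tx descriptor's cmd_type_offset_bsz qword, along these lines (a sketch; shift names per i40e_type.h):

	return cpu_to_le64(I40E_TX_DESC_DTYPE_DATA |
			   ((u64)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			   ((u64)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			   ((u64)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			   ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));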
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 52410d609ba1..c0bdc666f557 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -595,6 +595,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
+#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
u64 flags;
/* Used in set switch config AQ command */
@@ -628,7 +629,7 @@ union i40e_16byte_rx_desc {
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
- struct {
+ struct i40e_16b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -647,6 +648,9 @@ union i40e_16byte_rx_desc {
__le64 status_error_len;
} qword1;
} wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
};
union i40e_32byte_rx_desc {
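The new raw view lets callers pass descriptor qwords around without committing to a layout first; condensed from the i40e_fd_handle_status() changes earlier in this patch:

	union i40e_rx_desc *rx_desc = I40E_RX_DESC(rx_ring, ntc);
	u64 qword0_raw = rx_desc->raw.qword[0];
	u64 qword1 = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
	struct i40e_16b_rx_wb_qw0 *qw0;

	qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
	/* structured fields (l2tag1, FD filter ID, ...) are now addressable */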
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 47bfb2e95e2d..c96e2f2d4cba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2244,7 +2244,8 @@ error_param:
}
/**
- * i40e_validate_queue_map
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
* @vsi_id: vsi id
* @queuemap: Tx or Rx queue map
*
@@ -3160,8 +3161,8 @@ err:
/**
* i40e_validate_cloud_filter
- * @mask: mask for TC filter
- * @data: data for TC filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
*
* This function validates cloud filter programmed as TC filter for ADq
**/
@@ -3294,7 +3295,7 @@ err:
/**
* i40e_find_vsi_from_seid - searches for the vsi with the given seid
* @vf: pointer to the VF info
- * @seid - seid of the vsi it is searching for
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 8ce57b507a21..6acede0acdca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -29,14 +29,16 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
* @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
@@ -53,7 +55,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -80,21 +82,22 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
* @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to disassociate buffer pool from
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
int err;
- umem = xdp_get_umem_from_qid(netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -106,7 +109,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -118,20 +121,21 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: buffer pool to associate with a ring, or NULL to disable
+ * @qid: Rx ring to associate the buffer pool with, or disassociate it from
*
- * This function enables or disables a UMEM to a certain ring.
+ * This function attaches or detaches a buffer pool to/from a certain ring.
*
* Returns 0 on success, <0 on failure
**/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
- i40e_xsk_umem_disable(vsi, qid);
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
}
/**
@@ -191,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -254,6 +258,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+}
+
+/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -265,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
- bool failure = false;
struct sk_buff *skb;
+ bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -274,13 +290,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- !i40e_alloc_rx_buffers_zc(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
-
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -310,7 +319,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi);
+ xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
@@ -355,14 +364,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -385,11 +397,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -416,7 +428,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
@@ -448,7 +460,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
struct i40e_tx_buffer *tx_bi;
@@ -488,13 +500,13 @@ skip:
tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -526,7 +538,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
if (queue_id >= vsi->num_queue_pairs)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -565,7 +577,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
struct i40e_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -585,14 +597,15 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}
/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
* @vsi: vsi
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
@@ -600,7 +613,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_umem_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}
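Collected for review: the UMEM-to-pool conversion in this file is a mechanical rename of the AF_XDP driver API. The mapping, as applied by the hunks above:

	/* xdp_get_umem_from_qid()      -> xsk_get_pool_from_qid()
	 * xsk_buff_dma_map()/unmap()   -> xsk_pool_dma_map()/unmap()
	 * xsk_umem_consume_tx()        -> xsk_tx_peek_desc()
	 * xsk_umem_consume_tx_done()   -> xsk_tx_release()
	 * xsk_umem_complete_tx()       -> xsk_tx_completed()
	 * xsk_umem_uses_need_wakeup()  -> xsk_uses_need_wakeup()
	 * xsk_buff_alloc() and xsk_buff_dma_sync_for_cpu() keep their names
	 * but now take the xsk_buff_pool instead of, or alongside, the umem.
	 */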
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index c524c142127f..7adfd8539247 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -5,12 +5,12 @@
#define _I40E_XSK_H_
struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index baf2fe26f302..1f60518eb0e5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -85,8 +85,8 @@ struct iavf_adminq_info {
/**
* iavf_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cf539db79af9..95543dfd4fe7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -147,6 +147,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
/**
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that is timing out
**/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -2572,8 +2573,8 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
}
/**
- * iavf_del_all_cloud_filters - delete all cloud filters
- * on the traffic classes
+ * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
**/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
@@ -2592,7 +2593,7 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function processes the config information provided by the
* user to configure traffic classes/queue channels and packages the
@@ -2690,7 +2691,7 @@ exit:
/**
* iavf_parse_cls_flower - Parse tc flower filters provided by kernel
* @adapter: board private structure
- * @cls_flower: pointer to struct flow_cls_offload
+ * @f: pointer to struct flow_cls_offload
* @filter: pointer to cloud filter structure
*/
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
@@ -3064,8 +3065,8 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @adapter: board private structure
+ * @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
@@ -3112,7 +3113,7 @@ static LIST_HEAD(iavf_block_cb_list);
* iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type: type of offload
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function is the callback to ndo_setup_tc in the
* netdev_ops.
@@ -3768,8 +3769,7 @@ err_dma:
/**
* iavf_suspend - Power management suspend routine
- * @pdev: PCI device information struct
- * @state: unused
+ * @dev_d: device info pointer
*
* Called when the system (VM) is entering sleep/suspend.
**/
@@ -3799,7 +3799,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
/**
* iavf_resume - Power management resume routine
- * @pdev: PCI device information struct
+ * @dev_d: device info pointer
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 1058e68a02b4..82fda6f5abf0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* iavf_trace() macro enables shared code to refer to trace points
* like:
*
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ca041b39ffda..256fa07d54d5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1309,10 +1309,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -1376,10 +1373,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
+
/* build an skb around the page buffer */
skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index dd3348f9da9d..e5b9ba42dd00 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -454,7 +454,6 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
/**
* iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
@@ -514,6 +513,7 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
return count != IAVF_MAX_BUFFER_TXD;
}
/**
+ * txring_txq - helper to convert from a ring to a queue
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
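The body following the signature above is elided by the hunk; per the iavf sources it is a one-liner, shown here for context:

	return netdev_get_tx_queue(ring->netdev, ring->queue_index);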
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fe140ff38f74..a0723831c4e4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -284,6 +284,10 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+ bool devlink_port_registered;
+
u16 max_frame;
u16 rx_buf_len;
@@ -321,9 +325,9 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ struct xsk_buff_pool **xsk_pools;
+ u16 num_xsk_pools_used;
+ u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -375,9 +379,6 @@ enum ice_pf_flags {
struct ice_pf {
struct pci_dev *pdev;
- /* devlink port data */
- struct devlink_port devlink_port;
-
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
@@ -507,25 +508,25 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
}
/**
- * ice_xsk_umem - get XDP UMEM bound to a ring
- * @ring - ring to use
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: ring to use
*
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to xsk_buff_pool structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xdp_umem **umems = ring->vsi->xsk_umems;
+ struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return umems[qid];
+ return pools[qid];
}
/**
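A worked example of the qid adjustment in ice_xsk_pool() above, assuming a VSI with 8 queue pairs and XDP enabled (num_xdp_txq == 8), where XDP Tx ring 3 was allocated with q_index 11:

	u16 qid = ring->q_index;                /* 11 */
	if (ice_ring_is_xdp(ring))
		qid -= ring->vsi->num_xdp_txq;  /* 11 - 8 = 3: its queue pair */
	/* xsk_pools[3] is the pool bound to queue pair 3, if any */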
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ba9375218fef..b06fbe99d8e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1422,7 +1422,7 @@ struct ice_aqc_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/**
+/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
@@ -1826,8 +1826,8 @@ struct ice_aqc_event_lan_overflow {
* @opcode: AQ command opcode
* @datalen: length in bytes of indirect/external data buffer
* @retval: return value from firmware
- * @cookie_h: opaque data high-half
- * @cookie_l: opaque data low-half
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
- ring->xsk_umem = ice_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
- dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ if (ring->xsk_pool) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
err = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err)
- dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 111d6bfe4222..511da59bd6f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,18 +6,14 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
- dsn[0], dsn[1], dsn[2], dsn[3],
- dsn[4], dsn[5], dsn[6], dsn[7]);
-
- return 0;
+ snprintf(buf, len, "%8phD", dsn);
}
static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
@@ -106,6 +102,13 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+{
+ snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+
+ return 0;
+}
+
static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
@@ -150,6 +153,7 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
running("fw.netlist", ice_info_netlist_ver),
running("fw.netlist.build", ice_info_netlist_build),
};
@@ -180,11 +184,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
return err;
}
- err = ice_info_get_dsn(pf, buf, sizeof(buf));
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
- return err;
- }
+ ice_info_get_dsn(pf, buf, sizeof(buf));
err = devlink_info_serial_number_put(req, buf);
if (err) {
@@ -233,8 +233,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
/**
* ice_devlink_flash_update - Update firmware stored in flash on the device
* @devlink: pointer to devlink associated with device to update
- * @path: the path of the firmware file to use via request_firmware
- * @component: name of the component to update, or NULL
+ * @params: flash update parameters
* @extack: netlink extended ACK structure
*
* Perform a device flash update. The bulk of the update logic is contained
@@ -243,38 +242,52 @@ static int ice_devlink_info_get(struct devlink *devlink,
* Returns: zero on success, or an error code on failure.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
const struct firmware *fw;
+ u8 preservation;
int err;
- /* individual component update is not yet supported */
- if (component)
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
return -EOPNOTSUPP;
+ }
if (!hw->dev_caps.common_cap.nvm_unified_update) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
- err = ice_check_for_pending_update(pf, component, extack);
+ err = ice_check_for_pending_update(pf, NULL, extack);
if (err)
return err;
- err = request_firmware(&fw, path, dev);
+ err = request_firmware(&fw, params->file_name, dev);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
return err;
}
+ dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);
+
devlink_flash_update_begin_notify(devlink);
- devlink_flash_update_status_notify(devlink, "Preparing to flash",
- component, 0, 0);
- err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, preservation, extack);
devlink_flash_update_end_notify(devlink);
release_firmware(fw);
@@ -283,6 +296,7 @@ ice_devlink_flash_update(struct devlink *devlink, const char *path,
}
static const struct devlink_ops ice_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
@@ -352,55 +366,66 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
- * ice_devlink_create_port - Create a devlink port for this PF
- * @pf: the PF to create a port for
+ * ice_devlink_create_port - Create a devlink port for this VSI
+ * @vsi: the VSI to create a port for
*
- * Create and register a devlink_port for this PF. Note that although each
- * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function ID.
+ * Create and register a devlink_port for this VSI.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_pf *pf)
+int ice_devlink_create_port(struct ice_vsi *vsi)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct device *dev = ice_pf_to_dev(pf);
struct devlink_port_attrs attrs = {};
+ struct ice_port_info *pi;
+ struct devlink *devlink;
+ struct device *dev;
+ struct ice_pf *pf;
int err;
- if (!vsi) {
- dev_err(dev, "%s: unable to find main VSI\n", __func__);
- return -EIO;
- }
+ /* Currently we only create devlink_port instances for PF VSIs */
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ devlink = priv_to_devlink(pf);
+ dev = ice_pf_to_dev(pf);
+ pi = pf->hw.port_info;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.pf_id;
- devlink_port_attrs_set(&pf->devlink_port, &attrs);
- err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ attrs.phys.port_number = pi->lport;
+ devlink_port_attrs_set(&vsi->devlink_port, &attrs);
+ err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
return err;
}
+ vsi->devlink_port_registered = true;
+
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
+ * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
+ * @vsi: the VSI to cleanup
*
- * Unregisters the devlink_port structure associated with this PF.
+ * Unregisters the devlink_port structure associated with this VSI.
*/
-void ice_devlink_destroy_port(struct ice_pf *pf)
+void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
- devlink_port_type_clear(&pf->devlink_port);
- devlink_port_unregister(&pf->devlink_port);
+ if (!vsi->devlink_port_registered)
+ return;
+
+ devlink_port_type_clear(&vsi->devlink_port);
+ devlink_port_unregister(&vsi->devlink_port);
+
+ vsi->devlink_port_registered = false;
}
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -413,6 +438,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
@@ -456,6 +482,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -468,6 +495,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
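For context, the extra const struct devlink_region_ops *ops argument lets a single snapshot callback serve several regions; the wiring looks like this, as a sketch consistent with the ice driver of this era:

	static const struct devlink_region_ops ice_nvm_region_ops = {
		.name = "nvm-flash",
		.destructor = vfree,
		.snapshot = ice_devlink_nvm_snapshot,
	};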
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6e806a08dc23..e07e74426bde 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,8 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_pf *pf);
-void ice_devlink_destroy_port(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_vsi *vsi);
+void ice_devlink_destroy_port(struct ice_vsi *vsi);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d7430ce6af26..2d27f66ac853 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,8 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
- &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1647,8 +1646,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
- 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 6834df14332f..59c0c6a0f8c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -556,7 +556,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index b17ae3e20157..9095b4d274ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -489,8 +489,6 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].in_use = false;
- hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
@@ -505,8 +503,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
+ if (hw->tnl.tbl[i].boost_entry) {
hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
}
}
@@ -1626,104 +1627,59 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
}
/**
- * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
+ * @port: returns open port
*/
-static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
+ bool res = false;
u16 i;
+ mutex_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
- if (index)
- *index = i;
- return true;
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
}
- return false;
-}
-
-/**
- * ice_tunnel_port_in_use
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
- */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
-{
- bool res;
-
- mutex_lock(&hw->tnl_lock);
- res = ice_tunnel_port_in_use_hlpr(hw, port, index);
mutex_unlock(&hw->tnl_lock);
return res;
}
/**
- * ice_find_free_tunnel_entry
+ * ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
- * @type: tunnel type
- * @index: optionally returns index
+ * @type: type of tunnel
+ * @idx: linear index
*
- * Returns whether there is a free tunnel entry, and optionally its index
+ * Stack assumes we have 2 linear tables with indexes [0, valid_count),
+ * but really the port table may be sparse, and types are mixed, so convert
+ * the stack index into the device index.
*/
-static bool
-ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *index)
+static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 idx)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
- hw->tnl.tbl[i].type == type) {
- if (index)
- *index = i;
- return true;
- }
+ if (hw->tnl.tbl[i].valid &&
+ hw->tnl.tbl[i].type == type &&
+ idx-- == 0)
+ return i;
- return false;
-}
-
-/**
- * ice_get_open_tunnel_port - retrieve an open tunnel port
- * @hw: pointer to the HW structure
- * @type: tunnel type (TNL_ALL will return any open port)
- * @port: returns open port
- */
-bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port)
-{
- bool res = false;
- u16 i;
-
- mutex_lock(&hw->tnl_lock);
-
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
- *port = hw->tnl.tbl[i].port;
- res = true;
- break;
- }
-
- mutex_unlock(&hw->tnl_lock);
-
- return res;
+ WARN_ON_ONCE(1);
+ return 0;
}
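/* Illustration with a hypothetical four-entry device table (not from this
 * patch); only valid entries of the requested type are counted:
 *
 *	tbl[0] = { .valid = 1, .type = TNL_VXLAN  }
 *	tbl[1] = { .valid = 0 }				(skipped, invalid)
 *	tbl[2] = { .valid = 1, .type = TNL_GENEVE }	(skipped, wrong type)
 *	tbl[3] = { .valid = 1, .type = TNL_VXLAN  }
 *
 *	ice_tunnel_idx_to_entry(hw, TNL_VXLAN, 0) == 0
 *	ice_tunnel_idx_to_entry(hw, TNL_VXLAN, 1) == 3
 */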
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
* @type: type of tunnel
* @port: port of tunnel to create
*
@@ -1731,27 +1687,16 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+static enum ice_status
+ice_create_tunnel(struct ice_hw *hw, u16 index,
+ enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 index;
mutex_lock(&hw->tnl_lock);
- if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
- hw->tnl.tbl[index].ref++;
- status = 0;
- goto ice_create_tunnel_end;
- }
-
- if (!ice_find_free_tunnel_entry(hw, type, &index)) {
- status = ICE_ERR_OUT_OF_RANGE;
- goto ice_create_tunnel_end;
- }
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1790,11 +1735,8 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
- if (!status) {
+ if (!status)
hw->tnl.tbl[index].port = port;
- hw->tnl.tbl[index].in_use = true;
- hw->tnl.tbl[index].ref = 1;
- }
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1808,46 +1750,31 @@ ice_create_tunnel_end:
/**
* ice_destroy_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
+ * @type: type of tunnel
* @port: port of tunnel to destroy
- * @all: flag that states to destroy all tunnels
*
* Destroys a single tunnel entry by creating an update package buffer
* targeting that entry and then performing an update package command.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+static enum ice_status
+ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
+ u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 count = 0;
- u16 index;
- u16 size;
- u16 i;
mutex_lock(&hw->tnl_lock);
- if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
- if (hw->tnl.tbl[index].ref > 1) {
- hw->tnl.tbl[index].ref--;
- status = 0;
- goto ice_destroy_tunnel_end;
- }
-
- /* determine count */
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port))
- count++;
-
- if (!count) {
- status = ICE_ERR_PARAM;
+ if (WARN_ON(!hw->tnl.tbl[index].valid ||
+ hw->tnl.tbl[index].type != type ||
+ hw->tnl.tbl[index].port != port)) {
+ status = ICE_ERR_OUT_OF_RANGE;
goto ice_destroy_tunnel_end;
}
- /* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count);
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1859,13 +1786,13 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
goto ice_destroy_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1873,26 +1800,14 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port)) {
- memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_rx->tcam));
- memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_tx->tcam));
- hw->tnl.tbl[i].marked = true;
- }
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status)
- for (i = 0; i < hw->tnl.count &&
- i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].marked) {
- hw->tnl.tbl[i].ref = 0;
- hw->tnl.tbl[i].port = 0;
- hw->tnl.tbl[i].in_use = false;
- hw->tnl.tbl[i].marked = false;
- }
+ hw->tnl.tbl[index].port = 0;
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1903,6 +1818,52 @@ ice_destroy_tunnel_end:
return status;
}
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+ u16 index;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+	index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
+
+ status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
+ return 0;
+}
+
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+
+ status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
+ ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error removing UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
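/* The two callbacks pair up through the core's per-entry private byte, so
 * removal needs no rescan of the device table; a trace sketch:
 *
 *	set_port(table, idx, ti)
 *	  -> index = ice_tunnel_idx_to_entry(hw, tnl_type, idx)
 *	  -> udp_tunnel_nic_set_port_priv(netdev, table, idx, index)
 *	...
 *	unset_port(table, idx, ti)
 *	  -> the same index comes back as ti->hw_priv
 */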
/* PTG Management */
/**
@@ -4915,7 +4876,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 568ea519af51..20deddb807c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -19,12 +19,11 @@
#define ICE_PKG_CNT 4
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port);
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index c1c99a267a98..24063c1351b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -298,6 +298,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ __TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -311,11 +312,8 @@ struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
- u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
- u8 in_use;
- u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
@@ -323,6 +321,7 @@ struct ice_tunnel_entry {
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
+ u16 valid_count[__TNL_TYPE_CNT];
};
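/* Hypothetical example: a DDP package exposing 8 VXLAN and 4 GENEVE boost
 * entries leaves
 *	hw->tnl.valid_count[TNL_VXLAN]  == 8
 *	hw->tnl.valid_count[TNL_GENEVE] == 4
 * after ice_init_pkg_hints(); ice_probe() sizes the udp_tunnel_nic tables
 * from these counts (see ice_main.c below).
 */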
struct ice_pkg_es {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fe677621dd51..eadc85aee389 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -99,6 +99,54 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
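/* The selection logic below ANDs exactly one of these bitmaps into
 * params->ptypes per segment, roughly:
 *
 *	IPV4 set, no ICE_FLOW_SEG_HDRS_L4_MASK bit:
 *		ice_ipv4_ofos_no_l4 (outer, i == 0) or ice_ipv4_il_no_l4
 *	IPV4 set, an L4 bit also set:
 *		ice_ptypes_ipv4_ofos or ice_ptypes_ipv4_il, as before
 *
 * and symmetrically for the IPv6 tables.
 */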
/* UDP Packet types for non-tunneled packets or tunneled
* packets with inner UDP.
*/
@@ -250,11 +298,23 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
+ (const unsigned long *)ice_ipv4_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
(const unsigned long *)ice_ptypes_ipv4_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
+ (const unsigned long *)ice_ipv6_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
(const unsigned long *)ice_ptypes_ipv6_il;
@@ -385,7 +445,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
@@ -999,7 +1059,7 @@ enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 3913da2116d2..829f90b1e998 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -194,8 +194,8 @@ struct ice_flow_entry {
u16 entry_sz;
};
-#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
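/* The intermediate cast matters on 32-bit builds, where converting a pointer
 * straight to u64 changes width and draws a compiler warning:
 *
 *	struct ice_flow_entry *e = ...;
 *	u64 bad  = (u64)e;		warns when sizeof(void *) == 4
 *	u64 good = (u64)(uintptr_t)e;	clean on both 32- and 64-bit
 */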
struct ice_flow_prof {
struct list_head l_entry;
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8968fdd4816b..8f81b95e679c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -43,6 +43,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
enum ice_status status;
u8 *package_data;
+ dev_dbg(dev, "Sending PLDM record package data to firmware\n");
+
package_data = kmemdup(data, length, GFP_KERNEL);
if (!package_data)
return -ENOMEM;
@@ -229,6 +231,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
comp_tbl->cvs_len = component->version_len;
memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+ dev_dbg(dev, "Sending component table to firmware:\n");
+
status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
transfer_flag, &comp_response,
&comp_response_code, NULL);
@@ -279,11 +283,14 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
memset(&event, 0, sizeof(event));
+ dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
+ block_size, module, offset);
+
status = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (status) {
- dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
- module, offset, ice_stat_str(status),
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
+ module, block_size, offset, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -297,8 +304,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
if (err) {
- dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
- module, err);
+ dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
+ module, block_size, offset, err);
NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
return -EIO;
}
@@ -324,8 +331,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
- module, offset,
+ dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
+ module, block_size, offset,
ice_aq_str((enum ice_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
@@ -356,12 +363,15 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
const u8 *image, u32 length,
struct netlink_ext_ack *extack)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct devlink *devlink;
u32 offset = 0;
bool last_cmd;
u8 *block;
int err;
+ dev_dbg(dev, "Beginning write of flash component '%s', module 0x%02x\n", component, module);
+
devlink = priv_to_devlink(pf);
devlink_flash_update_status_notify(devlink, "Flashing",
@@ -394,6 +404,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
component, offset, length);
} while (!last_cmd);
+ dev_dbg(dev, "Completed write of flash component '%s', module 0x%02x\n", component, module);
+
if (err)
devlink_flash_update_status_notify(devlink, "Flashing failed",
component, length, length);
@@ -431,6 +443,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
enum ice_status status;
int err;
+ dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
+
memset(&event, 0, sizeof(event));
devlink = priv_to_devlink(pf);
@@ -476,6 +490,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
+ dev_dbg(dev, "Completed erase of flash component '%s', module 0x%02x\n", component, module);
+
out_notify_devlink:
if (err)
devlink_flash_update_status_notify(devlink, "Erasing failed",
@@ -614,14 +630,9 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
- int err;
/* Finally, notify firmware to activate the written NVM banks */
- err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
- if (err)
- return err;
-
- return 0;
+ return ice_switch_flash_banks(pf, priv->activate_flags, extack);
}
static const struct pldmfw_ops ice_fwu_ops = {
@@ -636,6 +647,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
* @pf: private device driver structure
* @fw: firmware object pointing to the relevant firmware file
+ * @preservation: preservation level to request from firmware
* @extack: netlink extended ACK structure
*
* Parse the data for a given firmware file, verifying that it is a valid PLDM
@@ -649,7 +661,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* Returns: zero on success or a negative error code on failure.
*/
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack)
+ u8 preservation, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -657,13 +669,24 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
enum ice_status status;
int err;
+ switch (preservation) {
+ case ICE_AQC_NVM_PRESERVE_ALL:
+ case ICE_AQC_NVM_PRESERVE_SELECTED:
+ case ICE_AQC_NVM_NO_PRESERVATION:
+ case ICE_AQC_NVM_FACTORY_DEFAULT:
+ break;
+ default:
+ WARN(1, "Unexpected preservation level request %u", preservation);
+ return -EINVAL;
+ }
+
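	/* A caller, e.g. the devlink flash handler updated elsewhere in this
	 * diff, now picks the level explicitly, along the lines of:
	 *
	 *	err = ice_flash_pldm_image(pf, fw, ICE_AQC_NVM_PRESERVE_ALL,
	 *				   extack);
	 */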
memset(&priv, 0, sizeof(priv));
priv.context.ops = &ice_fwu_ops;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
- priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+ priv.activate_flags = preservation;
status = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 79472cc618b4..c6390f6851ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -5,7 +5,7 @@
#define _ICE_FW_UPDATE_H_
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack);
+ u8 preservation, struct netlink_ext_ack *extack);
int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ebbb8f54871c..3df67486d42d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_devlink.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -1755,7 +1756,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
return ret;
for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+ vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -2616,8 +2617,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
unregister_netdev(vsi->netdev);
+ ice_devlink_destroy_port(vsi);
+ }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 54a7f55eb8c1..2dea4d0e9415 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -486,7 +486,6 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- WARN_ON(in_interrupt());
ice_prepare_for_reset(pf);
@@ -1057,7 +1056,9 @@ struct ice_aq_task {
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task *task;
+ unsigned long start;
long ret;
int err;
@@ -1074,6 +1075,8 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
hlist_add_head(&task->entry, &pf->aq_wait_list);
spin_unlock_bh(&pf->aq_wait_lock);
+ start = jiffies;
+
ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
timeout);
switch (task->state) {
@@ -1092,6 +1095,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
break;
}
+ dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
+ jiffies_to_msecs(jiffies - start),
+ jiffies_to_msecs(timeout),
+ opcode);
+
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
@@ -2273,7 +2281,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
return 0;
@@ -2417,7 +2425,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset reset bits
+	 * rings; in case of rebuild being triggered not from reset, bits
* in pf->state won't be set, so additionally check first q_vector
* against NULL
*/
@@ -2517,13 +2525,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_umems) {
+ if (!ret && prog && vsi->xsk_pools) {
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2549,8 +2557,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_SETUP_XSK_UMEM:
- return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -2873,6 +2881,7 @@ static void ice_set_ops(struct net_device *netdev)
}
netdev->netdev_ops = &ice_netdev_ops;
+ netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
}
@@ -2953,7 +2962,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
- err = ice_devlink_create_port(pf);
+ err = ice_devlink_create_port(vsi);
if (err)
return err;
@@ -2994,7 +3003,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (err)
goto err_free_netdev;
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -3007,7 +3016,7 @@ err_free_netdev:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
err_destroy_devlink_port:
- ice_devlink_destroy_port(pf);
+ ice_devlink_destroy_port(vsi);
return err;
}
@@ -3971,7 +3980,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
- int err;
+ int i, err;
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
@@ -4066,11 +4075,37 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_devlink_init_regions(pf);
+ pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
+ pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
+ pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
+ i = 0;
+ if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_VXLAN];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_VXLAN;
+ i++;
+ }
+ if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_GENEVE];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_GENEVE;
+ i++;
+ }
+
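	/* Continuing the hypothetical 8-VXLAN/4-GENEVE package from the
	 * ice_flex_type.h hunk above, this block would leave
	 *	tables[0] = { .n_entries = 8, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN }
	 *	tables[1] = { .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE }
	 * while a type with no valid entries is simply skipped.
	 */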
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
GFP_KERNEL);
@@ -4216,7 +4251,6 @@ probe_done:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
- ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -4331,7 +4365,6 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
- ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
@@ -6569,70 +6602,6 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- enum ice_tunnel_type tnl_type;
- u16 port = ntohs(ti->port);
- enum ice_status status;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- tnl_type = TNL_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- tnl_type = TNL_GENEVE;
- break;
- default:
- netdev_err(netdev, "Unknown tunnel type\n");
- return;
- }
-
- status = ice_create_tunnel(&pf->hw, tnl_type, port);
- if (status == ICE_ERR_OUT_OF_RANGE)
- netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
- port);
- else if (status)
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
-}
-
-/**
- * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- enum ice_status status;
- bool retval;
-
- retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
- if (!retval) {
- netdev_info(netdev, "port %d not found in UDP tunnels list\n",
- port);
- return;
- }
-
- status = ice_destroy_tunnel(&pf->hw, port, false);
- if (status)
- netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
- port);
-}
-
-/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -6824,6 +6793,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = ice_udp_tunnel_add,
- .ndo_udp_tunnel_del = ice_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9d0d6b0025cf..eae75260fe20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -145,7 +145,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -375,7 +375,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data);
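	/* net_prefetch() consolidates the open-coded pattern removed above;
	 * approximately, per the core networking tree:
	 *
	 *	static inline void net_prefetch(void *p)
	 *	{
	 *		prefetch(p);
	 *	#if L1_CACHE_BYTES < 128
	 *		prefetch((u8 *)p + L1_CACHE_BYTES);
	 *	#endif
	 *	}
	 */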
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
@@ -1616,7 +1610,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ice_clean_tx_irq_zc(ring, budget) :
ice_clean_tx_irq(ring, budget);
@@ -1646,7 +1640,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
+ cleaned = ring->xsk_pool ?
ice_clean_rx_irq_zc(ring, budget_per_ring) :
ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 51b4df7a59d2..ff1a1cbd078e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -43,7 +43,7 @@
/**
* ice_compute_pad - compute the padding
- * rx_buf_len: buffer length
+ * @rx_buf_len: buffer length
*
* Figure out the size of half page based on given buffer length and
* then subtract the skb_shared_info followed by subtraction of the
@@ -295,7 +295,7 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4cdccfadf274..2226a291a394 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -676,6 +676,9 @@ struct ice_hw {
struct mutex tnl_lock;
struct ice_tunnel_table tnl;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 71497776ac62..ec7f6c64132e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -871,7 +871,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* If there are not enough resources available, return an error. This should
* always be caught by ice_set_per_vf_res().
*
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 20ac5fca68c6..797886524054 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
err = ice_setup_rx_ctx(rx_ring);
@@ -260,21 +260,21 @@ free_buf:
}
/**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
*
* Returns 0 on success, negative on error
*/
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
- if (vsi->xsk_umems)
+ if (vsi->xsk_pools)
return 0;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
+ if (!vsi->xsk_pools) {
+ vsi->num_xsk_pools = 0;
return -ENOMEM;
}
@@ -282,73 +282,73 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
* @vsi: VSI from which the buffer pool will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
*/
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
+ vsi->xsk_pools[qid] = NULL;
+ vsi->num_xsk_pools_used--;
- if (vsi->num_xsk_umems_used == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
+ if (vsi->num_xsk_pools_used == 0) {
+ kfree(vsi->xsk_pools);
+ vsi->xsk_pools = NULL;
+ vsi->num_xsk_pools = 0;
}
}
/**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+ !vsi->xsk_pools[qid])
return -EINVAL;
- xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_umem(vsi, qid);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
+ ice_xsk_remove_pool(vsi, qid);
return 0;
}
/**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool region
* @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to a requested buffer pool region
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_umems)
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_umems)
+ if (!vsi->num_xsk_pools)
+ vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_pools)
return -EINVAL;
- err = ice_xsk_alloc_umems(vsi);
+ err = ice_xsk_alloc_pools(vsi);
if (err)
return err;
- if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ if (vsi->xsk_pools && vsi->xsk_pools[qid])
return -EBUSY;
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
+ vsi->xsk_pools[qid] = pool;
+ vsi->num_xsk_pools_used++;
- err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -357,17 +357,17 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
}
/**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate to a ring, NULL to disable
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
- bool if_running, umem_present = !!umem;
- int ret = 0, umem_failure = 0;
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
@@ -375,26 +375,26 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
- goto xsk_umem_if_up;
+ goto xsk_pool_if_up;
}
}
- umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
- ice_xsk_umem_disable(vsi, qid);
+ pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+ ice_xsk_pool_disable(vsi, qid);
-xsk_umem_if_up:
+xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
- if (!ret && umem_present)
+ if (!ret && pool_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
- if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
- umem_present ? "en" : "dis", umem_failure);
- return umem_failure;
+ if (pool_failure) {
+ netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+ pool_present ? "en" : "dis", pool_failure);
+ return pool_failure;
}
return ret;
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];
do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -595,7 +595,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}
return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -814,7 +814,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (queue_id >= vsi->num_txq)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -833,20 +833,20 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
}
/**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have an AF_XDP buffer pool attached
* @vsi: VSI to be checked
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
*/
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_umems)
+ if (!vsi->xsk_pools)
return false;
- for (i = 0; i < vsi->num_xsk_umems; i++) {
- if (vsi->xsk_umems[i])
+ for (i = 0; i < vsi->num_xsk_pools; i++) {
+ if (vsi->xsk_pools[i])
return true;
}
@@ -854,7 +854,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
}
/**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
@@ -872,7 +872,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fc1a06b4df36..fad783690134 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -9,7 +9,8 @@
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
@@ -19,8 +20,8 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
- struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+ struct xsk_buff_pool __always_unused *pool,
u16 __always_unused qid)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a32391e82762..50863fd87d53 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2554,7 +2554,7 @@ out:
/**
* __igb_access_emi_reg - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
**/
@@ -2590,7 +2590,7 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
@@ -2646,7 +2646,7 @@ out:
* igb_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c393cb2c0f16..9265901455cd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -357,13 +357,14 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
/**
* igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
- * @words: number of words to read
+ * @offset: offset to read from
+ * @words: number of words to read (unused)
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
- u16 words __always_unused, u16 *data)
+ u16 __always_unused words, u16 *data)
{
s32 ret_val = 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 3254737c07a3..fd8eb2f9ab9d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -166,6 +166,7 @@ static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
* @vlan: VLAN id to add or remove
* @vind: VMDq output index that maps queue to VLAN id
* @vlan_on: if true add filter, if false remove
+ * @vlvf_bypass: skip VLVF if no match is found
*
* Sets or clears a bit in the VLAN filter table array based on VLAN id
* and if we are adding or removing the filter
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 46debd991bfe..33cceb77e960 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -9,6 +9,7 @@
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
+ * @unlock: skip locking or not
*
* returns SUCCESS if it successfully read message from buffer
**/
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2f015b60a995..0286d2fceee4 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -19,6 +19,8 @@
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <net/xdp.h>
+
struct igb_adapter;
#define E1000_PCS_CFG_IGN_SD 1
@@ -79,6 +81,12 @@ struct igb_adapter;
#define IGB_I210_RX_LATENCY_100 2213
#define IGB_I210_RX_LATENCY_1000 448
+/* XDP */
+#define IGB_XDP_PASS 0
+#define IGB_XDP_CONSUMED BIT(0)
+#define IGB_XDP_TX BIT(1)
+#define IGB_XDP_REDIR BIT(2)
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -132,17 +140,62 @@ struct vf_mac_filter {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_1536 1536
#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN 16
-#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IGB_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IGB_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD igb_skb_pad()
#else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
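/* Worked example, assuming x86_64 defaults (4K pages, 64-byte cache lines,
 * NET_SKB_PAD == 64, NET_IP_ALIGN == 0, and
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320):
 *
 *	IGB_2K_TOO_SMALL_WITH_PADDING:
 *		(64 + 16 + 1536) > (2048 - 320)  =>  1616 > 1728  =>  false
 *	rx_buf_len  = 1536 - 0 = 1536
 *	page_size   = ALIGN(1536, 2048) = 2048
 *	IGB_SKB_PAD = (2048 - 320) - 1536 = 192 bytes of headroom
 */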
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -194,13 +247,22 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+enum igb_tx_buf_type {
+ IGB_TYPE_SKB = 0,
+ IGB_TYPE_XDP,
+};
+
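/* Cleanup paths are expected to dispatch on the new type field, along the
 * lines of (sketch, not verbatim from this patch):
 *
 *	if (tx_buffer->type == IGB_TYPE_SKB)
 *		dev_kfree_skb_any(tx_buffer->skb);
 *	else
 *		xdp_return_frame(tx_buffer->xdpf);
 */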
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ enum igb_tx_buf_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
@@ -248,6 +310,7 @@ struct igb_ring_container {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
+ struct bpf_prog *xdp_prog;
struct device *dev; /* device pointer for dma mapping */
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
@@ -288,6 +351,7 @@ struct igb_ring {
struct u64_stats_sync rx_syncp;
};
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -339,7 +403,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
return IGB_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
- return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+ return IGB_MAX_FRAME_BUILD_SKB;
#endif
return IGB_RXBUFFER_2048;
}
@@ -467,6 +531,7 @@ struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
unsigned long state;
unsigned int flags;
@@ -643,6 +708,9 @@ enum igb_boards {
extern char igb_driver_name[];
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring,
+ struct xdp_frame *xdpf);
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 6e8231c1ddf0..28baf203459a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -961,6 +961,10 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d9c3a6b169f9..5fc2c381da55 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -30,6 +30,8 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
@@ -549,8 +551,7 @@ exit:
/**
* igb_get_i2c_data - Reads the I2C SDA data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
+ * @data: opaque pointer to adapter struct
*
* Returns the I2C data bit value
**/
@@ -2220,7 +2221,6 @@ void igb_down(struct igb_adapter *adapter)
void igb_reinit_locked(struct igb_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igb_down(adapter);
@@ -2824,6 +2824,147 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct igb_adapter *adapter = netdev_priv(dev);
+ bool running = netif_running(dev);
+ struct bpf_prog *old_prog;
+ bool need_reset;
+
+ /* verify igb ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (frame_size > igb_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
+
+	/* device is up and bpf is added/removed, must set up the RX queues */
+ if (need_reset && running) {
+ igb_close(dev);
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+	/* bpf is just replaced, RXQ and MTU are already set up */
+ if (!need_reset)
+ return 0;
+
+ if (running)
+ igb_open(dev);
+
+ return 0;
+}
+
+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return igb_xdp_setup(dev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ u32 ret;
+
+ if (unlikely(!xdpf))
+ return IGB_XDP_CONSUMED;
+
+	/* During program transitions it's possible adapter->xdp_prog is assigned
+	 * but the ring has not been configured yet. In this case simply abort xmit.
+ */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int igb_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ int drops = 0;
+ int i;
+
+ if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+	/* During program transitions it's possible adapter->xdp_prog is assigned
+	 * but the ring has not been configured yet. In this case simply abort xmit.
+	 */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ if (err != IGB_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ __netif_tx_unlock(nq);
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return n - drops;
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2848,6 +2989,8 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = igb_features_check,
.ndo_setup_tc = igb_setup_tc,
+ .ndo_bpf = igb_xdp,
+ .ndo_xdp_xmit = igb_xdp_xmit,
};
/**
@@ -3388,7 +3531,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- if ((hw->mac.type >= e1000_i210 ||
+ if ((hw->mac.type == e1000_82576 &&
+ rd32(E1000_EECD) & E1000_EECD_PRES) ||
+ (hw->mac.type >= e1000_i210 ||
igb_get_flash_presence_i210(hw))) {
ret_val = igb_read_part_string(hw, part_str,
E1000_PBANUM_LENGTH);
@@ -3868,6 +4013,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/**
* igb_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: indicates whether we are in a resume call
*
* Returns 0 on success, negative value on failure
*
@@ -3985,6 +4131,7 @@ int igb_open(struct net_device *netdev)
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: indicates we are in a suspend call
*
* Returns 0, this is not allowed to fail
*
@@ -4178,6 +4325,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
+ struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
int size;
@@ -4200,6 +4348,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
return 0;
err:
@@ -4504,6 +4659,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 rxdctl = 0;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4708,6 +4867,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
{
igb_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
@@ -5219,7 +5380,7 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
/**
* igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void igb_watchdog(struct timer_list *t)
{
@@ -6077,6 +6238,80 @@ dma_error:
return -1;
}
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ union e1000_adv_tx_desc *tx_desc;
+ u32 len, cmd_type, olinfo_status;
+ struct igb_tx_buffer *tx_buffer;
+ dma_addr_t dma;
+ u16 i;
+
+ len = xdpf->len;
+
+ if (unlikely(!igb_desc_unused(tx_ring)))
+ return IGB_XDP_CONSUMED;
+
+ dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return IGB_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = tx_ring->next_to_use;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->type = IGB_TYPE_XDP;
+ tx_buffer->xdpf = xdpf;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+
+ /* set the timestamp */
+ tx_buffer->time_stamp = jiffies;
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ tx_ring->next_to_use = i;
+
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return IGB_XDP_TX;
+}
+
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
@@ -6105,6 +6340,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGB_TYPE_SKB;
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
@@ -6192,8 +6428,9 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
/**
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6256,6 +6493,19 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (adapter->xdp_prog) {
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (max_frame > igb_rx_bufsz(ring)) {
+ netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
+ }
+ }
+ }
+
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
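
[Editor's note: the guard added above compares the worst-case wire frame (MTU plus Ethernet header, FCS and one VLAN tag) against the per-ring Rx buffer, since XDP needs the whole frame in a single buffer. A sketch of the arithmetic, assuming 2048 bytes as the usual igb_rx_bufsz() value on 4K pages:]

/* Illustrative sketch only. */
#include <stdio.h>

int main(void)
{
	int mtu = 1500;
	int max_frame = mtu + 14 /* ETH_HLEN */ + 4 /* ETH_FCS_LEN */
			    + 4 /* VLAN_HLEN */;
	int rx_bufsz = 2048;	/* assumed igb_rx_bufsz() on 4K pages */

	printf("max_frame %d -> %s under XDP\n", max_frame,
	       max_frame > rx_bufsz ? "rejected" : "accepted");
	return 0;
}
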
@@ -7809,7 +8059,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7993,8 +8246,8 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
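
[Editor's note: together with the igb_alloc_mapped_page() hunk further down, this switches igb to the high-bias page-reuse scheme: take USHRT_MAX page references once, spend them by decrementing a local bias, and only top the atomic refcount back up when the bias is nearly exhausted. A toy model of the bookkeeping (the driver's u16 bias widened to unsigned int for simplicity):]

/* Illustrative sketch only, not driver code. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int page_refs = 1;		/* ref from page allocation */
	unsigned int pagecnt_bias;

	page_refs += USHRT_MAX - 1;		/* page_ref_add(page, USHRT_MAX - 1) */
	pagecnt_bias = USHRT_MAX;		/* refs currently owned by the driver */

	for (int frames = 0; frames < 200000; frames++) {
		pagecnt_bias--;			/* one ref handed to the stack */
		if (pagecnt_bias == 1) {	/* refill point from the hunk above */
			page_refs += USHRT_MAX - 1;
			pagecnt_bias = USHRT_MAX;
		}
	}
	/* the fast path never touched an atomic page refcount */
	printf("refs=%u bias=%u\n", page_refs, pagecnt_bias);
	return 0;
}
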
@@ -8033,23 +8286,21 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
@@ -8057,24 +8308,24 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
+ xdp->data += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
}
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGB_RX_HDR_LEN)
- headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
- (va + headlen) - page_address(rx_buffer->page),
+ (xdp->data + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
@@ -8090,32 +8341,29 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGB_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGB_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
@@ -8133,6 +8381,79 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IGB_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ result = IGB_XDP_REDIR;
+ else
+ result = IGB_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
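
[Editor's note: igb_run_xdp() smuggles its verdict through the returned pointer. Assuming the IGB_XDP_* values defined elsewhere in this patch follow the ixgbe convention (PASS = 0, CONSUMED/TX/REDIR = bits 0-2), ERR_PTR(-IGB_XDP_PASS) is NULL, meaning "build an skb", while every other verdict is an IS_ERR() pointer that igb_clean_rx_irq() decodes with -PTR_ERR(). A user-space mimic of the encoding:]

/* ERR_PTR()/IS_ERR()/PTR_ERR() stand in for <linux/err.h>; the IGB_XDP_*
 * values are assumed, not quoted from the patch.
 */
#include <stdio.h>

#define IGB_XDP_PASS		0
#define IGB_XDP_CONSUMED	(1 << 0)
#define IGB_XDP_TX		(1 << 1)
#define IGB_XDP_REDIR		(1 << 2)

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(void *ptr)   { return (long)ptr; }
static int IS_ERR(void *ptr)     { return (unsigned long)ptr >= (unsigned long)-4095; }

int main(void)
{
	int verdicts[] = { IGB_XDP_PASS, IGB_XDP_TX, IGB_XDP_REDIR };

	for (int i = 0; i < 3; i++) {
		void *skb = ERR_PTR(-(long)verdicts[i]);

		if (!skb)
			printf("verdict %d -> NULL: build an skb\n", verdicts[i]);
		else if (IS_ERR(skb))
			printf("verdict %d -> xdp_res %ld\n",
			       verdicts[i], -PTR_ERR(skb));
	}
	return 0;
}
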
+
+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
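
[Editor's note: on 4K-page systems each Rx page is split into two truesize halves and the XOR above ping-pongs page_offset between them, while on larger pages the offset advances linearly. A sketch of the small-page case:]

/* Illustrative sketch only. */
#include <stdio.h>

int main(void)
{
	unsigned int truesize = 4096 / 2;	/* igb_rx_pg_size() / 2 on 4K pages */
	unsigned int page_offset = 0;

	for (int frame = 0; frame < 4; frame++) {
		printf("frame %d consumed at offset %u\n", frame, page_offset);
		page_offset ^= truesize;	/* as in igb_rx_buffer_flip() */
	}
	return 0;
}
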
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -8187,7 +8508,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
* igb_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -8229,6 +8549,10 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8287,6 +8611,11 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
const unsigned int size)
{
@@ -8330,10 +8659,20 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8360,13 +8699,38 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_buffer = igb_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data -
+ igb_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depends on the frame size */
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+ skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ }
+
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ igb_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_packets++;
+ total_bytes += size;
+ } else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ &xdp, rx_desc);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -8406,6 +8770,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
+ if (xdp_xmit & IGB_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ igb_xdp_ring_update_tail(tx_ring);
+ }
+
u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
@@ -8419,11 +8792,6 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
-}
-
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -8460,14 +8828,16 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = igb_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
/**
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * igb_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: rx descriptor ring to allocate new receive buffers
+ * @cleaned_count: count of buffers to allocate
**/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
@@ -8536,9 +8906,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/**
* igb_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -8566,9 +8936,9 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* igb_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 490368d3d03c..7cc5428c3b3d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,8 +957,8 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
/**
* igb_ptp_get_ts_config - get hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -1141,8 +1141,8 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
/**
* igb_ptp_set_ts_config - set hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
**/
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 19269f5d52bc..ee9f8c1dca83 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -61,7 +61,7 @@ static const struct igbvf_info *igbvf_info_tbl[] = {
/**
* igbvf_desc_unused - calculate if we have unused descriptors
- * @rx_ring: address of receive ring structure
+ * @ring: address of receive ring structure
**/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
@@ -74,6 +74,8 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
/**
* igbvf_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
+ * @skb: skb to indicate to stack
* @status: descriptor status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
@@ -233,6 +235,8 @@ no_buffers:
/**
* igbvf_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
+ * @work_done: output parameter used to indicate completed work
+ * @work_to_do: input parameter setting limit of work
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -406,6 +410,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
/**
* igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
+ * @tx_ring: ring being initialized
*
* Return 0 on success, negative on failure
**/
@@ -444,6 +449,7 @@ err:
/**
* igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: board private structure
+ * @rx_ring: ring being initialized
*
* Returns 0 on success, negative on failure
**/
@@ -540,7 +546,7 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
/**
* igbvf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
+ * @rx_ring: ring structure pointer to free buffers from
**/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
@@ -760,7 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @tx_ring: ring structure to clean descriptors from
*
* returns true if ring is completely cleaned
**/
@@ -1891,7 +1897,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
/**
* igbvf_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer list pointer containing private struct
**/
static void igbvf_watchdog(struct timer_list *t)
{
@@ -2372,8 +2378,9 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
/**
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue timing out (unused)
**/
-static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2d566f3c827b..35baae900c1f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -215,6 +215,8 @@ struct igc_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
+ ktime_t ptp_reset_start; /* Reset time in clock mono */
};
void igc_up(struct igc_adapter *adapter);
@@ -548,6 +550,7 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index cc5a6cf531c7..fd37d2c203af 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -215,6 +215,11 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I225_LMVP:
case IGC_DEV_ID_I225_IT:
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_IT:
+ case IGC_DEV_ID_I221_V:
+ case IGC_DEV_ID_I226_BLANK_NVM:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f1f464967f87..32f5fd684139 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -324,22 +324,10 @@
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
-#define IGC_RXDEXT_STATERR_CE 0x01000000
-#define IGC_RXDEXT_STATERR_SE 0x02000000
-#define IGC_RXDEXT_STATERR_SEQ 0x04000000
-#define IGC_RXDEXT_STATERR_CXE 0x10000000
-#define IGC_RXDEXT_STATERR_TCPE 0x20000000
+#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
-/* Same mask, but for extended and packet split descriptors */
-#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
- IGC_RXDEXT_STATERR_CE | \
- IGC_RXDEXT_STATERR_SE | \
- IGC_RXDEXT_STATERR_SEQ | \
- IGC_RXDEXT_STATERR_CXE | \
- IGC_RXDEXT_STATERR_RXE)
-
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
@@ -409,7 +397,7 @@
#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
/* Time Sync Transmit Control bit definitions */
-#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 44410c2265d6..61d331ce38cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -321,6 +321,9 @@ static void igc_ethtool_get_regs(struct net_device *netdev,
for (i = 0; i < 8; i++)
regs_buff[205 + i] = rd32(IGC_ETQF(i));
+
+ regs_buff[213] = adapter->stats.tlpic;
+ regs_buff[214] = adapter->stats.rlpic;
}
static void igc_ethtool_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b9fe51b91c47..55dae7c4703f 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -24,6 +24,11 @@
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
@@ -125,9 +130,6 @@ struct igc_nvm_info {
struct igc_nvm_operations ops;
enum igc_nvm_type type;
- u32 flash_bank_size;
- u32 flash_base_addr;
-
u16 word_size;
u16 delay_usec;
u16 address_bits;
@@ -153,7 +155,6 @@ struct igc_phy_info {
u8 mdix;
bool is_mdix;
- bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
@@ -239,6 +240,8 @@ struct igc_hw_stats {
u64 prc511;
u64 prc1023;
u64 prc1522;
+ u64 tlpic;
+ u64 rlpic;
u64 gprc;
u64 bprc;
u64 mprc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9593aa4eea36..9112dff075cf 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -47,6 +47,11 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -1428,7 +1433,7 @@ static void igc_rx_checksum(struct igc_ring *ring,
/* TCP/UDP checksum error bit is set */
if (igc_test_staterr(rx_desc,
- IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_L4E |
IGC_RXDEXT_STATERR_IPE)) {
/* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
@@ -1550,10 +1555,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGC_SKB_PAD, truesize);
@@ -1589,10 +1591,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
@@ -1743,8 +1742,7 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (unlikely((igc_test_staterr(rx_desc,
- IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
if (!(netdev->features & NETIF_F_RXALL)) {
@@ -3685,6 +3683,8 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc511 += rd32(IGC_PRC511);
adapter->stats.prc1023 += rd32(IGC_PRC1023);
adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.tlpic += rd32(IGC_TLPIC);
+ adapter->stats.rlpic += rd32(IGC_RLPIC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
@@ -3778,6 +3778,8 @@ void igc_down(struct igc_adapter *adapter)
set_bit(__IGC_DOWN, &adapter->state);
+ igc_ptp_suspend(adapter);
+
/* disable receives in the hardware */
rctl = rd32(IGC_RCTL);
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
@@ -3831,7 +3833,6 @@ void igc_down(struct igc_adapter *adapter)
void igc_reinit_locked(struct igc_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igc_down(adapter);
@@ -4659,7 +4660,7 @@ int igc_close(struct net_device *netdev)
/**
* igc_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
* @cmd: ioctl command
**/
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -4700,14 +4701,35 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
return 0;
}
-static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
+{
+ struct timespec64 b;
+
+ b = ktime_to_timespec64(base_time);
+
+ return timespec64_compare(now, &b) > 0;
+}
+
+static bool validate_schedule(struct igc_adapter *adapter,
+ const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct timespec64 now;
size_t n;
if (qopt->cycle_time_extension)
return false;
+ igc_ptp_read(adapter, &now);
+
+ /* If we program the controller's BASET registers with a time
+ * in the future, it will hold all the packets until that
+ * time, causing a lot of TX Hangs, so to avoid that, we
+ * reject schedules that would start in the future.
+ */
+ if (!is_base_time_past(qopt->base_time, &now))
+ return false;
+
for (n = 0; n < qopt->num_entries; n++) {
const struct tc_taprio_sched_entry *e;
int i;
@@ -4762,7 +4784,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (adapter->base_time)
return -EALREADY;
- if (!validate_schedule(qopt))
+ if (!validate_schedule(adapter, qopt))
return -EINVAL;
adapter->cycle_time = qopt->cycle_time;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 6a9b5102aa55..ac0b9c85da7c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
+#include <linux/ktime.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -16,17 +17,12 @@
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
/* SYSTIM read access for I225 */
-static void igc_ptp_read_i225(struct igc_adapter *adapter,
- struct timespec64 *ts)
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
struct igc_hw *hw = &adapter->hw;
u32 sec, nsec;
- /* The timestamp latches on lowest register read. For I210/I211, the
- * lowest register is SYSTIMR. Since we only need to provide nanosecond
- * resolution, we can ignore it.
- */
- rd32(IGC_SYSTIMR);
+ /* The timestamp is latched when SYSTIML is read. */
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
@@ -39,9 +35,6 @@ static void igc_ptp_write_i225(struct igc_adapter *adapter,
{
struct igc_hw *hw = &adapter->hw;
- /* Writing the SYSTIMR register is not necessary as it only
- * provides sub-nanosecond resolution.
- */
wr32(IGC_SYSTIML, ts->tv_nsec);
wr32(IGC_SYSTIMH, ts->tv_sec);
}
@@ -81,7 +74,7 @@ static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
spin_lock_irqsave(&igc->tmreg_lock, flags);
- igc_ptp_read_i225(igc, &now);
+ igc_ptp_read(igc, &now);
now = timespec64_add(now, then);
igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
@@ -102,10 +95,9 @@ static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
spin_lock_irqsave(&igc->tmreg_lock, flags);
ptp_read_system_prets(sts);
- rd32(IGC_SYSTIMR);
- ptp_read_system_postts(sts);
ts->tv_nsec = rd32(IGC_SYSTIML);
ts->tv_sec = rd32(IGC_SYSTIMH);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
@@ -422,24 +414,17 @@ static void igc_ptp_tx_work(struct work_struct *work)
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
- if (time_is_before_jiffies(adapter->ptp_tx_start +
- IGC_PTP_TX_TIMEOUT)) {
- igc_ptp_tx_timeout(adapter);
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
return;
- }
- tsynctxctl = rd32(IGC_TSYNCTXCTL);
- if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
- igc_ptp_tx_hwtstamp(adapter);
- else
- /* reschedule to check later */
- schedule_work(&adapter->ptp_tx_work);
+ igc_ptp_tx_hwtstamp(adapter);
}
/**
* igc_ptp_set_ts_config - set hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
**/
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
@@ -466,7 +451,7 @@ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
/**
* igc_ptp_get_ts_config - get hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -515,6 +500,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real());
+ adapter->ptp_reset_start = ktime_get();
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -526,6 +514,24 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
}
+static void igc_ptp_time_save(struct igc_adapter *adapter)
+{
+ igc_ptp_read(adapter, &adapter->prev_ptp_time);
+ adapter->ptp_reset_start = ktime_get();
+}
+
+static void igc_ptp_time_restore(struct igc_adapter *adapter)
+{
+ struct timespec64 ts = adapter->prev_ptp_time;
+ ktime_t delta;
+
+ delta = ktime_sub(ktime_get(), adapter->ptp_reset_start);
+
+ timespec64_add_ns(&ts, ktime_to_ns(delta));
+
+ igc_ptp_write_i225(adapter, &ts);
+}
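
[Editor's note: the save/restore pair keeps the PHC monotonic across an adapter reset: the pre-reset PTP time is advanced by the CLOCK_MONOTONIC interval spent in reset instead of being reseeded from CLOCK_REALTIME. The arithmetic in a user-space sketch, with an assumed 3.75 s reset:]

/* Illustrative sketch only. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec prev_ptp = { .tv_sec = 1000, .tv_nsec = 500000000 };
	long long delta_ns = 3750000000LL;	/* assumed time spent in reset */

	prev_ptp.tv_sec  += delta_ns / 1000000000;
	prev_ptp.tv_nsec += delta_ns % 1000000000;
	if (prev_ptp.tv_nsec >= 1000000000) {	/* normalize, as timespec64_add_ns() does */
		prev_ptp.tv_nsec -= 1000000000;
		prev_ptp.tv_sec++;
	}
	printf("restored PHC: %lld.%09ld\n",
	       (long long)prev_ptp.tv_sec, prev_ptp.tv_nsec);
	return 0;
}
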
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -542,6 +548,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ igc_ptp_time_save(adapter);
}
/**
@@ -591,9 +599,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* Re-initialize the timer. */
if (hw->mac.type == igc_i225) {
- struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
-
- igc_ptp_write_i225(adapter, &ts64);
+ igc_ptp_time_restore(adapter);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index cbaa933ef30d..a430871d1c27 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -98,7 +98,6 @@ bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
u32 ctrl_reg;
- u32 icr_reg;
ENTER();
@@ -142,7 +141,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr_reg = IXGB_READ_REG(hw, ICR);
+ IXGB_READ_REG(hw, ICR);
return ctrl_reg & IXGB_CTRL0_RST;
}
@@ -274,7 +273,6 @@ bool
ixgb_init_hw(struct ixgb_hw *hw)
{
u32 i;
- u32 ctrl_reg;
bool status;
ENTER();
@@ -286,7 +284,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
*/
pr_debug("Issuing a global reset to MAC\n");
- ctrl_reg = ixgb_mac_reset(hw);
+ ixgb_mac_reset(hw);
pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
@@ -949,8 +947,6 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
- volatile u32 temp_reg;
-
ENTER();
/* if we are stopped or resetting exit gracefully */
@@ -959,66 +955,66 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
return;
}
- temp_reg = IXGB_READ_REG(hw, TPRL);
- temp_reg = IXGB_READ_REG(hw, TPRH);
- temp_reg = IXGB_READ_REG(hw, GPRCL);
- temp_reg = IXGB_READ_REG(hw, GPRCH);
- temp_reg = IXGB_READ_REG(hw, BPRCL);
- temp_reg = IXGB_READ_REG(hw, BPRCH);
- temp_reg = IXGB_READ_REG(hw, MPRCL);
- temp_reg = IXGB_READ_REG(hw, MPRCH);
- temp_reg = IXGB_READ_REG(hw, UPRCL);
- temp_reg = IXGB_READ_REG(hw, UPRCH);
- temp_reg = IXGB_READ_REG(hw, VPRCL);
- temp_reg = IXGB_READ_REG(hw, VPRCH);
- temp_reg = IXGB_READ_REG(hw, JPRCL);
- temp_reg = IXGB_READ_REG(hw, JPRCH);
- temp_reg = IXGB_READ_REG(hw, GORCL);
- temp_reg = IXGB_READ_REG(hw, GORCH);
- temp_reg = IXGB_READ_REG(hw, TORL);
- temp_reg = IXGB_READ_REG(hw, TORH);
- temp_reg = IXGB_READ_REG(hw, RNBC);
- temp_reg = IXGB_READ_REG(hw, RUC);
- temp_reg = IXGB_READ_REG(hw, ROC);
- temp_reg = IXGB_READ_REG(hw, RLEC);
- temp_reg = IXGB_READ_REG(hw, CRCERRS);
- temp_reg = IXGB_READ_REG(hw, ICBC);
- temp_reg = IXGB_READ_REG(hw, ECBC);
- temp_reg = IXGB_READ_REG(hw, MPC);
- temp_reg = IXGB_READ_REG(hw, TPTL);
- temp_reg = IXGB_READ_REG(hw, TPTH);
- temp_reg = IXGB_READ_REG(hw, GPTCL);
- temp_reg = IXGB_READ_REG(hw, GPTCH);
- temp_reg = IXGB_READ_REG(hw, BPTCL);
- temp_reg = IXGB_READ_REG(hw, BPTCH);
- temp_reg = IXGB_READ_REG(hw, MPTCL);
- temp_reg = IXGB_READ_REG(hw, MPTCH);
- temp_reg = IXGB_READ_REG(hw, UPTCL);
- temp_reg = IXGB_READ_REG(hw, UPTCH);
- temp_reg = IXGB_READ_REG(hw, VPTCL);
- temp_reg = IXGB_READ_REG(hw, VPTCH);
- temp_reg = IXGB_READ_REG(hw, JPTCL);
- temp_reg = IXGB_READ_REG(hw, JPTCH);
- temp_reg = IXGB_READ_REG(hw, GOTCL);
- temp_reg = IXGB_READ_REG(hw, GOTCH);
- temp_reg = IXGB_READ_REG(hw, TOTL);
- temp_reg = IXGB_READ_REG(hw, TOTH);
- temp_reg = IXGB_READ_REG(hw, DC);
- temp_reg = IXGB_READ_REG(hw, PLT64C);
- temp_reg = IXGB_READ_REG(hw, TSCTC);
- temp_reg = IXGB_READ_REG(hw, TSCTFC);
- temp_reg = IXGB_READ_REG(hw, IBIC);
- temp_reg = IXGB_READ_REG(hw, RFC);
- temp_reg = IXGB_READ_REG(hw, LFC);
- temp_reg = IXGB_READ_REG(hw, PFRC);
- temp_reg = IXGB_READ_REG(hw, PFTC);
- temp_reg = IXGB_READ_REG(hw, MCFRC);
- temp_reg = IXGB_READ_REG(hw, MCFTC);
- temp_reg = IXGB_READ_REG(hw, XONRXC);
- temp_reg = IXGB_READ_REG(hw, XONTXC);
- temp_reg = IXGB_READ_REG(hw, XOFFRXC);
- temp_reg = IXGB_READ_REG(hw, XOFFTXC);
- temp_reg = IXGB_READ_REG(hw, RJC);
+ IXGB_READ_REG(hw, TPRL);
+ IXGB_READ_REG(hw, TPRH);
+ IXGB_READ_REG(hw, GPRCL);
+ IXGB_READ_REG(hw, GPRCH);
+ IXGB_READ_REG(hw, BPRCL);
+ IXGB_READ_REG(hw, BPRCH);
+ IXGB_READ_REG(hw, MPRCL);
+ IXGB_READ_REG(hw, MPRCH);
+ IXGB_READ_REG(hw, UPRCL);
+ IXGB_READ_REG(hw, UPRCH);
+ IXGB_READ_REG(hw, VPRCL);
+ IXGB_READ_REG(hw, VPRCH);
+ IXGB_READ_REG(hw, JPRCL);
+ IXGB_READ_REG(hw, JPRCH);
+ IXGB_READ_REG(hw, GORCL);
+ IXGB_READ_REG(hw, GORCH);
+ IXGB_READ_REG(hw, TORL);
+ IXGB_READ_REG(hw, TORH);
+ IXGB_READ_REG(hw, RNBC);
+ IXGB_READ_REG(hw, RUC);
+ IXGB_READ_REG(hw, ROC);
+ IXGB_READ_REG(hw, RLEC);
+ IXGB_READ_REG(hw, CRCERRS);
+ IXGB_READ_REG(hw, ICBC);
+ IXGB_READ_REG(hw, ECBC);
+ IXGB_READ_REG(hw, MPC);
+ IXGB_READ_REG(hw, TPTL);
+ IXGB_READ_REG(hw, TPTH);
+ IXGB_READ_REG(hw, GPTCL);
+ IXGB_READ_REG(hw, GPTCH);
+ IXGB_READ_REG(hw, BPTCL);
+ IXGB_READ_REG(hw, BPTCH);
+ IXGB_READ_REG(hw, MPTCL);
+ IXGB_READ_REG(hw, MPTCH);
+ IXGB_READ_REG(hw, UPTCL);
+ IXGB_READ_REG(hw, UPTCH);
+ IXGB_READ_REG(hw, VPTCL);
+ IXGB_READ_REG(hw, VPTCH);
+ IXGB_READ_REG(hw, JPTCL);
+ IXGB_READ_REG(hw, JPTCH);
+ IXGB_READ_REG(hw, GOTCL);
+ IXGB_READ_REG(hw, GOTCH);
+ IXGB_READ_REG(hw, TOTL);
+ IXGB_READ_REG(hw, TOTH);
+ IXGB_READ_REG(hw, DC);
+ IXGB_READ_REG(hw, PLT64C);
+ IXGB_READ_REG(hw, TSCTC);
+ IXGB_READ_REG(hw, TSCTFC);
+ IXGB_READ_REG(hw, IBIC);
+ IXGB_READ_REG(hw, RFC);
+ IXGB_READ_REG(hw, LFC);
+ IXGB_READ_REG(hw, PFRC);
+ IXGB_READ_REG(hw, PFTC);
+ IXGB_READ_REG(hw, MCFRC);
+ IXGB_READ_REG(hw, MCFTC);
+ IXGB_READ_REG(hw, XONRXC);
+ IXGB_READ_REG(hw, XONTXC);
+ IXGB_READ_REG(hw, XOFFRXC);
+ IXGB_READ_REG(hw, XOFFTXC);
+ IXGB_READ_REG(hw, RJC);
}
/******************************************************************************
@@ -1161,18 +1157,13 @@ static void
ixgb_optics_reset(struct ixgb_hw *hw)
{
if (hw->phy_type == ixgb_phy_type_txn17401) {
- u16 mdio_reg;
-
ixgb_write_phy_reg(hw,
MDIO_CTRL1,
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD,
MDIO_CTRL1_RESET);
- mdio_reg = ixgb_read_phy_reg(hw,
- MDIO_CTRL1,
- IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD);
+ ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
}
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 048351cf0e4a..1588376d4c67 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1109,7 +1109,7 @@ alloc_failed:
/**
* ixgb_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void
@@ -1531,10 +1531,11 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* ixgb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue hanging (unused)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -1746,7 +1747,8 @@ ixgb_intr(int irq, void *data)
/**
* ixgb_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct pointer
+ * @budget: max number of receives to clean
**/
static int
@@ -1865,7 +1867,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
* ixgb_rx_checksum - Receive Checksum Offload for 82597.
* @adapter: board private structure
* @rx_desc: receive descriptor
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void
@@ -1923,6 +1925,8 @@ static void ixgb_check_copybreak(struct napi_struct *napi,
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
+ * @work_done: output pointer to number of packets cleaned
+ * @work_to_do: how much work we can complete
**/
static bool
@@ -2042,6 +2046,7 @@ rxdesc_done:
/**
* ixgb_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
+ * @cleaned_count: how many buffers to allocate
**/
static void
@@ -2211,7 +2216,7 @@ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
/**
* ixgb_io_slot_reset - called after the pci bus has been reset.
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* This callback is called after the PCI bus has been reset.
* Basically, this tries to restart the card from scratch.
@@ -2259,7 +2264,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/**
* ixgb_io_resume - called when it's OK to resume normal operations
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* The error recovery driver tells us that it's OK to resume
* normal operation. Implementation resembles the second-half
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1e8a809233a0..de0fc6ecf491 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -350,7 +350,7 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 71ec908266a6..a280aa34ca1d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -531,6 +531,16 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
return err;
}
+static void ixgbe_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+
+ stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
+ stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
+}
+
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -3546,6 +3556,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
.get_msglevel = ixgbe_get_msglevel,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2e35c5706cf1..df389a11d3af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1029,10 +1029,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
- napi_hash_del(&q_vector->napi);
- netif_napi_del(&q_vector->napi);
+ __netif_napi_del(&q_vector->napi);
/*
+ * after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 86ca8b9ea1b8..45ae33e15303 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2095,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2161,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -3156,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3176,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
@@ -3471,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
- ring->xsk_umem = NULL;
+ ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3713,8 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
+ if (rx_ring->xsk_pool) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4059,12 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4119,8 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
- if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
@@ -4142,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- if (ring->xsk_umem)
+ if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -5292,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -5682,7 +5677,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
- WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
netif_trans_update(adapter->netdev);
@@ -5989,7 +5983,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (tx_ring->xsk_umem) {
+ if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
@@ -6185,8 +6179,9 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that timed out
**/
-static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -10161,7 +10156,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->xdp_ring[i]->xsk_umem)
+ if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
@@ -10175,8 +10170,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 7980d7265e10..f77fa3e4fdd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -771,7 +771,7 @@ mii_bus_write_done:
/**
* ixgbe_mii_bus_read - Read a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -786,7 +786,7 @@ static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
/**
* ixgbe_mii_bus_write - Write a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
@@ -803,7 +803,7 @@ static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
/**
* ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -820,7 +820,7 @@ static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
/**
* ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 7887ae4aaf4f..2aeec78029bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -28,9 +28,10 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid);
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ec7121f352e2..3771857cf887 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -8,8 +8,8 @@
#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
@@ -17,11 +17,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(adapter->netdev, qid);
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
@@ -35,7 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -59,13 +59,13 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
- umem = xdp_get_umem_from_qid(adapter->netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
@@ -75,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -83,11 +83,12 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
return 0;
}
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
- ixgbe_xsk_umem_disable(adapter, qid);
+ return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+ ixgbe_xsk_pool_disable(adapter, qid);
}
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
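What drives ixgbe_xsk_pool_setup() in practice is a userspace bind or unbind. A minimal sketch using the libbpf xsk helpers — the interface name, queue 0, and the NUM_FRAMES/FRAME_SIZE constants are placeholders, and error handling is elided:

    #include <stdlib.h>
    #include <unistd.h>
    #include <bpf/xsk.h>

    struct xsk_ring_prod fq, txr;
    struct xsk_ring_cons cq, rxr;
    struct xsk_umem *umem;
    struct xsk_socket *xsk;
    void *bufs;

    posix_memalign(&bufs, getpagesize(), NUM_FRAMES * FRAME_SIZE);
    xsk_umem__create(&umem, bufs, NUM_FRAMES * FRAME_SIZE, &fq, &cq, NULL);

    /* Binding to queue 0 ends up in ixgbe_xsk_pool_enable(); tearing the
     * socket down later takes the ixgbe_xsk_pool_disable() path. */
    xsk_socket__create(&xsk, "eth0", 0, umem, &rxr, &txr, NULL);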
@@ -149,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count;
do {
- bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) {
ok = false;
break;
@@ -286,7 +287,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
@@ -344,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
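The need_wakeup flags toggled above have a userspace counterpart: the application checks the flag before issuing a syscall to kick the kernel, saving syscalls while the driver is already running. Continuing the bind sketch above (fq, txr, and xsk as before; an illustration, not part of the patch):

    /* Rx: after refilling the fill ring, kick only when asked to. */
    if (xsk_ring_prod__needs_wakeup(&fq))
            recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);

    /* Tx: after publishing descriptors on the Tx ring. */
    if (xsk_ring_prod__needs_wakeup(&txr))
            sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);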
@@ -373,6 +374,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
+ struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_bi;
bool work_done = true;
@@ -387,12 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
- desc.len);
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -418,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(pool);
}
return !!budget && work_done;
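Distilled from the hunk above, the Tx half of the pool API follows a peek/release contract: peek hands out one descriptor at a time in umem coordinates, the driver translates and posts it, and release publishes the consumed entries back to the socket's Tx ring in one batch. A condensed restatement (budget and pool as in the function above; the hardware posting is elided):

    struct xdp_desc desc;

    while (budget-- && xsk_tx_peek_desc(pool, &desc)) {
            /* desc.addr is a umem offset; translate it for the device */
            dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);

            xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
            /* ... post dma + desc.len on the hardware Tx ring ... */
    }
    xsk_tx_release(pool);   /* publish consumed Tx entries to userspace */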
@@ -439,7 +440,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -484,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
@@ -511,7 +512,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
return -ENETDOWN;
- if (!ring->xsk_umem)
+ if (!ring->xsk_pool)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
@@ -526,7 +527,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -546,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
}
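Taken together, the renames in this file map the old driver-facing umem helpers onto their pool equivalents:

    xdp_get_umem_from_qid()         ->  xsk_get_pool_from_qid()
    xsk_buff_dma_map()/_unmap()     ->  xsk_pool_dma_map()/_unmap()
    xsk_umem_uses_need_wakeup()     ->  xsk_uses_need_wakeup()
    xsk_umem_consume_tx()           ->  xsk_tx_peek_desc()
    xsk_umem_consume_tx_done()      ->  xsk_tx_release()
    xsk_umem_complete_tx()          ->  xsk_tx_completed()

In addition, xsk_buff_dma_sync_for_cpu() now takes the pool as a second argument.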
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a428113e6d54..82fce27f682b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -246,8 +246,9 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hanging transmit queue (unused)
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -866,10 +867,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -947,10 +946,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
* have a consumer accessing first few bytes of meta data,
* and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
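net_prefetch() is the generic helper that replaces the open-coded pair of prefetches removed in these two hunks; in include/linux/netdevice.h it reads approximately as follows (treat as a paraphrase, not a quotation):

    static inline void net_prefetch(void *p)
    {
            prefetch(p);
    #if L1_CACHE_BYTES < 128
            prefetch((u8 *)p + L1_CACHE_BYTES);
    #endif
    }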
@@ -2526,8 +2522,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
- WARN_ON(in_interrupt());
-
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
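The dropped WARN_ON(in_interrupt()) is likely part of the tree-wide effort to retire in_interrupt() checks from drivers: the check is unreliable as a context test, and the very next statement already asserts process context by sleeping:

    while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
            msleep(1);      /* msleep() may sleep, so an atomic caller was
                             * impossible here even before the removal */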