Diffstat (limited to 'drivers/net/ethernet/intel/igb')
 drivers/net/ethernet/intel/igb/Makefile        |    4
 drivers/net/ethernet/intel/igb/e1000_82575.c   |   77
 drivers/net/ethernet/intel/igb/e1000_82575.h   |    3
 drivers/net/ethernet/intel/igb/e1000_defines.h |   22
 drivers/net/ethernet/intel/igb/e1000_i210.c    |  369
 drivers/net/ethernet/intel/igb/e1000_i210.h    |   17
 drivers/net/ethernet/intel/igb/e1000_mac.c     |  128
 drivers/net/ethernet/intel/igb/e1000_mac.h     |    1
 drivers/net/ethernet/intel/igb/e1000_nvm.c     |   99
 drivers/net/ethernet/intel/igb/e1000_nvm.h     |   16
 drivers/net/ethernet/intel/igb/e1000_phy.c     |   49
 drivers/net/ethernet/intel/igb/e1000_phy.h     |    1
 drivers/net/ethernet/intel/igb/igb.h           |  112
 drivers/net/ethernet/intel/igb/igb_ethtool.c   |  406
 drivers/net/ethernet/intel/igb/igb_main.c      | 1572
 drivers/net/ethernet/intel/igb/igb_ptp.c       |  102
 16 files changed, 2020 insertions(+), 958 deletions(-)
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197fd4a8e..624476cfa727 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+ e1000_i210.o igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e2f748..fdaaf2709d0a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
@@ -1027,6 +1028,15 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igb_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
} else {
ret_val = igb_check_for_copper_link(hw);
}
@@ -1277,12 +1287,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
+ u32 phpm_reg;
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit */
+ if (hw->mac.type >= e1000_82580) {
+ phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
ret_val = igb_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
@@ -1336,7 +1354,7 @@ out:
**/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 ctrl_ext, ctrl_reg, reg;
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
@@ -1424,27 +1442,45 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- /*
- * We force flow control to prevent the CTRL register values from being
- * overwritten by the autonegotiated flow control values
- */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = rd32(E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+ wr32(E1000_PCS_ANADV, anadv_reg);
+
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
wr32(E1000_PCS_LCTL, reg);
- if (!igb_sgmii_active_82575(hw))
+ if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
igb_force_mac_fc(hw);
return ret_val;
@@ -1918,6 +1954,12 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
hw->dev_spec._82575.global_device_reset = false;
+ /* due to hw errata, global device reset doesn't always
+ * work on 82580
+ */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
/* Get current control state. */
ctrl = rd32(E1000_CTRL);
@@ -2233,19 +2275,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
- E1000_IPCNFG_EEE_100M_AN);
- eeer |= (E1000_EEER_TX_LPI_EN |
- E1000_EEER_RX_LPI_EN |
+ u32 eee_su = rd32(E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
- /* keep the LPI clock running before EEE is enabled */
- if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
- u32 eee_su;
- eee_su = rd32(E1000_EEE_SU);
- eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
- wr32(E1000_EEE_SU, eee_su);
- }
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
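
The PCS_ANADV update in the serdes-link hunk above advertises flow control according to the requested mode before restarting autoneg. A minimal sketch of that mapping, assuming the driver's existing E1000_TXCW_* defines and e1000_fc_* enum (the helper name is illustrative and not part of the patch):

/* Map the requested flow-control mode to PAUSE/ASM_DIR advertisement
 * bits, mirroring the switch added to igb_setup_serdes_link_82575().
 * Illustrative helper only; it is not added by this patch.
 */
static u32 igb_fc_mode_to_anadv(enum e1000_fc_mode mode)
{
	u32 adv = 0;

	switch (mode) {
	case e1000_fc_full:
	case e1000_fc_rx_pause:
		/* Rx-only cannot be advertised by itself, so advertise
		 * symmetric pause plus asymmetric direction.
		 */
		adv |= E1000_TXCW_PAUSE | E1000_TXCW_ASM_DIR;
		break;
	case e1000_fc_tx_pause:
		adv |= E1000_TXCW_ASM_DIR;
		break;
	default:
		break;
	}
	return adv;
}
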
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453f5428..44b76b3b6816 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41ec3c40..45dce06eff26 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -431,6 +431,10 @@
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
+/* Transmit Config Word */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
@@ -539,6 +543,9 @@
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
@@ -636,6 +643,7 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+#define NVM_VERSION 0x0005
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +661,19 @@
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
+/* NVM version defines */
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_BUILD_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -860,6 +881,7 @@
#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
#define E1000_EEE_SU_LPI_CLK_STP 0X00800000 /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability nego */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f939bc74..fbcdbebb0b5f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -35,11 +35,42 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
+/**
+ * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 ret_val = E1000_SUCCESS;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(E1000_SWSM);
+ wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igb_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
/**
* igb_acquire_nvm_i210 - Request for access to EEPROM
@@ -68,6 +99,23 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
}
/**
+ * igb_put_hw_semaphore_i210 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ */
+static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(E1000_SWSM);
+
+ swsm &= ~E1000_SWSM_SWESMBI;
+
+ wr32(E1000_SWSM, swsm);
+}
+
+/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
@@ -138,60 +186,6 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
}
/**
- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- **/
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 ret_val = E1000_SUCCESS;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- udelay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- igb_put_hw_semaphore(hw);
- hw_dbg("Driver can't access the NVM\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * igb_put_hw_semaphore_i210 - Release hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Release hardware semaphore used to access the PHY or NVM
- **/
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
-
- swsm = rd32(E1000_SWSM);
-
- swsm &= ~E1000_SWSM_SWESMBI;
-
- wr32(E1000_SWSM, swsm);
-}
-
-/**
* igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
@@ -229,49 +223,6 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
- * @hw: pointer to the HW structure
- * @offset: offset within the Shadow RAM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow RAM
- *
- * Writes data to Shadow RAM at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function , the
- * data will not be committed to FLASH and also Shadow RAM will most likely
- * contain an invalid checksum.
- *
- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
- * partially written.
- **/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 i, count;
-
- /* We cannot hold synchronization semaphores for too long,
- * because of forceful takeover procedure. However it is more efficient
- * to write in bursts than synchronizing access for each word. */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = igb_write_nvm_srwr(hw, offset, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- return status;
-}
-
-/**
* igb_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
@@ -329,6 +280,50 @@ out:
}
/**
+ * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function , the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to write in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = igb_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
+
+/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
@@ -350,16 +345,40 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
- case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_INIT_CTRL_4:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_1_CFG:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_0_2_CFG:
igb_read_invm_i211(hw, offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
break;
- case NVM_COMPAT:
- *data = ID_LED_DEFAULT_I210;
- break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -423,6 +442,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
}
/**
+ * igb_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver) {
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have first version location used */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have second version location used */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if we have image type in first location used */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in first location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
+/**
* igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
@@ -519,6 +632,28 @@ out:
}
/**
+ * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
+ * @hw: pointer to the HW structure
+ *
+ */
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = rd32(E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -548,28 +683,6 @@ out:
}
/**
- * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
- * @hw: pointer to the HW structure
- *
- **/
-s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_NVM;
- u32 i, reg;
-
- for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
- reg = rd32(E1000_EECD);
- if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
- break;
- }
- udelay(5);
- }
-
- return ret_val;
-}
-
-/**
* igb_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
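
igb_write_nvm_srwr_i210() above deliberately acquires and releases the semaphore once per burst of at most E1000_EERD_EEWR_MAX_COUNT words, so a large write never holds the hardware lock for the whole operation. A simplified sketch of that chunking pattern, detached from the hardware ops (all names below are illustrative stand-ins for nvm.ops.acquire/release and the EEWR write):

/* Write `words` entries in bursts of at most `max_burst`, taking and
 * dropping the lock around each burst.  Same loop shape as
 * igb_write_nvm_srwr_i210(); returns -1 if the lock cannot be taken,
 * otherwise the status of the last burst.
 */
static int write_in_bursts(u16 offset, u16 words, const u16 *data,
			   u16 max_burst,
			   int (*lock)(void), void (*unlock)(void),
			   int (*write)(u16 off, u16 count, const u16 *buf))
{
	u16 i, count;
	int status = 0;

	for (i = 0; i < words; i += max_burst) {
		count = (words - i) > max_burst ? max_burst : (words - i);
		if (lock())
			return -1;
		status = write(offset + i, count, data + i);
		unlock();
		if (status)
			break;
	}
	return status;
}
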
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3f50bc..1c89358a99ab 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
+extern s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
(ID_LED_OFF1_OFF2 << 4) | \
@@ -73,4 +84,10 @@ enum E1000_INVM_STRUCTURE_TYPE {
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
+/* NVM offset defaults for i211 device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0X7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145ac762..101e6e4da97f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -839,6 +839,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1040,6 +1041,129 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+ && mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = rd32(E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ hw_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = rd32(E1000_PCS_ANADV);
+ pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = igb_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
out:
return ret_val;
@@ -1391,6 +1515,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ /* All MDI settings are supported on 82580 and newer. */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
hw_dbg("Invalid MDI setting detected\n");
hw->phy.mdix = 1;
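
The pause-resolution logic added to igb_config_fc_after_link_up() implements the IEEE 802.3 table quoted in the comment block above. As a compact restatement, the same resolution can be written as a pure function of the four advertisement bits; this is only a sketch using the patch's E1000_TXCW_* definitions, not driver code:

/* Resolve the negotiated flow-control mode from the local (PCS_ANADV)
 * and link-partner (PCS_LPAB) PAUSE/ASM_DIR bits.  `want_rx_only` is
 * true when the user requested Rx pause only, which had to be
 * advertised as full.  Mirrors the table in igb_config_fc_after_link_up().
 */
static enum e1000_fc_mode igb_resolve_fc(u32 local, u32 partner,
					 bool want_rx_only)
{
	bool local_pause = local & E1000_TXCW_PAUSE;
	bool local_asm   = local & E1000_TXCW_ASM_DIR;
	bool lp_pause    = partner & E1000_TXCW_PAUSE;
	bool lp_asm      = partner & E1000_TXCW_ASM_DIR;

	if (local_pause && lp_pause)
		return want_rx_only ? e1000_fc_rx_pause : e1000_fc_full;
	if (!local_pause && local_asm && lp_pause && lp_asm)
		return e1000_fc_tx_pause;
	if (local_pause && local_asm && !lp_pause && lp_asm)
		return e1000_fc_rx_pause;
	return e1000_fc_none;
}
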
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e51e30..e2b2c4b9c951 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_defines.h"
+#include "e1000_i210.h"
/*
* Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf3f357..fbb7604db364 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -438,7 +438,7 @@ out:
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/*
@@ -448,22 +448,21 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ return ret_val;
}
- ret_val = hw->nvm.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- msleep(10);
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = igb_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
igb_standby_nvm(hw);
@@ -497,13 +496,10 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(1000, 2000);
+ nvm->ops.release(hw);
}
- msleep(10);
-release:
- hw->nvm.ops.release(hw);
-
-out:
return ret_val;
}
@@ -710,3 +706,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igb_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output structure
+ *
+ * unsupported MAC types will return all 0 version structure
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 fw_version;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ break;
+ default:
+ return;
+ }
+ /* basic eeprom version numbers */
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+ /* etrack id */
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i350:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option Rom version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ ((comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT));
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
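
igb_get_fw_version() above packs the EEPROM version word and the two eTrack words into the new e1000_fw_version structure using the NVM_* masks added to e1000_defines.h. A small sketch of just the bit manipulation, with hypothetical local names:

/* Decompose the raw NVM words the way igb_get_fw_version() does.
 * fw_version comes from NVM_VERSION (0x0005); etrack_lo/hi come from
 * NVM_ETRACK_WORD and the following word.
 */
static void decode_nvm_version(u16 fw_version, u16 etrack_lo, u16 etrack_hi,
			       struct e1000_fw_version *out)
{
	out->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
	/* note: the patch stores the minor field masked but unshifted */
	out->eep_minor = fw_version & NVM_MINOR_MASK;
	out->etrack_id = ((u32)etrack_hi << NVM_ETRACK_SHIFT) | etrack_lo;
}
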
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b0228cac0..7012d458c6f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..fe76004aca4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- goto out;
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- goto out;
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ }
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
switch (hw->phy.id) {
case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
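
For the I210 internal PHY, the cable-length hunk above reads the diagnostics register and normalizes the result to metres depending on the unit bit. The conversion itself reduces to the following (illustrative helper, not part of the patch):

/* Convert the raw PCDL cable-length reading to metres.  The unit bit in
 * the PCDC register being clear means the value is reported in
 * centimetres, matching the is_cm handling added above.
 */
static u16 igb_pcdl_to_meters(u16 raw_length, bool is_cm)
{
	return is_cm ? raw_length / 100 : raw_length;
}
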
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299bfcb9..ed282f877d9a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
/* Enable flexible speed on link-up */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230c0592..17f1686ee411 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,16 +34,16 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#ifdef CONFIG_IGB_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
struct igb_adapter;
+#define E1000_PCS_CFG_IGN_SD 1
+
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
@@ -132,9 +132,10 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -151,11 +152,18 @@ struct vf_data_storage {
#define IGB_MNG_VLAN_NONE -1
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
@@ -174,11 +182,9 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
- u32 page_offset;
+ unsigned int page_offset;
};
struct igb_tx_queue_stats {
@@ -205,22 +211,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- char name[IFNAMSIZ + 9];
-};
-
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +222,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
- u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
- u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_clean;
u16 next_to_use;
+ u16 next_to_alloc;
union {
/* TX */
@@ -251,12 +243,30 @@ struct igb_ring {
};
/* RX */
struct {
+ struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
- /* Items past this point are only used during ring alloc / free */
- dma_addr_t dma; /* phys address of the ring */
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
@@ -362,8 +372,6 @@ struct igb_adapter {
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
- u32 eeprom_wol;
-
u16 tx_ring_count;
u16 rx_ring_count;
unsigned int vfs_allocated_count;
@@ -373,7 +381,6 @@ struct igb_adapter {
u32 wvbr;
u32 *shadow_vfta;
-#ifdef CONFIG_IGB_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct delayed_work ptp_overflow_work;
@@ -382,17 +389,19 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
char fw_version[32];
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -436,18 +445,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(q_vector, skb);
+}
+
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea012849825..bfe9208c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/highmem.h>
#include "igb.h"
@@ -1623,6 +1624,20 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
reg &= ~E1000_CONNSW_ENRGSRC;
wr32(E1000_CONNSW, reg);
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and i350 devices.
+ */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ wr32(E1000_PCS_CFG0, reg);
+ break;
+ default:
+ break;
+ }
+
/* Set PCS register for forced speed */
reg = rd32(E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
@@ -1685,16 +1700,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = kmap(rx_buffer->page);
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ kunmap(rx_buffer->page);
+
+ return match;
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
- struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
- unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1738,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- total_bytes += tx_buffer_info->bytecount;
- total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1746,8 +1770,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
- txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
- netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
@@ -1957,54 +1980,6 @@ static void igb_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2014,10 +1989,7 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
/* apply any specific unsupported masks here */
@@ -2045,8 +2017,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
@@ -2301,7 +2272,6 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
case e1000_82576:
case e1000_82580:
case e1000_i350:
@@ -2337,12 +2307,288 @@ static int igb_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
}
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ipcnfg, eeer;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+ edata->advertised = ADVERTISED_1000baseT_Full;
+
+ if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+ edata->advertised |= ADVERTISED_100baseT_Full;
+
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ /* Report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised &= ~edata->advertised;
+ }
+
+ return 0;
+}
+
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI timer is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.advertised != edata->advertised) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE options are not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+ igb_set_eee_i350(hw);
+
+ /* reset link */
+ if (!netif_running(netdev))
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2629,10 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
.get_ts_info = igb_get_ts_info,
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
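
The new set_rxnfc path ultimately reprograms the MRQC register; the flag-to-register mapping in igb_set_rss_hash_opt() can be summarized as below. This is a sketch reusing the E1000_MRQC_* bits and the IGB_FLAG_RSS_FIELD_* flags introduced by this patch; the helper name is illustrative:

/* Build the MRQC value for a given adapter flags word, mirroring the
 * register update performed by igb_set_rss_hash_opt() when the UDP
 * hashing flags change.
 */
static u32 igb_build_mrqc(u32 mrqc, u32 flags)
{
	/* always hash on these packet types */
	mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
		E1000_MRQC_RSS_FIELD_IPV4_TCP |
		E1000_MRQC_RSS_FIELD_IPV6 |
		E1000_MRQC_RSS_FIELD_IPV6_TCP;

	/* UDP 4-tuple hashing is opt-in via the new flags */
	mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
		  E1000_MRQC_RSS_FIELD_IPV6_UDP);
	if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

	return mrqc;
}
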
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37ef12e..31cfe2ec75df 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
#include "igb.h"
#define MAJ 4
-#define MIN 0
-#define BUILD 1
+#define MIN 1
+#define BUILD 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -118,10 +118,11 @@ static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
+static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
@@ -228,7 +229,7 @@ static struct pci_driver igb_driver = {
.name = igb_driver_name,
.id_table = igb_pci_tbl,
.probe = igb_probe,
- .remove = __devexit_p(igb_remove),
+ .remove = igb_remove,
#ifdef CONFIG_PM
.driver.pm = &igb_pm_ops,
#endif
@@ -534,31 +535,27 @@ rx_ring_summary:
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX -------"
- "--------- %p%s\n", "RWB", i,
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb, next_desc);
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX"
- " %p%s\n", "R ", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb, next_desc);
+ next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->skb) {
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, buffer_info->skb->data,
- IGB_RX_HDR_LEN, true);
+ buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- PAGE_SIZE/2, true);
+ IGB_RX_BUFSZ, true);
}
}
}
@@ -656,80 +653,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
-static void igb_free_queues(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
- adapter->num_rx_queues = 0;
- adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
- adapter->tx_ring[i] = ring;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- /*
- * On i350, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- * */
- if (adapter->hw.mac.type >= e1000_i350)
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- adapter->rx_ring[i] = ring;
- }
-
- igb_cache_ring_register(adapter);
-
- return 0;
-
-err:
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -909,17 +832,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- int i, err = 0, vector = 0;
+ int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_other, 0, netdev->name, adapter);
if (err)
- goto out;
- vector++;
+ goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
+ vector++;
+
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx.ring && q_vector->tx.ring)
@@ -938,13 +862,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
igb_msix_ring, 0, q_vector->name,
q_vector);
if (err)
- goto out;
- vector++;
+ goto err_free;
}
igb_configure_msix(adapter);
return 0;
-out:
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
return err;
}
@@ -960,6 +893,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
}
/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+ * igb_get_stats64() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -969,17 +931,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
}
/**
@@ -990,7 +949,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
- igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
@@ -1001,11 +959,14 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
int err;
int numvecs, i;
+ if (!msix)
+ goto msi_only;
+
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
if (adapter->vfs_allocated_count)
@@ -1038,7 +999,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
- goto out;
+ return;
igb_reset_interrupt_capability(adapter);
@@ -1068,105 +1029,183 @@ msi_only:
adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
-out:
- /* Notify the stack of the (possibly) reduced queue counts. */
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- err = netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
- rtnl_unlock();
- return err;
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
}
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ int v_count, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
{
struct igb_q_vector *q_vector;
- struct e1000_hw *hw = &adapter->hw;
- int v_idx;
+ struct igb_ring *ring;
+ int ring_count, size;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
- q_vector->adapter = adapter;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
}
- return 0;
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
-err_out:
- igb_free_q_vectors(adapter);
- return -ENOMEM;
-}
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
- q_vector->rx.ring = adapter->rx_ring[ring_idx];
- q_vector->rx.ring->q_vector = q_vector;
- q_vector->rx.count++;
- q_vector->itr_val = adapter->rx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
-}
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /*
+ * On i350, i210, and i211, loopback VLAN packets
+ * have the tag byte-swapped.
+ */
+ if (adapter->hw.mac.type >= e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
- q_vector->tx.ring = adapter->tx_ring[ring_idx];
- q_vector->tx.ring->q_vector = q_vector;
- q_vector->tx.count++;
- q_vector->itr_val = adapter->tx_itr_setting;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
}
+
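igb_alloc_q_vector above sizes a single kzalloc to hold the q_vector followed by the rings it services, so the vector and its rings come from one contiguous block and disappear together when igb_free_q_vector calls kfree_rcu. A simplified user-space sketch of that combined-allocation pattern, using stand-in types rather than the driver's structures:

	/* Illustrative only: one allocation for a vector plus its rings */
	#include <stdio.h>
	#include <stdlib.h>

	struct ring {				/* stand-in for struct igb_ring */
		int count;
		int queue_index;
	};

	struct vector {				/* stand-in for struct igb_q_vector */
		int itr_val;
		struct ring ring[];		/* rings live at the tail of the block */
	};

	static struct vector *alloc_vector(int ring_count)
	{
		/* one allocation covers the vector and all of its rings */
		return calloc(1, sizeof(struct vector) +
				 sizeof(struct ring) * ring_count);
	}

	int main(void)
	{
		struct vector *v = alloc_vector(2);	/* one Tx ring, one Rx ring */

		if (!v)
			return 1;
		v->ring[0].queue_index = 0;		/* Tx */
		v->ring[1].queue_index = 0;		/* Rx */
		printf("vector and %d rings share one block at %p\n", 2, (void *)v);
		free(v);				/* everything goes away together */
		return 0;
	}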
/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function maps the recently allocated queues to vectors.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
- int i;
- int v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
- (adapter->num_q_vectors < adapter->num_tx_queues))
- return -ENOMEM;
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
- if (adapter->num_q_vectors >=
- (adapter->num_rx_queues + adapter->num_tx_queues)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
- } else {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (i < adapter->num_tx_queues)
- igb_map_tx_ring_to_vector(adapter, i, v_idx);
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
}
- for (; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
}
/**
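When there are fewer vectors than rings, the second loop in igb_alloc_q_vectors hands each remaining vector DIV_ROUND_UP(remaining, vectors_left) rings of each kind, so Tx rings are packed onto the earliest vectors while every vector still receives an Rx ring. A small sketch of the arithmetic with example counts (4 Rx rings, 2 Tx rings, 4 vectors):

	/* Illustrative only: how rings spread across vectors (example counts) */
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		int q_vectors = 4, rxr_remaining = 4, txr_remaining = 2;

		for (int v_idx = 0; v_idx < q_vectors; v_idx++) {
			int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
			int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

			printf("vector %d: %d Rx ring(s), %d Tx ring(s)\n",
			       v_idx, rqpv, tqpv);
			rxr_remaining -= rqpv;
			txr_remaining -= tqpv;
		}
		return 0;
	}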
@@ -1174,14 +1213,12 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter)
*
* This function initializes the interrupts and allocates all of the queues.
**/
-static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
struct pci_dev *pdev = adapter->pdev;
int err;
- err = igb_set_interrupt_capability(adapter);
- if (err)
- return err;
+ igb_set_interrupt_capability(adapter, msix);
err = igb_alloc_q_vectors(adapter);
if (err) {
@@ -1189,24 +1226,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- err = igb_map_ring_to_vector(adapter);
- if (err) {
- dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
- goto err_map_queues;
- }
-
+ igb_cache_ring_register(adapter);
return 0;
-err_map_queues:
- igb_free_queues(adapter);
-err_alloc_queues:
- igb_free_q_vectors(adapter);
+
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
@@ -1229,29 +1252,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err)
goto request_done;
/* fall back to MSI */
- igb_clear_interrupt_scheme(adapter);
- if (!pci_enable_msi(pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
- adapter->num_q_vectors = 1;
- err = igb_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for vectors\n");
- goto request_done;
- }
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for queues\n");
- igb_free_q_vectors(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
goto request_done;
- }
+
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
}
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1587,8 +1598,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 hwm;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -1663,7 +1673,7 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
@@ -1706,10 +1716,8 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
-#ifdef CONFIG_IGB_PTP
/* Re-enable PTP, where applicable. */
igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
igb_get_phy_info(hw);
}
@@ -1783,58 +1791,34 @@ static const struct net_device_ops igb_netdev_ops = {
void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 major, build, patch, fw_version;
- u32 etrack_id;
-
- hw->nvm.ops.read(hw, 5, 1, &fw_version);
- if (adapter->hw.mac.type != e1000_i211) {
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
- etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
-
- /* combo image version needs to be found */
- hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) &&
- (comb_offset != IGB_NVM_VER_INVALID)) {
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
- + 1), 1, &comb_verh);
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
- 1, &comb_verl);
-
- /* Only display Option Rom if it exists and is valid */
- if ((comb_verh && comb_verl) &&
- ((comb_verh != IGB_NVM_VER_INVALID) &&
- (comb_verl != IGB_NVM_VER_INVALID))) {
- major = comb_verl >> IGB_COMB_VER_SHFT;
- build = (comb_verl << IGB_COMB_VER_SHFT) |
- (comb_verh >> IGB_COMB_VER_SHFT);
- patch = comb_verh & IGB_COMB_VER_MASK;
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x, %d.%d.%d",
- (fw_version & IGB_MAJOR_MASK) >>
- IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >>
- IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK),
- etrack_id, major, build, patch);
- goto out;
- }
- }
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK), etrack_id);
- } else {
+ struct e1000_fw_version fw;
+
+ igb_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i211:
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK));
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+
+ default:
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ }
+ break;
}
-out:
return;
}
@@ -1849,8 +1833,7 @@ out:
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit igb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igb_adapter *adapter;
@@ -1861,7 +1844,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
- u16 eeprom_apme_mask = IGB_EEPROM_APME;
u8 part_str[E1000_PBANUM_LENGTH];
/* Catch broken hardware that put the wrong VF device ID in
@@ -2069,28 +2051,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_validate_mdi_setting(hw);
- /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
- * enable the ACPI Magic Packet filter
- */
-
+ /* By default, support wake on port A */
if (hw->bus.func == 0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support on non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
else if (hw->bus.func == 1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
- if (eeprom_data & eeprom_apme_mask)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
@@ -2098,24 +2079,38 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
@@ -2141,10 +2136,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
#endif
-#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
@@ -2212,16 +2205,14 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit igb_remove(struct pci_dev *pdev)
+static void igb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
/*
* The watchdog timer may be rescheduled, so explicitly
@@ -2294,7 +2285,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
* more expensive time wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
-static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
+static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
@@ -2355,7 +2346,7 @@ out:
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit igb_sw_init(struct igb_adapter *adapter)
+static int igb_sw_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
@@ -2461,7 +2452,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
GFP_ATOMIC);
/* This call may decrease the number of queues */
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -2531,6 +2522,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
@@ -2560,6 +2562,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return 0;
+err_set_queues:
+ igb_free_irq(adapter);
err_req_irq:
igb_release_hw_control(adapter);
igb_power_down_link(adapter);
@@ -2637,10 +2641,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2777,18 +2779,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2893,18 +2893,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
- /*
- * Generate RSS hash based on TCP port numbers and/or
- * IPv4/v6 src and dst addresses since UDP cannot be
- * hashed reliably due to IP fragmentation
- */
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue */
@@ -3106,16 +3109,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#ifdef CONFIG_IGB_PTP
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3302,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
unsigned long size;
u16 i;
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- if (buffer_info->page_dma) {
- dma_unmap_page(rx_ring->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_offset = 0;
- }
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3331,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -4028,6 +4017,9 @@ static int igb_tso(struct igb_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -4148,26 +4140,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
+
+ /* set segmentation bits for TSO */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
-#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
- cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
- /* set segmentation bits for TSO */
- if (tx_flags & IGB_TX_FLAGS_TSO)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
return cmd_type;
}
@@ -4178,19 +4176,19 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
{
u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring if any offload is enabled */
- if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
- test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
/* insert L4 checksum */
- if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
- /* insert IPv4 checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- }
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
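The IGB_SET_FLAG macro introduced above translates a software flag bit into the corresponding hardware descriptor bit without a branch: when the destination mask is the larger of the two, the masked input is multiplied by the ratio of the masks (a constant shift at compile time); otherwise it is divided. A self-contained sketch with hypothetical stand-in constants, not the driver's real definitions:

	/* Illustrative only: branchless flag-to-bit translation */
	#include <stdio.h>

	#define SET_FLAG(_input, _flag, _result) \
		((_flag <= _result) ? \
		 ((unsigned int)((_input) & (_flag)) * ((_result) / (_flag))) : \
		 ((unsigned int)((_input) & (_flag)) / ((_flag) / (_result))))

	#define SW_FLAG_VLAN	0x00000001u	/* software tx_flags bit (assumed) */
	#define HW_CMD_VLE	0x40000000u	/* descriptor command bit (assumed) */

	int main(void)
	{
		unsigned int tx_flags = SW_FLAG_VLAN;

		/* both masks are single bits, so the multiply reduces to a
		 * constant shift and the bit moves without a conditional */
		printf("cmd_type bit: 0x%08x\n",
		       SET_FLAG(tx_flags, SW_FLAG_VLAN, HW_CMD_VLE));
		return 0;
	}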
@@ -4209,33 +4207,37 @@ static void igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
dma_addr_t dma;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
- __le32 cmd_type;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IGB_TX_DESC(tx_ring, i);
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
- cmd_type = igb_tx_cmd_type(tx_flags);
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -4243,18 +4245,18 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IGB_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD;
- tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -4262,32 +4264,22 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
+ size, DMA_TO_DEVICE);
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.olinfo_status = 0;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- frag++;
}
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
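A mapped buffer larger than one descriptor can carry is split by the inner loop above into IGB_MAX_DATA_PER_TXD-sized pieces, with the remainder written by the final descriptor. A tiny sketch of that arithmetic, using an assumed per-descriptor limit rather than the driver's actual constant:

	/* Illustrative only: splitting one mapping across descriptors */
	#include <stdio.h>

	#define MAX_DATA_PER_TXD 16384u		/* per-descriptor limit (assumed) */

	int main(void)
	{
		unsigned int size = 40000, desc = 0;

		while (size > MAX_DATA_PER_TXD) {
			printf("descriptor %u carries %u bytes\n",
			       desc++, MAX_DATA_PER_TXD);
			size -= MAX_DATA_PER_TXD;
		}
		printf("descriptor %u carries the final %u bytes\n", desc, size);
		return 0;
	}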
@@ -4372,9 +4364,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
-#ifdef CONFIG_IGB_PTP
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4397,7 +4387,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
-#ifdef CONFIG_IGB_PTP
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4396,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
-#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4455,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4789,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -4811,7 +4799,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
wr32(E1000_EIMS, adapter->eims_other);
@@ -4851,45 +4838,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
}
#ifdef CONFIG_IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_TXCTRL_CPUID_SHIFT;
- }
- dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_RXCTRL_CPUID_SHIFT;
- }
- dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
- wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
q_vector->cpu = cpu;
out_no_update:
put_cpu();
@@ -5545,7 +5550,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5556,7 +5560,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5599,7 +5602,6 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5610,7 +5612,6 @@ static irqreturn_t igb_intr(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5840,6 +5841,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return !!budget;
}
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+{
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
+
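On systems with 4 KB pages each receive buffer is half a page, and the XOR of page_offset with IGB_RX_BUFSZ above toggles between the two halves, so a page whose first half was just handed to the stack can immediately be re-posted to hardware using its other half. A small sketch of the toggle, assuming 4096-byte pages and 2048-byte buffers:

	/* Illustrative only: half-page buffer flip (sizes assumed) */
	#include <stdio.h>

	#define EXAMPLE_RX_BUFSZ 2048u		/* half of an assumed 4096-byte page */

	int main(void)
	{
		unsigned int page_offset = 0;

		/* each completed receive flips to the other half of the page */
		for (int i = 0; i < 4; i++) {
			printf("receive %d uses bytes %u-%u of the page\n",
			       i, page_offset, page_offset + EXAMPLE_RX_BUFSZ - 1);
			page_offset ^= EXAMPLE_RX_BUFSZ;
		}
		return 0;
	}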
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -5889,224 +6065,389 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-static void igb_rx_vlan(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
{
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ u32 ntc = rx_ring->next_to_clean + 1;
- __vlan_hwaccel_put_tag(skb, vid);
- }
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
}
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
-{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!hdr.ipv4->frag_off)
+ nexthdr = hdr.ipv4->protocol;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return hdr.network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
*/
- u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IGB_RX_HDR_LEN)
- hlen = IGB_RX_HDR_LEN;
- return hlen;
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
}
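For the common untagged IPv4 TCP case the function above walks the Ethernet, IP, and TCP headers and reports their combined length, so only that many bytes need to be pulled into the linear skb area while the payload stays in the page fragment. A worked example for a frame with no IP or TCP options:

	/* Illustrative only: pull length for a plain IPv4 TCP frame */
	#include <stdio.h>

	int main(void)
	{
		unsigned int eth_hlen = 14, ip_hlen = 20, tcp_hlen = 20;

		/* 14 + 20 + 20 = 54 header bytes end up in the linear area */
		printf("pull length: %u bytes\n", eth_hlen + ip_hlen + tcp_hlen);
		return 0;
	}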
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- union e1000_adv_rx_desc *rx_desc;
- const int current_node = numa_node_id();
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
- u16 i = rx_ring->next_to_clean;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_desc = IGB_RX_DESC(rx_ring, i);
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- struct sk_buff *skb = buffer_info->skb;
- union e1000_adv_rx_desc *next_rxd;
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- buffer_info->skb = NULL;
- prefetch(skb->data);
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
- next_rxd = IGB_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (!skb_is_nonlinear(skb)) {
- __skb_put(skb, igb_get_hlen(rx_desc));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
}
+ }
- if (rx_desc->wb.upper.length) {
- u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += PAGE_SIZE / 2;
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
- if ((page_count(buffer_info->page) != 1) ||
- (page_to_nid(buffer_info->page) != current_node))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
+ return false;
+}
- dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
- struct igb_rx_buffer *next_buffer;
- next_buffer = &rx_ring->rx_buffer_info[i];
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
+ igb_rx_hash(rx_ring, rx_desc, skb);
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK))
- && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
- dev_kfree_skb_any(skb);
- goto next_desc;
- }
+ igb_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IGB_PTP
- igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
- igb_rx_hash(rx_ring, rx_desc, skb);
- igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_rx_vlan(rx_ring, rx_desc, skb);
+ igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
- total_bytes += skb->len;
- total_packets++;
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
- napi_gro_receive(&q_vector->napi, skb);
+ skb_record_rx_queue(skb, rx_ring->queue_index);
- budget--;
-next_desc:
- if (!budget)
- break;
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
- cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
- }
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- rx_ring->next_to_clean = i;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
- return !!budget;
-}
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct sk_buff *skb = bi->skb;
- dma_addr_t dma = bi->dma;
+ cleaned_count++;
- if (dma)
- return true;
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- bi->skb = skb;
- if (!skb) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- /* initialize skb for ring */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- }
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
- dma = dma_map_single(rx_ring->dev, skb->data,
- IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
- if (dma_mapping_error(rx_ring->dev, dma)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ napi_gro_receive(&q_vector->napi, skb);
- bi->dma = dma;
- return true;
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return (total_packets < budget);
}
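igb_clean_rx_irq() now returns true only when it used less than its budget, which is what the NAPI poll handler keys off. A hedged sketch of how that return value is typically consumed; example_poll() is hypothetical, and the real igb_poll() also handles Tx cleanup and interrupt re-enabling:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct igb_q_vector *q_vector =
			container_of(napi, struct igb_q_vector, napi);

		/* budget not exhausted? then the ring is drained */
		if (!igb_clean_rx_irq(q_vector, budget))
			return budget;	/* more work pending, stay in polling mode */

		napi_complete(napi);	/* done: leave polling mode */
		return 0;
	}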
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t page_dma = bi->page_dma;
- unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+ dma_addr_t dma;
- if (page_dma)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
return true;
- if (!page) {
- page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
- bi->page = page;
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ /* alloc new page for storage */
+ page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
}
- page_dma = dma_map_page(rx_ring->dev, page,
- page_offset, PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
- if (dma_mapping_error(rx_ring->dev, page_dma)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
- bi->page_dma = page_dma;
- bi->page_offset = page_offset;
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
return true;
}
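Mapping the entire page (rather than half of it) is what allows buffers to be recycled without remapping. A hedged illustration of the half-page flip the reuse path relies on; the helper name and the PAGE_SIZE/2 split are assumptions, as the recycling code itself is outside the hunks shown:

	static void example_flip_rx_half(struct igb_rx_buffer *bi)
	{
		/* toggle between offset 0 and PAGE_SIZE / 2 in the same mapped page */
		bi->page_offset ^= PAGE_SIZE / 2;
	}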
@@ -6120,22 +6461,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
- while (cleaned_count--) {
- if (!igb_alloc_mapped_skb(rx_ring, bi))
- break;
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
+ do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -6148,17 +6490,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
- }
+
+ cleaned_count--;
+ } while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
rx_ring->next_to_use = i;
- /* Force memory writes to complete before letting h/w
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, rx_ring->tail);
}
@@ -6207,10 +6557,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
@@ -6478,7 +6826,7 @@ static int igb_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -6492,7 +6840,9 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
+ rtnl_lock();
err = __igb_open(netdev, true);
+ rtnl_unlock();
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445157a3..ab3429729bde 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
}
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting
+ * at byte 8 of the buffer.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
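A hedged caller sketch for this packet-stamp path; it assumes the local variables of igb_pull_tail() in igb_main.c (rx_desc, rx_ring, va, skb), whose actual call site sits just above the hunk shown earlier:

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		/* the timestamp occupies the first 16 bytes of the buffer */
		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGB_TS_HDR_LEN;	/* packet data follows the header */
	}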
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
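A hedged sketch of the gating the Rx path is expected to apply before calling this register-based helper: read the latched registers only when the descriptor flags a timestamp (TS) that was not placed in the packet buffer (TSIP). The wrapper performing this check is assumed to live in igb.h and is not part of the hunks shown:

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);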
@@ -532,18 +553,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
@@ -554,31 +563,33 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /* Timestamping all packets is required to support both V1 Sync and
+ * Delay_Req messages, and the 82576 cannot timestamp all packets.
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
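The filter values in this switch arrive from userspace through the SIOCSHWTSTAMP ioctl. A hedged userspace sketch (standard net_tstamp usage, not part of this patch) requesting PTP v2 event timestamping, which the code above maps to E1000_TSYNCRXCTL_TYPE_EVENT_V2; request_rx_timestamps() and its error handling are illustrative only:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* fd is any open socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
	static int request_rx_timestamps(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg;
		struct ifreq ifr;

		memset(&cfg, 0, sizeof(cfg));
		cfg.tx_type = HWTSTAMP_TX_ON;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;

		/* on success the driver writes back the filter it actually set */
		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}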
@@ -596,6 +607,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
if ((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {