Diffstat (limited to 'drivers/net/ethernet/intel')
-rw-r--r--  drivers/net/ethernet/intel/Kconfig                 |   30
-rw-r--r--  drivers/net/ethernet/intel/e100.c                  |    7
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c   |    2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c        |   17
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c      |   13
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_param.c     |   14
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c    |   66
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c          |  115
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h        |   27
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h          |   17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c        |   69
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h             |    6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c        |  243
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c            |  135
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c         |    9
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c         |  331
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c            |   15
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c          |   60
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c            |  141
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile            |    4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c       |   77
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h       |    3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h     |   22
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c        |  369
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.h        |   17
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c         |  128
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.h         |    1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c         |   99
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.h         |   16
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.c         |   49
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h         |    1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h               |  112
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c       |  406
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c          | 1572
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c           |  102
-rw-r--r--  drivers/net/ethernet/intel/igbvf/defines.h         |    1
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h           |    2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c          |   41
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c        |   10
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_param.c       |    6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile          |    3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h           |   15
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c     |  160
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c    |  100
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h    |    3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c   |  115
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c   |  109
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c      |    4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c       |   22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c      |  418
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h       |   31
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c       |  164
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c     |  470
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h      |   12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c      |    2
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h       |    7
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h       |   16
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c  |  403
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.h           |   10
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c            |   61
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h            |    2
61 files changed, 3911 insertions(+), 2571 deletions(-)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 0cafe4fe9406..ddee4060948a 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -93,6 +93,7 @@ config E1000E
config IGB
tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
depends on PCI
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -120,19 +121,6 @@ config IGB_DCA
driver. DCA is a method for warming the CPU cache before data
is used, with the intent of lessening the impact of cache misses.
-config IGB_PTP
- bool "PTP Hardware Clock (PHC)"
- default n
- depends on IGB && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want to use PTP Hardware Clock (PHC) in the
- driver. Only the basic clock operations have been implemented.
-
- Every timestamp and clock read operations must consult the
- overflow counter to form a correct time value.
-
config IGBVF
tristate "Intel(R) 82576 Virtual Function Ethernet support"
depends on PCI
@@ -178,8 +166,9 @@ config IXGB
config IXGBE
tristate "Intel(R) 10GbE PCI Express adapters support"
- depends on PCI && INET
+ depends on PCI
select MDIO
+ select PTP_1588_CLOCK
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
@@ -222,19 +211,6 @@ config IXGBE_DCB
If unsure, say N.
-config IXGBE_PTP
- bool "PTP Clock Support"
- default n
- depends on IXGBE && EXPERIMENTAL
- select PPS
- select PTP_1588_CLOCK
- ---help---
- Say Y here if you want support for 1588 Timestamping with a
- PHC device, using the PTP 1588 Clock support. This is
- required to enable timestamping support for the device.
-
- If unsure, say N.
-
config IXGBEVF
tristate "Intel(R) 82599 Virtual Function Ethernet support"
depends on PCI_MSI
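
With the separate IGB_PTP/IXGBE_PTP bool options gone, both drivers select
PTP_1588_CLOCK unconditionally, so the PHC code in igb_ptp.c/ixgbe_ptp.c is
always built. For reference, a minimal sketch of the PHC registration pattern
such drivers use — names and the stub callback here are hypothetical, not code
from this patch, and the timespec-based callback signature is the one current
in this kernel era:

    #include <linux/ptp_clock_kernel.h>

    /* hypothetical callback: read the NIC's hardware clock */
    static int foo_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
    {
            u64 ns = 0;     /* assumption: read the SYSTIM registers here */

            *ts = ns_to_timespec(ns);
            return 0;
    }

    static struct ptp_clock_info foo_ptp_caps = {
            .owner   = THIS_MODULE,
            .name    = "foo_nic",
            .max_adj = 62499999,            /* max freq offset, in ppb */
            .gettime = foo_ptp_gettime,
            /* .settime, .adjtime, .adjfreq, .enable omitted for brevity */
    };

    /* in probe, once the MAC is initialized: */
    adapter->ptp_clock = ptp_clock_register(&foo_ptp_caps, &pdev->dev);
    if (IS_ERR(adapter->ptp_clock))
            adapter->ptp_clock = NULL;      /* keep running without a PHC */

Registration failure is non-fatal in this pattern: the NIC still passes
traffic, it just exposes no /dev/ptpN clock.
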
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 29ce9bd27f94..a59f0779e1c3 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2829,8 +2829,7 @@ static const struct net_device_ops e100_netdev_ops = {
.ndo_set_features = e100_set_features,
};
-static int __devinit e100_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct nic *nic;
@@ -2981,7 +2980,7 @@ err_out_free_dev:
return err;
}
-static void __devexit e100_remove(struct pci_dev *pdev)
+static void e100_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3167,7 +3166,7 @@ static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
.probe = e100_probe,
- .remove = __devexit_p(e100_remove),
+ .remove = e100_remove,
#ifdef CONFIG_PM
/* Power Management hooks */
.suspend = e100_suspend,
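
The __devinit/__devexit/__devexit_p() removals seen here (and repeated across
the series) follow the CONFIG_HOTPLUG cleanup: probe/remove are now ordinary
functions in ordinary sections. A minimal modern skeleton of the resulting
shape — driver name and ID table are hypothetical:

    static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            /* driver-specific setup would follow */
            return pci_enable_device(pdev);
    }

    static void foo_remove(struct pci_dev *pdev)
    {
            pci_disable_device(pdev);
    }

    static struct pci_driver foo_driver = {
            .name     = "foo",
            .id_table = foo_pci_tbl,        /* assumed defined elsewhere */
            .probe    = foo_probe,          /* no __devinit annotation */
            .remove   = foo_remove,         /* no __devexit_p() wrapper */
    };
    module_pci_driver(foo_driver);
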
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 9089d00f1421..14e30515f6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1671,7 +1671,7 @@ static void e1000_get_wol(struct net_device *netdev,
/* apply any specific unsupported masks here */
switch (hw->device_id) {
case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
- /* KSP3 does not suppport UCAST wake-ups */
+ /* KSP3 does not support UCAST wake-ups */
wol->supported &= ~WAKE_UCAST;
if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 3d6839528761..8fedd2451538 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -107,6 +107,7 @@ u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = {
};
static DEFINE_SPINLOCK(e1000_eeprom_lock);
+static DEFINE_SPINLOCK(e1000_phy_lock);
/**
* e1000_set_phy_type - Set the phy type member in the hw struct.
@@ -2830,19 +2831,25 @@ static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw)
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_read_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
@@ -2965,19 +2972,25 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
{
u32 ret_val;
+ unsigned long flags;
e_dbg("e1000_write_phy_reg");
+ spin_lock_irqsave(&e1000_phy_lock, flags);
+
if ((hw->phy_type == e1000_phy_igp) &&
(reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
(u16) reg_addr);
- if (ret_val)
+ if (ret_val) {
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
+ }
}
ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
phy_data);
+ spin_unlock_irqrestore(&e1000_phy_lock, flags);
return ret_val;
}
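
The new e1000_phy_lock closes a race where two callers could interleave the
page-select write with another caller's register access. Note the patch has to
unlock on every early-return path; a sketch of the same two-step pattern with
a single unlock site (helper names hypothetical):

    static DEFINE_SPINLOCK(foo_phy_lock);

    static s32 foo_read_phy_reg(struct e1000_hw *hw, u32 reg, u16 *data)
    {
            unsigned long flags;
            s32 ret = 0;

            spin_lock_irqsave(&foo_phy_lock, flags);
            if (reg > MAX_PHY_MULTI_PAGE_REG)       /* step 1: select page */
                    ret = foo_write_page_select(hw, (u16)reg);
            if (!ret)                               /* step 2: the access */
                    ret = foo_read_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg, data);
            spin_unlock_irqrestore(&foo_phy_lock, flags);
            return ret;
    }

The irqsave variant is the conservative choice, presumably because these PHY
helpers can be reached from timer/watchdog context as well as process context.
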
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 222bfaff4622..294da56b824c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -111,7 +111,7 @@ void e1000_update_stats(struct e1000_adapter *adapter);
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void __devexit e1000_remove(struct pci_dev *pdev);
+static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
static int e1000_open(struct net_device *netdev);
@@ -202,7 +202,7 @@ static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
- .remove = __devexit_p(e1000_remove),
+ .remove = e1000_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = e1000_suspend,
@@ -938,8 +938,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
@@ -1273,7 +1272,7 @@ err_pci_reg:
* memory.
**/
-static void __devexit e1000_remove(struct pci_dev *pdev)
+static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -1309,7 +1308,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* e1000_init_hw_struct MUST be called before this function
**/
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+static int e1000_sw_init(struct e1000_adapter *adapter)
{
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
@@ -1340,7 +1339,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
* number of queues at compile-time.
**/
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
adapter->tx_ring = kcalloc(adapter->num_tx_queues,
sizeof(struct e1000_tx_ring), GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_param.c b/drivers/net/ethernet/intel/e1000/e1000_param.c
index 1301eba8b57a..750fc0194f37 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_param.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_param.c
@@ -45,7 +45,7 @@
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int __devinitdata X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+ static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
@@ -205,9 +205,9 @@ struct e1000_option {
} arg;
};
-static int __devinit e1000_validate_option(unsigned int *value,
- const struct e1000_option *opt,
- struct e1000_adapter *adapter)
+static int e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -268,7 +268,7 @@ static void e1000_check_copper_options(struct e1000_adapter *adapter);
* in a variable in the adapter structure.
**/
-void __devinit e1000_check_options(struct e1000_adapter *adapter)
+void e1000_check_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
int bd = adapter->bd_number;
@@ -534,7 +534,7 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on fiber adapters
**/
-static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
{
int bd = adapter->bd_number;
if (num_Speed > bd) {
@@ -560,7 +560,7 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
* Handles speed and duplex options on copper adapters
**/
-static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
{
struct e1000_option opt;
unsigned int speed, dplx, an;
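
With __devinitdata dropped, the E1000_PARAM macro above expands to a plain
module_param_array_named. For example, the driver's existing
E1000_PARAM(TxDescriptors, "Number of transmit descriptors") now expands to
roughly:

    static int TxDescriptors[E1000_MAX_NIC + 1] = E1000_PARAM_INIT;
    static unsigned int num_TxDescriptors;
    module_param_array_named(TxDescriptors, TxDescriptors, int,
                             &num_TxDescriptors, 0);
    MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
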
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4dd18a1f45d2..e73c2c355993 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+/* 80003ES2LAN Gigabit Ethernet Controller (Copper)
* 80003ES2LAN Gigabit Ethernet Controller (Serdes)
*/
@@ -80,7 +79,8 @@
1 = 50-80M
2 = 80-110M
3 = 110-140M
- 4 = >140M */
+ 4 = >140M
+ */
/* Kumeran Mode Control Register (Page 193, Register 16) */
#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
@@ -95,8 +95,7 @@
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
-/*
- * A table for the GG82563 cable length where the range is defined
+/* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
@@ -183,8 +182,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -375,8 +373,7 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
if (!(swfw_sync & (fwmask | swmask)))
break;
- /*
- * Firmware currently using resource (fwmask)
+ /* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000e_put_hw_semaphore(hw);
@@ -442,8 +439,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
- /*
- * Use Alternative Page Select register to access
+ /* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -457,8 +453,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
+ /* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@@ -513,8 +508,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
page_select = GG82563_PHY_PAGE_SELECT;
} else {
- /*
- * Use Alternative Page Select register to access
+ /* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -528,8 +522,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
}
if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
- /*
- * The "ready" bit in the MDIC register may be incorrectly set
+ /* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@@ -618,8 +611,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -657,8 +649,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
return ret_val;
if (!link) {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1000e_phy_reset_dsp(hw);
@@ -677,8 +668,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Resetting the phy means we need to verify the TX_CLK corresponds
+ /* Resetting the phy means we need to verify the TX_CLK corresponds
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
*/
phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -687,8 +677,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
else
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -766,8 +755,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
s32 ret_val;
u16 kum_reg_data;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -899,8 +887,7 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
}
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -945,8 +932,7 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
reg |= (1 << 28);
ew32(TARC(1), reg);
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
reg = er32(RFCTL);
@@ -979,8 +965,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -1006,8 +991,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -1065,8 +1049,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Do not init these registers when the HW is in IAMT mode, since the
+ /* Do not init these registers when the HW is in IAMT mode, since the
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
@@ -1087,8 +1070,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Workaround: Disable padding in Kumeran interface in the MAC
+ /* Workaround: Disable padding in Kumeran interface in the MAC
* and in the PHY to avoid CRC errors.
*/
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1121,8 +1103,7 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
- /*
- * Set the mac to wait the maximum time between each
+ /* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
@@ -1352,8 +1333,7 @@ static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
{
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
+ /* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index c98586408005..c77d010d5c59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 82571EB Gigabit Ethernet Controller
+/* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Copper)
* 82571EB Gigabit Ethernet Controller (Fiber)
* 82571EB Dual Port Gigabit Mezzanine Adapter
@@ -191,8 +190,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
- /*
- * Autonomous Flash update bit must be cleared due
+ /* Autonomous Flash update bit must be cleared due
* to Flash update issue.
*/
eecd &= ~E1000_EECD_AUPDEN;
@@ -204,8 +202,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
- /*
- * Added to a constant, "size" becomes the left-shift value
+ /* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
@@ -291,8 +288,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
/* FWSM register */
mac->has_fwsm = true;
- /*
- * ARC supported; valid only if manageability features are
+ /* ARC supported; valid only if manageability features are
* enabled.
*/
mac->arc_subsystem_valid = !!(er32(FWSM) &
@@ -314,8 +310,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
break;
}
- /*
- * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+ /* Ensure that the inter-port SWSM.SMBI lock bit is clear before
* first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
@@ -352,11 +347,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
}
- /*
- * Initialize device specific counter of SMBI acquisition
- * timeouts.
- */
- hw->dev_spec.e82571.smb_counter = 0;
+ /* Initialize device specific counter of SMBI acquisition timeouts. */
+ hw->dev_spec.e82571.smb_counter = 0;
return 0;
}
@@ -445,8 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * The 82571 firmware may still be configuring the PHY.
+ /* The 82571 firmware may still be configuring the PHY.
* In this case, we cannot access the PHY until the
* configuration is done. So we explicitly set the
* PHY ID.
@@ -492,8 +483,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
s32 fw_timeout = hw->nvm.word_size + 1;
s32 i = 0;
- /*
- * If we have timedout 3 times on trying to acquire
+ /* If we have timed out 3 times trying to acquire
* the inter-port SMBI semaphore, there is old code
* operating on the other port, and it is not
* releasing SMBI. Modify the number of times that
@@ -787,8 +777,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * If our nvm is an EEPROM, then we're done
+ /* If our nvm is an EEPROM, then we're done
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
@@ -806,8 +795,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
/* Reset the firmware if using STM opcode. */
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
- /*
- * The enabling of and the actual reset must be done
+ /* The enabling of and the actual reset must be done
* in two write cycles.
*/
ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -867,8 +855,7 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u32 i, eewr = 0;
s32 ret_val = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -957,8 +944,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -1002,8 +988,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
u32 ctrl, ctrl_ext, eecd, tctl;
s32 ret_val;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -1021,8 +1006,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
usleep_range(10000, 20000);
- /*
- * Must acquire the MDIO ownership before MAC reset.
+ /* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
switch (hw->mac.type) {
@@ -1067,8 +1051,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* We don't want to continue accessing MAC registers. */
return ret_val;
- /*
- * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+ /* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
@@ -1076,8 +1059,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * REQ and GNT bits need to be cleared when using AUTO_RD
+ /* REQ and GNT bits need to be cleared when using AUTO_RD
* to access the EEPROM.
*/
eecd = er32(EECD);
@@ -1138,8 +1120,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
e_dbg("Initializing the IEEE VLAN\n");
mac->ops.clear_vfta(hw);
- /* Setup the receive address. */
- /*
+ /* Setup the receive address.
* If, however, a locally administered address was assigned to the
* 82571, we must reserve a RAR for it to work around an issue where
* resetting one port will reload the MAC on the other port.
@@ -1183,8 +1164,7 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
break;
}
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -1281,8 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
ew32(PBA_ECC, reg);
}
- /*
- * Workaround for hardware errata.
+ /* Workaround for hardware errata.
* Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
*/
if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
@@ -1291,8 +1270,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
ew32(CTRL_EXT, reg);
}
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
if (hw->mac.type <= e1000_82573) {
@@ -1309,8 +1287,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg |= (1 << 22);
ew32(GCR, reg);
- /*
- * Workaround for hardware errata.
+ /* Workaround for hardware errata documented in the errata docs.
* Fixes an issue where some error-prone or unreliable PCIe
* completions occur, particularly with ASPM enabled.
@@ -1344,8 +1321,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
if (hw->mng_cookie.vlan_id != 0) {
- /*
- * The VFTA is a 4096b bit-field, each identifying
+ /* The VFTA is a 4096b bit-field, each identifying
* a single VLAN ID. The following operations
* determine which 32b entry (i.e. offset) into the
* array we want to set the VLAN ID (i.e. bit) of
@@ -1362,8 +1338,7 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
break;
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /*
- * If the offset we want to clear is the same offset of the
+ /* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
@@ -1401,8 +1376,7 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
ctrl = hw->mac.ledctl_mode2;
if (!(E1000_STATUS_LU & er32(STATUS))) {
- /*
- * If no link, then turn LED on by setting the invert bit
+ /* If no link, then turn LED on by setting the invert bit
* for each LED that's "on" (0x0E) in ledctl_mode2.
*/
for (i = 0; i < 4; i++)
@@ -1427,8 +1401,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
u16 receive_errors = 0;
s32 ret_val = 0;
- /*
- * Read PHY Receive Error counter first, if its is max - all F's then
+ /* Read PHY Receive Error counter first; if it's max - all F's - then
* read the Base1000T status register. If both are max, the PHY is hung.
*/
ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
@@ -1458,8 +1431,7 @@ bool e1000_check_phy_82574(struct e1000_hw *hw)
**/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
- /*
- * 82573 does not have a word in the NVM to determine
+ /* 82573 does not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
@@ -1526,8 +1498,7 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- /*
- * If SerDes loopback mode is entered, there is no form
+ /* If SerDes loopback mode is entered, there is no form
* of reset to take the adapter out of that mode. So we
* have to explicitly take the adapter out of loopback
* mode. This prevents drivers from twiddling their thumbs
@@ -1584,8 +1555,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
switch (mac->serdes_link_state) {
case e1000_serdes_link_autoneg_complete:
if (!(status & E1000_STATUS_LU)) {
- /*
- * We have lost link, retry autoneg before
+ /* We have lost link, retry autoneg before
* reporting link failure
*/
mac->serdes_link_state =
@@ -1598,8 +1568,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
break;
case e1000_serdes_link_forced_up:
- /*
- * If we are receiving /C/ ordered sets, re-enable
+ /* If we are receiving /C/ ordered sets, re-enable
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
@@ -1619,8 +1588,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_autoneg_progress:
if (rxcw & E1000_RXCW_C) {
- /*
- * We received /C/ ordered sets, meaning the
+ /* We received /C/ ordered sets, meaning the
* link partner has autonegotiated, and we can
* trust the Link Up (LU) status bit.
*/
@@ -1636,8 +1604,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
e_dbg("AN_PROG -> DOWN\n");
}
} else {
- /*
- * The link partner did not autoneg.
+ /* The link partner did not autoneg.
* Force link up and full duplex, and change
* state to forced.
*/
@@ -1660,8 +1627,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
case e1000_serdes_link_down:
default:
- /*
- * The link was down but the receiver has now gained
+ /* The link was down but the receiver has now gained
* valid sync, so lets see if we can bring the link
* up.
*/
@@ -1679,8 +1645,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
mac->serdes_link_state = e1000_serdes_link_down;
e_dbg("ANYSTATE -> DOWN\n");
} else {
- /*
- * Check several times, if SYNCH bit and CONFIG
+ /* Check several times, if SYNCH bit and CONFIG
* bit both are consistently 1 then simply ignore
* the IV bit and restart Autoneg
*/
@@ -1780,8 +1745,7 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
/* If workaround is activated... */
if (state)
- /*
- * Hold a copy of the LAA in RAR[14] This is done so that
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed, the actual LAA is in one of the RARs and no
* incoming packets directed to this port are dropped.
@@ -1810,8 +1774,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (nvm->type != e1000_nvm_flash_hw)
return 0;
- /*
- * Check bit 4 of word 10h. If it is 0, firmware is done updating
+ /* Check bit 4 of word 10h. If it is 0, firmware is done updating
* 10h-12h. Checksum may need to be fixed.
*/
ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1819,8 +1782,7 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
return ret_val;
if (!(data & 0x10)) {
- /*
- * Read 0x23 and check bit 15. This bit is a 1
+ /* Read 0x23 and check bit 15. This bit is a 1
* when the checksum has already been fixed. If
* the checksum is still wrong and this bit is a
* 1, we need to return bad checksum. Otherwise,
@@ -1852,8 +1814,7 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
if (hw->mac.type == e1000_82571) {
s32 ret_val = 0;
- /*
- * If there's an alternate MAC address place it in RAR0
+ /* If there's an alternate MAC address place it in RAR0
* so that it will override the Si installed default perm
* address.
*/
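
The reflowed comments above describe the 82571/82572 serdes link state
machine in check_for_serdes_link. In outline — a hypothetical skeleton
condensed from the states named in this file, with status/rxcw assumed read
from STATUS and RXCW just before the switch:

    switch (mac->serdes_link_state) {
    case e1000_serdes_link_autoneg_complete:
            if (!(status & E1000_STATUS_LU))        /* lost link: retry AN */
                    mac->serdes_link_state =
                        e1000_serdes_link_autoneg_progress;
            break;
    case e1000_serdes_link_forced_up:
            if (rxcw & E1000_RXCW_C)                /* partner sends /C/: */
                    mac->serdes_link_state =        /* drop force, re-AN  */
                        e1000_serdes_link_autoneg_progress;
            break;
    case e1000_serdes_link_autoneg_progress:
            /* /C/ seen: trust STATUS.LU; no /C/: force link up + full duplex */
            break;
    case e1000_serdes_link_down:
    default:
            /* receiver regained sync: try to bring the link back up */
            break;
    }
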
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 76edbc1be33b..02a12b69555f 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -185,8 +185,7 @@
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
-/*
- * Use byte values for the following shift parameters
+/* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
@@ -242,8 +241,7 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
-/*
- * Bit definitions for the Management Data IO (MDIO) and Management Data
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
@@ -424,8 +422,7 @@
#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */
#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */
-/*
- * This defines the bits that are set in the Interrupt Mask
+/* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
@@ -475,8 +472,7 @@
/* 802.1q VLAN Packet Size */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
-/* Receive Address */
-/*
+/* Receive Address
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
@@ -723,8 +719,7 @@
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
-/* Bit definitions for valid PHY IDs. */
-/*
+/* Bit definitions for valid PHY IDs.
* I = Integrated
* E = External
*/
@@ -762,8 +757,7 @@
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
-/*
- * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
* 0=Normal 10BASE-T Rx Threshold
*/
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
@@ -779,14 +773,12 @@
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
-/*
- * Number of times we will attempt to autonegotiate before downshifting if we
+/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
@@ -808,8 +800,7 @@
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
((reg) & MAX_PHY_REG_ADDRESS))
-/*
- * Bits...
+/* Bits...
* 15-5: page
* 4-0: register offset
*/
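
The PSRCTL comment above documents the packet-split buffer-size encoding:
value0 is rounded to 128-byte granularity, and (an assumption from the usage
shown, since the rest of the comment is not quoted here) the remaining buffer
sizes to 1 KB granularity. A hedged usage sketch, with the SHIFT/MASK
constants from defines.h assumed in scope and the kernel's roundup() standing
in for the comment's ROUNDUP:

    u32 psrctl = 0;

    /* buffer 0 in 128-byte units; buffer 1 (assumed) in 1 KB units */
    psrctl |= (roundup(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
              E1000_PSRCTL_BSIZE0_MASK;
    psrctl |= (roundup(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
              E1000_PSRCTL_BSIZE1_MASK;
    ew32(PSRCTL, psrctl);
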
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 04668b47a1df..6782a2eea1bc 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -161,8 +161,7 @@ struct e1000_info;
/* Time to wait before putting the device into D3 if there's no link (in ms). */
#define LINK_TIMEOUT 100
-/*
- * Count for polling __E1000_RESET condition every 10-20msec.
+/* Count for polling __E1000_RESET condition every 10-20msec.
* Experimentation has shown the reset can take approximately 210msec.
*/
#define E1000_CHECK_RESET_COUNT 25
@@ -172,8 +171,7 @@ struct e1000_info;
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
-/*
- * in the case of WTHRESH, it appears at least the 82571/2 hardware
+/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
* WTHRESH=4, so a setting of 5 gives the most efficient bus
* utilization but to avoid possible Tx stalls, set it to 1
@@ -214,8 +212,7 @@ struct e1000_ps_page {
u64 dma; /* must be u64 - written to hw */
};
-/*
- * wrappers around a pointer to a socket buffer,
+/* wrappers around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct e1000_buffer {
@@ -305,9 +302,7 @@ struct e1000_adapter {
u16 tx_itr;
u16 rx_itr;
- /*
- * Tx
- */
+ /* Tx */
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
u32 tx_fifo_limit;
@@ -340,9 +335,7 @@ struct e1000_adapter {
u32 tx_fifo_size;
u32 tx_dma_failed;
- /*
- * Rx
- */
+ /* Rx */
bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
int work_to_do) ____cacheline_aligned_in_smp;
void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index c11ac2756667..f95bc6ee1c22 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -214,7 +214,8 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->autoneg = 0;
/* Make sure dplx is at most 1 bit and lsb of speed is not set
- * for the switch() below to work */
+ * for the switch() below to work
+ */
if ((spd & 1) || (dplx & ~1))
goto err_inval;
@@ -263,8 +264,7 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- /*
- * When SoL/IDER sessions are active, autoneg/speed/duplex
+ /* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
@@ -273,8 +273,7 @@ static int e1000_set_settings(struct net_device *netdev,
return -EINVAL;
}
- /*
- * MDI setting is only allowed when autoneg enabled because
+ /* MDI setting is only allowed when autoneg enabled because
* some hardware doesn't allow MDI setting when speed or
* duplex is forced.
*/
@@ -316,8 +315,7 @@ static int e1000_set_settings(struct net_device *netdev,
/* MDI-X => 2; MDI => 1; Auto => 3 */
if (ecmd->eth_tp_mdix_ctrl) {
- /*
- * fix up the value for auto (3 => 0) as zero is mapped
+ /* fix up the value for auto (3 => 0) as zero is mapped
* internally to auto
*/
if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
@@ -454,8 +452,8 @@ static void e1000_get_regs(struct net_device *netdev,
regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
/* ethtool doesn't use anything past this point, so all this
- * code is likely legacy junk for apps that may or may not
- * exist */
+ * code is likely legacy junk for apps that may or may not exist
+ */
if (hw->phy.type == e1000_phy_m88) {
e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (u32)phy_data; /* cable length */
@@ -598,8 +596,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
if (ret_val)
goto out;
- /*
- * Update the checksum over the first part of the EEPROM if needed
+ /* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for applicable controllers
*/
if ((first_word <= NVM_CHECKSUM_REG) ||
@@ -623,8 +620,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, e1000e_driver_version,
sizeof(drvinfo->version));
- /*
- * EEPROM image version # is reported as firmware version # for
+ /* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
@@ -708,8 +704,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
e1000e_down(adapter);
- /*
- * We can't just free everything and then setup again, because the
+ /* We can't just free everything and then setup again, because the
* ISRs in MSI-X mode get passed pointers to the Tx and Rx ring
* structs. First, attempt to allocate new resources...
*/
@@ -813,8 +808,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 mask;
u32 wlock_mac = 0;
- /*
- * The status register is Read Only, so a write should fail.
+ /* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
switch (mac->type) {
@@ -996,8 +990,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
- /*
- * Disable the interrupt to be reported in
+ /* Disable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was posted to the bus, the
@@ -1015,8 +1008,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
}
- /*
- * Enable the interrupt to be reported in
+ /* Enable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was not posted to the bus, the
@@ -1034,8 +1026,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
- /*
- * Disable the other interrupts to be reported in
+ /* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
* an interrupt was posted to the bus, the
@@ -1378,8 +1369,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
hw->phy.type == e1000_phy_m88) {
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else {
- /*
- * Set the ILOS bit on the fiber Nic if half duplex link is
+ /* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
if ((er32(STATUS) & E1000_STATUS_FD) == 0)
@@ -1388,8 +1378,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
- /*
- * Disable the receiver on the PHY so when a cable is plugged in, the
+ /* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
if (hw->phy.type == e1000_phy_m88)
@@ -1408,8 +1397,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special requirements for 82571/82572 fiber adapters */
- /*
- * jump through hoops to make sure link is up because serdes
+ /* jump through hoops to make sure link is up because serdes
* link is hardwired up
*/
ctrl |= E1000_CTRL_SLU;
@@ -1429,8 +1417,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl);
}
- /*
- * special write to serdes control register to enable SerDes analog
+ /* special write to serdes control register to enable SerDes analog
* loopback
*/
#define E1000_SERDES_LB_ON 0x410
@@ -1448,8 +1435,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
u32 ctrlext = er32(CTRL_EXT);
u32 ctrl = er32(CTRL);
- /*
- * save CTRL_EXT to restore later, reuse an empty variable (unused
+ /* save CTRL_EXT to restore later, reuse an empty variable (unused
* on mac_type 80003es2lan)
*/
adapter->tx_fifo_head = ctrlext;
@@ -1585,8 +1571,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ew32(RDT(0), rx_ring->count - 1);
- /*
- * Calculate the loop count based on the largest descriptor ring
+ /* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
@@ -1627,8 +1612,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
l++;
if (l == rx_ring->count)
l = 0;
- /*
- * time + 20 msecs (200 msecs on 2.4) is more than
+ /* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
@@ -1649,10 +1633,7 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * PHY loopback cannot be performed if SoL/IDER
- * sessions are active
- */
+ /* PHY loopback cannot be performed if SoL/IDER sessions are active */
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
@@ -1686,8 +1667,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
int i = 0;
hw->mac.serdes_has_link = false;
- /*
- * On some blade server designs, link establishment
+ /* On some blade server designs, link establishment
* could take as long as 2-3 minutes
*/
do {
@@ -1701,8 +1681,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
} else {
hw->mac.ops.check_for_link(hw);
if (hw->mac.autoneg)
- /*
- * On some Phy/switch combinations, link establishment
+ /* On some Phy/switch combinations, link establishment
* can take a few seconds more than expected.
*/
msleep(5000);
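
The loopback-test comment above amounts to: wrap the largest descriptor ring
a couple of times, sending 64 send/receive pairs per iteration. A hypothetical
simplification of that loop-count math (the driver's actual bookkeeping is
more involved):

    /* enough 64-packet batches to wrap the largest ring about twice */
    lc = ((max(tx_ring->count, rx_ring->count) / 64) * 2) + 1;

    for (j = 0; j <= lc; j++) {
            for (i = 0; i < 64; i++) {
                    /* queue a Tx frame carrying a known byte pattern */
            }
            msleep(200);
            /* walk the Rx ring; verify each received frame's pattern,
             * and fail the test if the time budget is exceeded
             */
    }
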
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index d37bfd96c987..cf217777586c 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -85,8 +85,7 @@ enum e1e_registers {
E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
-/*
- * Convenience macros
+/* Convenience macros
*
* Note: "_n" is the queue number of the register to be written to.
*
@@ -800,8 +799,7 @@ struct e1000_mac_operations {
s32 (*read_mac_addr)(struct e1000_hw *);
};
-/*
- * When to use various PHY register access functions:
+/* When to use various PHY register access functions:
*
* Func Caller
* Function Does Does When to use
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e3a7b07df629..976336547607 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -26,8 +26,7 @@
*******************************************************************************/
-/*
- * 82562G 10/100 Network Connection
+/* 82562G 10/100 Network Connection
* 82562G-2 10/100 Network Connection
* 82562GT 10/100 Network Connection
* 82562GT-2 10/100 Network Connection
@@ -354,8 +353,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
return true;
}
- /*
- * In case the PHY needs to be in mdio slow mode,
+ /* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
hw->phy.ops.release(hw);
@@ -386,8 +384,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
+ /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is
* inaccessible and resetting the PHY is not blocked, toggle the
* LANPHYPC Value bit to force the interconnect to PCIe mode.
*/
@@ -396,8 +393,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (e1000_phy_is_accessible_pchlan(hw))
break;
- /*
- * Before toggling LANPHYPC, see if PHY is accessible by
+ /* Before toggling LANPHYPC, see if PHY is accessible by
* forcing MAC to SMBus mode first.
*/
mac_reg = er32(CTRL_EXT);
@@ -406,8 +402,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
- /*
- * Gate automatic PHY configuration by hardware on
+ /* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
if ((hw->mac.type == e1000_pch2lan) &&
@@ -474,8 +469,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
hw->phy.ops.release(hw);
- /*
- * Reset the PHY before any access to it. Doing so, ensures
+ /* Reset the PHY before any access to it. Doing so, ensures
* that the PHY is in a known good state before we read/write
* PHY registers. The generic reset is sufficient here,
* because we haven't determined the PHY type yet.
@@ -536,8 +530,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* fall-through */
case e1000_pch2lan:
case e1000_pch_lpt:
- /*
- * In case the PHY needs to be in mdio slow mode,
+ /* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -593,8 +586,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->ops.power_up = e1000_power_up_phy_copper;
phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
- /*
- * We may need to do this twice - once for IGP and if that fails,
+ /* We may need to do this twice - once for IGP and if that fails,
* we'll set BM func pointers and try again
*/
ret_val = e1000e_determine_phy_address(hw);
@@ -679,8 +671,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
gfpreg = er32flash(ICH_FLASH_GFPREG);
- /*
- * sector_X_addr is a "sector"-aligned address (4096 bytes)
+ /* sector_X_addr is a "sector"-aligned address (4096 bytes)
* Add 1 to sector_end_addr since this sector is included in
* the overall size.
*/
@@ -690,8 +681,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
/* flash_base_addr is byte-aligned */
nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
- /*
- * find total size of the NVM, then cut in half since the total
+ /* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
@@ -788,8 +778,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
if (mac->type == e1000_ich8lan)
e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
- /*
- * Gate automatic PHY configuration by hardware on managed
+ /* Gate automatic PHY configuration by hardware on managed
* 82579 and i217
*/
if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
@@ -840,8 +829,7 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
goto release;
e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
- /*
- * EEE is not supported in 100Half, so ignore partner's EEE
+ /* EEE is not supported in 100Half, so ignore partner's EEE
* in 100 ability if full-duplex is not advertised.
*/
e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
@@ -869,8 +857,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
bool link;
u16 phy_reg;
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@@ -878,8 +865,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (!mac->get_link_status)
return 0;
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@@ -914,8 +900,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Workaround for PCHx parts in half-duplex:
+ /* Workaround for PCHx parts in half-duplex:
* Set the number of preambles removed from the packet
* when it is passed from the PHY to the MAC to prevent
* the MAC from misinterpreting the packet type.
@@ -932,8 +917,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000e_check_downshift(hw);
@@ -943,22 +927,19 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
return -E1000_ERR_CONFIG;
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@@ -1000,8 +981,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
if (rc)
return rc;
- /*
- * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+ /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
* on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
*/
if ((adapter->hw.phy.type == e1000_phy_ife) ||
@@ -1191,8 +1171,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] |
@@ -1256,8 +1235,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
u32 rar_low, rar_high;
u32 wlock_mac;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -1277,8 +1255,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
return;
}
- /*
- * The manageability engine (ME) can lock certain SHRAR registers that
+ /* The manageability engine (ME) can lock certain SHRAR registers that
* it is using - those registers are unavailable for use.
*/
if (index < hw->mac.rar_entry_count) {
@@ -1387,8 +1364,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
s32 ret_val = 0;
u16 word_addr, reg_data, reg_addr, phy_page = 0;
- /*
- * Initialize the PHY from the NVM on ICH platforms. This
+ /* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
* not properly autoloaded after power transitions.
* Therefore, after each PHY reset, we will load the
@@ -1422,8 +1398,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
if (!(data & sw_cfg_mask))
goto release;
- /*
- * Make sure HW does not configure LCD from PHY
+ /* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL);
@@ -1443,8 +1418,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pchlan) &&
!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
(hw->mac.type > e1000_pchlan)) {
- /*
- * HW configures the SMBus address and LEDs when the
+ /* HW configures the SMBus address and LEDs when the
* OEM and LCD Write Enable bits are set in the NVM.
* When both NVM bits are cleared, SW will configure
* them instead.
@@ -1748,8 +1722,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
}
if (hw->phy.type == e1000_phy_82578) {
- /*
- * Return registers to default by doing a soft reset then
+ /* Return registers to default by doing a soft reset then
* writing 0x3140 to the control register.
*/
if (hw->phy.revision < 2) {
@@ -1769,8 +1742,7 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Configure the K1 Si workaround during phy reset assuming there is
+ /* Configure the K1 Si workaround during phy reset assuming there is
* link so that it disables K1 if link is in 1Gbps.
*/
ret_val = e1000_k1_gig_workaround_hv(hw, true);
@@ -1853,8 +1825,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
if (enable) {
- /*
- * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+ /* Write Rx addresses (rar_entry_count for RAL/H, +4 for
* SHRAL/H) and initial CRC values to the MAC
*/
for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
@@ -2131,8 +2102,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
udelay(100);
} while ((!data) && --loop);
- /*
- * If basic configuration is incomplete before the above loop
+ /* If basic configuration is incomplete before the above loop
* count reaches 0, loading the configuration from NVM will
* leave the PHY in a bad state possibly resulting in no link.
*/
@@ -2299,8 +2269,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * Call gig speed drop workaround on LPLU before accessing
+ /* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -2319,8 +2288,7 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -2382,8 +2350,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -2420,8 +2387,7 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (phy->type != e1000_phy_igp_3)
return 0;
- /*
- * Call gig speed drop workaround on LPLU before accessing
+ /* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -2589,8 +2555,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
- /*
- * Either we should have a hardware SPI cycle in progress
+ /* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
* FDONE bit should be changed in the hardware so that it
* is 1 after hardware reset, which can then be used as an
@@ -2599,8 +2564,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
*/
if (!hsfsts.hsf_status.flcinprog) {
- /*
- * There is no cycle running at present,
+ /* There is no cycle running at present,
* so we can start a cycle.
* Begin by setting Flash Cycle Done.
*/
@@ -2610,8 +2574,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
} else {
s32 i;
- /*
- * Otherwise poll for sometime so the current
+ /* Otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
*/
for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
@@ -2623,8 +2586,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
udelay(1);
}
if (!ret_val) {
- /*
- * Successful in waiting for previous cycle to timeout,
+ /* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
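As an aside on the flash hunks above: the FLCINPROG/FLCDONE handshake the comments describe is easier to follow in isolation. The sketch below is a reconstruction from those comments, not driver code; the struct, field names, and polling bound are assumptions standing in for the real HSFSTS register bits.

    #include <stdbool.h>

    /* Hypothetical mirror of the two HSFSTS bits discussed above. */
    struct hsfsts_sketch {
        bool flcinprog;             /* flash cycle in progress */
        bool flcdone;               /* flash cycle done */
    };

    #define FLASH_POLL_LIMIT 1000   /* assumed polling bound */

    static int flash_cycle_init_sketch(struct hsfsts_sketch *s)
    {
        int i;

        if (!s->flcinprog) {
            /* No cycle running: a new one may start; begin by
             * setting Flash Cycle Done to clear stale state. */
            s->flcdone = true;
            return 0;
        }
        /* Otherwise poll for a while so the current cycle can end. */
        for (i = 0; i < FLASH_POLL_LIMIT; i++) {
            if (!s->flcinprog) {
                s->flcdone = true;  /* previous cycle ended */
                return 0;
            }
        }
        return -1;                  /* hardware never released the cycle */
    }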
@@ -2753,8 +2715,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_READ_COMMAND_TIMEOUT);
- /*
- * Check if FCERR is set to 1, if set to 1, clear it
+ /* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
* read in (shift in) the Flash Data0, the order is
* least significant byte first msb to lsb
@@ -2767,8 +2728,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
*data = (u16)(flash_data & 0x0000FFFF);
break;
} else {
- /*
- * If we've gotten here, then things are probably
+ /* If we've gotten here, then things are probably
* completely hosed, but if the error condition is
* detected, it won't hurt to give it another try...
* ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -2849,8 +2809,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
nvm->ops.acquire(hw);
- /*
- * We're writing to the opposite bank so if we're on bank 1,
+ /* We're writing to the opposite bank so if we're on bank 1,
* write to bank 0 etc. We also need to erase the segment that
* is going to be written
*/
@@ -2875,8 +2834,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
- /*
- * Determine whether to write the value stored
+ /* Determine whether to write the value stored
* in the other NVM bank or a modified value stored
* in the shadow RAM
*/
@@ -2890,8 +2848,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * If the word is 0x13, then make sure the signature bits
+ /* If the word is 0x13, then make sure the signature bits
* (15:14) are 11b until the commit has completed.
* This will allow us to write 10b which indicates the
* signature is valid. We want to do this after the write
@@ -2920,8 +2877,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
break;
}
- /*
- * Don't bother writing the segment valid bits if sector
+ /* Don't bother writing the segment valid bits if sector
* programming failed.
*/
if (ret_val) {
@@ -2930,8 +2886,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
goto release;
}
- /*
- * Finally validate the new segment by setting bit 15:14
+ /* Finally validate the new segment by setting bits 15:14
* to 10b in word 0x13; this can be done without an
* erase as well since these bits are 11 to start with
* and we need to change bit 14 to 0b
@@ -2948,8 +2903,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
if (ret_val)
goto release;
- /*
- * And invalidate the previously valid segment by setting
+ /* And invalidate the previously valid segment by setting
* its signature word (0x13) high_byte to 0b. This can be
* done without an erase because flash erase sets all bits
* to 1's. We can write 1's to 0's without an erase
@@ -2968,8 +2922,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
release:
nvm->ops.release(hw);
- /*
- * Reload the EEPROM, or else modifications will not appear
+ /* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
if (!ret_val) {
@@ -2997,8 +2950,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
s32 ret_val;
u16 data;
- /*
- * Read 0x19 and check bit 6. If this bit is 0, the checksum
+ /* Read 0x19 and check bit 6. If this bit is 0, the checksum
* needs to be fixed. This bit is an indication that the NVM
* was prepared by OEM software and did not calculate the
* checksum...a likely scenario.
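One concrete reading of the fix-up described above, as a sketch over an in-memory image rather than real NVM ops: if bit 6 of word 0x19 is clear, set it and recompute the checksum word so all words sum to the e1000 family's 0xBABA constant. The image size here is an assumption.

    #include <stdint.h>
    #include <stddef.h>

    #define NVM_WORDS        0x40       /* assumed image size */
    #define NVM_VALID_WORD   0x19       /* word holding the valid flag */
    #define NVM_VALID_BIT    0x40       /* bit 6 */
    #define NVM_CHECKSUM_REG (NVM_WORDS - 1)
    #define NVM_SUM          0xBABA     /* words must sum to this */

    /* Repair an in-memory NVM image the way the comment describes. */
    static void fix_nvm_checksum_sketch(uint16_t nvm[NVM_WORDS])
    {
        uint16_t sum = 0;
        size_t i;

        if (nvm[NVM_VALID_WORD] & NVM_VALID_BIT)
            return;                     /* checksum already valid */

        nvm[NVM_VALID_WORD] |= NVM_VALID_BIT;
        for (i = 0; i < NVM_CHECKSUM_REG; i++)
            sum += nvm[i];
        nvm[NVM_CHECKSUM_REG] = (uint16_t)(NVM_SUM - sum);
    }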
@@ -3048,8 +3000,7 @@ void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
pr0.range.wpe = true;
ew32flash(ICH_FLASH_PR0, pr0.regval);
- /*
- * Lock down a subset of GbE Flash Control Registers, e.g.
+ /* Lock down a subset of GbE Flash Control Registers, e.g.
* PR0 to prevent the write-protection from being lifted.
* Once FLOCKDN is set, the registers protected by it cannot
* be written until FLOCKDN is cleared by a hardware reset.
@@ -3109,8 +3060,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FDATA0, flash_data);
- /*
- * check if FCERR is set to 1 , if set to 1, clear it
+ /* check if FCERR is set to 1; if so, clear it
* and try the whole sequence a few more times, else done
*/
ret_val = e1000_flash_cycle_ich8lan(hw,
@@ -3118,8 +3068,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
if (!ret_val)
break;
- /*
- * If we're here, then things are most likely
+ /* If we're here, then things are most likely
* completely hosed, but if the error condition
* is detected, it won't hurt to give it another
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -3207,8 +3156,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
- /*
- * Determine HW Sector size: Read BERASE bits of hw flash status
+ /* Determine HW Sector size: Read BERASE bits of hw flash status
* register
* 00: The Hw sector is 256 bytes, hence we need to erase 16
* consecutive sectors. The start index for the nth Hw sector
@@ -3253,16 +3201,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (ret_val)
return ret_val;
- /*
- * Write a value 11 (block Erase) in Flash
+ /* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
- /*
- * Write the last 24 bits of an index within the
+ /* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
* Address.
*/
@@ -3274,8 +3220,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (!ret_val)
break;
- /*
- * Check if FCERR is set to 1. If 1,
+ /* Check if FCERR is set to 1. If 1,
* clear it and try the whole sequence
* a few more times else Done
*/
@@ -3403,8 +3348,7 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_get_bus_info_pcie(hw);
- /*
- * ICH devices are "PCI Express"-ish. They have
+ /* ICH devices are "PCI Express"-ish. They have
* a configuration space, but do not contain
* PCI Express Capability registers, so bus width
* must be hardcoded.
@@ -3429,8 +3373,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
u32 ctrl, reg;
s32 ret_val;
- /*
- * Prevent the PCI-E bus from sticking if there is no TLP connection
+ /* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@@ -3440,8 +3383,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
e_dbg("Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
- /*
- * Disable the Transmit and Receive units. Then delay to allow
+ /* Disable the Transmit and Receive units. Then delay to allow
* any pending transactions to complete before we hit the MAC
* with the global reset.
*/
@@ -3474,15 +3416,13 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!hw->phy.ops.check_reset_block(hw)) {
- /*
- * Full-chip reset requires MAC and PHY reset at the same
+ /* Full-chip reset requires MAC and PHY reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
ctrl |= E1000_CTRL_PHY_RST;
- /*
- * Gate automatic PHY configuration by hardware on
+ /* Gate automatic PHY configuration by hardware on
* non-managed 82579
*/
if ((hw->mac.type == e1000_pch2lan) &&
@@ -3516,8 +3456,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
return ret_val;
}
- /*
- * For PCH, this write will make sure that any noise
+ /* For PCH, this write will make sure that any noise
* will be detected as a CRC error and be dropped rather than show up
* as a bad packet to the DMA engine.
*/
@@ -3569,8 +3508,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
for (i = 0; i < mac->mta_reg_count; i++)
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
- /*
- * The 82578 Rx buffer will stall if wakeup is enabled in host and
+ /* The 82578 Rx buffer will stall if wakeup is enabled in host and
* the ME. Disable wakeup by clearing the host wakeup bit.
* Reset the phy after disabling host wakeup to reset the Rx buffer.
*/
@@ -3600,8 +3538,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
ew32(TXDCTL(1), txdctl);
- /*
- * ICH8 has opposite polarity of no_snoop bits.
+ /* ICH8 has opposite polarity of no_snoop bits.
* By default, we should use snoop behavior.
*/
if (mac->type == e1000_ich8lan)
@@ -3614,8 +3551,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
- /*
- * Clear all of the statistics registers (clear on read). It is
+ /* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@@ -3676,15 +3612,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(STATUS, reg);
}
- /*
- * work-around descriptor data corruption issue during nfs v2 udp
+ /* work-around descriptor data corruption issue during nfs v2 udp
* traffic, just disable the nfs filtering capability
*/
reg = er32(RFCTL);
reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
- /*
- * Disable IPv6 extension header parsing because some malformed
+ /* Disable IPv6 extension header parsing because some malformed
* IPv6 headers can hang the Rx.
*/
if (hw->mac.type == e1000_ich8lan)
@@ -3709,8 +3643,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw))
return 0;
- /*
- * ICH parts do not have a word in the NVM to determine
+ /* ICH parts do not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
@@ -3722,8 +3655,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
hw->fc.requested_mode = e1000_fc_full;
}
- /*
- * Save off the requested flow control mode for use later. Depending
+ /* Save off the requested flow control mode for use later. Depending
* on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
@@ -3771,8 +3703,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
- /*
- * Set the mac to wait the maximum time between each iteration
+ /* Set the mac to wait the maximum time between each iteration
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps.
*/
@@ -3892,8 +3823,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
if (!dev_spec->kmrn_lock_loss_workaround_enabled)
return 0;
- /*
- * Make sure link is up before proceeding. If not just return.
+ /* Make sure link is up before proceeding. If not, just return.
* Attempting this while link is negotiating fouled up link
* stability
*/
@@ -3925,8 +3855,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, phy_ctrl);
- /*
- * Call gig speed drop workaround on Gig disable before accessing
+ /* Call gig speed drop workaround on Gig disable before accessing
* any PHY registers
*/
e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -3983,8 +3912,7 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, reg);
- /*
- * Call gig speed drop workaround on Gig disable before
+ /* Call gig speed drop workaround on Gig disable before
* accessing any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
@@ -4078,8 +4006,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
goto release;
e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
- /*
- * Disable LPLU if both link partners support 100BaseT
+ /* Disable LPLU if both link partners support 100BaseT
* EEE and 100Full is advertised on both ends of the
* link.
*/
@@ -4091,8 +4018,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_LPLU);
}
- /*
- * For i217 Intel Rapid Start Technology support,
+ /* For i217 Intel Rapid Start Technology support,
* when the system is going into Sx and no manageability engine
* is present, the driver must configure proxy to reset only on
* power good. LPI (Low Power Idle) state must also reset only
@@ -4106,8 +4032,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
- /*
- * Set bit enable LPI (EEE) to reset only on
+ /* Set the LPI (EEE) enable bit to reset only on
* power good.
*/
e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
@@ -4120,8 +4045,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
}
- /*
- * Enable MTA to reset for Intel Rapid Start Technology
+ /* Enable MTA to reset for Intel Rapid Start Technology
* Support
*/
e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
@@ -4175,8 +4099,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
return;
}
- /*
- * For i217 Intel Rapid Start Technology support when the system
+ /* For i217 Intel Rapid Start Technology support when the system
* is transitioning from Sx and no manageability engine is present
* configure SMBus to restore on reset, disable proxy, and enable
* the reset on MTA (Multicast table array).
@@ -4191,8 +4114,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
}
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- /*
- * Restore clear on SMB if no manageability engine
+ /* Restore clear on SMB if no manageability engine
* is present
*/
ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
@@ -4298,8 +4220,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
u16 data = (u16)hw->mac.ledctl_mode2;
u32 i, led;
- /*
- * If no link, then turn LED on by setting the invert bit
+ /* If no link, then turn LED on by setting the invert bit
* for each LED whose mode is "link_up" in ledctl_mode2.
*/
if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4329,8 +4250,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
u16 data = (u16)hw->mac.ledctl_mode1;
u32 i, led;
- /*
- * If no link, then turn LED off by clearing the invert bit
+ /* If no link, then turn LED off by clearing the invert bit
* for each LED whose mode is "link_up" in ledctl_mode1.
*/
if (!(er32(STATUS) & E1000_STATUS_LU)) {
@@ -4375,8 +4295,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
} else {
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
- /*
- * When auto config read does not complete, do not
+ /* When auto config read does not complete, do not
* return with an error. This can happen in situations
* where there is no eeprom and prevents getting link.
*/
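Before the mac.c hunks, one illustration of the bank-signature scheme that e1000_update_nvm_checksum_ich8lan above relies on. Flash writes can only clear bits (an erase is needed to set them back to 1), which is exactly why 11b -> 10b validates the new bank and clearing the old bank's signature high byte invalidates it, both without an erase. A minimal sketch over two in-memory banks, each at least 0x14 words long; the bit positions come from the comments, the function itself is hypothetical.

    #include <stdint.h>

    #define SIG_WORD 0x13
    #define BIT14    0x4000

    static void commit_bank_sketch(uint16_t new_bank[], uint16_t old_bank[])
    {
        /* The new bank was written with 11b in bits 15:14 of word
         * 0x13; validate it by clearing bit 14 (11b -> 10b). */
        new_bank[SIG_WORD] &= (uint16_t)~BIT14;

        /* Invalidate the old bank by clearing its signature word's
         * high byte; 1 -> 0 transitions need no erase. */
        old_bank[SIG_WORD] &= 0x00FF;
    }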
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index a13439928488..54d9dafaf126 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -73,8 +73,7 @@ void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
struct e1000_bus_info *bus = &hw->bus;
u32 reg;
- /*
- * The status register reports the correct function number
+ /* The status register reports the correct function number
* for the device regardless of function swap state.
*/
reg = er32(STATUS);
@@ -210,8 +209,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
- /*
- * We have a valid alternate MAC address, and we want to treat it the
+ /* We have a valid alternate MAC address, and we want to treat it the
* same as the normal permanent MAC address stored by the HW into the
* RAR. Do this by mapping this address into RAR0.
*/
@@ -233,8 +231,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
- /*
- * HW expects these in little endian so we reverse the byte order
+ /* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
@@ -246,8 +243,7 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
if (rar_low || rar_high)
rar_high |= E1000_RAH_AV;
- /*
- * Some bridges will combine consecutive 32-bit writes into
+ /* Some bridges will combine consecutive 32-bit writes into
* a single burst write, which will malfunction on some parts.
* The flushes avoid this.
*/
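The byte-order comment above (and its twins in ich8lan.c) is simple to check in isolation: the address arrives in network (big-endian) order and RAL/RAH want it packed little-endian, with the Address Valid bit set for a non-zero address. E1000_RAH_AV below is the real bit value; the helper itself is just an illustrative sketch of the packing visible in this hunk.

    #include <stdint.h>

    #define E1000_RAH_AV 0x80000000     /* receive address valid */

    static void pack_rar_sketch(const uint8_t addr[6],
                                uint32_t *rar_low, uint32_t *rar_high)
    {
        *rar_low = (uint32_t)addr[0] | ((uint32_t)addr[1] << 8) |
                   ((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
        *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);

        if (*rar_low || *rar_high)      /* non-zero address is valid */
            *rar_high |= E1000_RAH_AV;
    }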
@@ -273,15 +269,13 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
- /*
- * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+ /* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
- /*
- * The portion of the address that is used for the hash table
+ /* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
@@ -423,8 +417,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
s32 ret_val;
bool link;
- /*
- * We only want to go out to the PHY registers to see if Auto-Neg
+ /* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@@ -432,8 +425,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
if (!mac->get_link_status)
return 0;
- /*
- * First we want to see if the MII Status Register reports
+ /* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@@ -446,28 +438,24 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = false;
- /*
- * Check if there was DownShift, must be checked
+ /* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000e_check_downshift(hw);
- /*
- * If we are forcing speed/duplex, then we simply return since
+ /* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg)
return -E1000_ERR_CONFIG;
- /*
- * Auto-Neg is enabled. Auto Speed Detection takes care
+ /* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
mac->ops.config_collision_dist(hw);
- /*
- * Configure Flow Control now that Auto-Neg has completed.
+ /* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@@ -498,8 +486,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), the cable is plugged in (we have signal),
* and our link partner is not trying to auto-negotiate with us (we
* are receiving idles or data), we need to force link up. We also
@@ -530,8 +517,7 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -565,8 +551,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
- /*
- * If we don't have link (auto-negotiation failed or link partner
+ /* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@@ -595,8 +580,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
- /*
- * If we are forcing link and we are receiving /C/ ordered
+ /* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@@ -607,8 +591,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->serdes_has_link = true;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
- /*
- * If we force link for non-auto-negotiation switch, check
+ /* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@@ -665,8 +648,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
s32 ret_val;
u16 nvm_data;
- /*
- * Read and store word 0x0F of the EEPROM. This word contains bits
+ /* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
* disabling auto-negotiation, and the direction of the
@@ -705,15 +687,13 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
s32 ret_val;
- /*
- * In the case of the phy reset being blocked, we already have a link.
+ /* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
return 0;
- /*
- * If requested flow control is set to default, set flow control
+ /* If requested flow control is set to default, set flow control
* based on the EEPROM flow control settings.
*/
if (hw->fc.requested_mode == e1000_fc_default) {
@@ -722,8 +702,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Save off the requested flow control mode for use later. Depending
+ /* Save off the requested flow control mode for use later. Depending
* on the link partner's capabilities, we may or may not use this mode.
*/
hw->fc.current_mode = hw->fc.requested_mode;
@@ -735,8 +714,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Initialize the flow control address, type, and PAUSE timer
+ /* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
@@ -763,8 +741,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
u32 txcw;
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the device accordingly. If auto-negotiation is enabled, then
* software will have to set the "PAUSE" bits to the correct value in
* the Transmit Config Word Register (TXCW) and re-start auto-
@@ -786,8 +763,7 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled and Tx Flow control is disabled
+ /* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of Rx Pause ONLY, we will
* advertise that we support both symmetric and asymmetric Rx
@@ -797,15 +773,13 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is disabled,
+ /* Tx Flow control is enabled, and Rx Flow control is disabled,
* by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@@ -835,8 +809,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
u32 i, status;
s32 ret_val;
- /*
- * If we have a signal (the cable is plugged in, or assumed true for
+ /* If we have a signal (the cable is plugged in, or assumed true for
* serdes media) then poll for a "Link-Up" indication in the Device
* Status Register. Time-out if a link isn't seen in 500 milliseconds
* (Auto-negotiation should complete in less than 500
@@ -851,8 +824,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
if (i == FIBER_LINK_UP_LIMIT) {
e_dbg("Never got a valid link from auto-neg!!!\n");
mac->autoneg_failed = true;
- /*
- * AutoNeg failed to achieve a link, so we'll call
+ /* AutoNeg failed to achieve a link, so we'll call
* mac->check_for_link. This routine will force the
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
@@ -894,8 +866,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Since auto-negotiation is enabled, take the link out of reset (the
+ /* Since auto-negotiation is enabled, take the link out of reset (the
* link will be in reset, because we previously reset the chip). This
* will restart auto-negotiation. If auto-negotiation is successful
* then the link-up status bit will be set and the flow control enable
@@ -907,8 +878,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
e1e_flush();
usleep_range(1000, 2000);
- /*
- * For these adapters, the SW definable pin 1 is set when the optics
+ /* For these adapters, the SW definable pin 1 is set when the optics
* detect a signal. If we have a signal, then poll for a "Link-Up"
* indication.
*/
@@ -954,16 +924,14 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
u32 fcrtl = 0, fcrth = 0;
- /*
- * Set the flow control receive threshold registers. Normally,
+ /* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (hw->fc.current_mode & e1000_fc_tx_pause) {
- /*
- * We need to set up the Receive Threshold high and low water
+ /* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
@@ -995,8 +963,7 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
ctrl = er32(CTRL);
- /*
- * Because we didn't get link via the internal auto-negotiation
+ /* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit and
* receive flow control.
@@ -1057,8 +1024,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
- /*
- * Check for the case where we have fiber media and auto-neg failed
+ /* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
@@ -1076,15 +1042,13 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Check for the case where we have copper media and auto-neg is
+ /* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
- /*
- * Read the MII Status Register and check to see if AutoNeg
+ /* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
@@ -1100,8 +1064,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
- /*
- * The AutoNeg process has completed, so we now need to
+ /* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto-Negotiation Base
* Page Ability Register (Address 5) to determine how
@@ -1115,8 +1078,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Two bits in the Auto Negotiation Advertisement Register
+ /* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
@@ -1151,8 +1113,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
- /*
- * Now we need to check if the user selected Rx ONLY
+ /* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
@@ -1166,8 +1127,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
e_dbg("Flow Control = Rx PAUSE frames only.\n");
}
}
- /*
- * For receiving PAUSE frames ONLY.
+ /* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1181,8 +1141,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_tx_pause;
e_dbg("Flow Control = Tx PAUSE frames only.\n");
}
- /*
- * For transmitting PAUSE frames ONLY.
+ /* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1196,16 +1155,14 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
hw->fc.current_mode = e1000_fc_rx_pause;
e_dbg("Flow Control = Rx PAUSE frames only.\n");
} else {
- /*
- * Per the IEEE spec, at this point flow control
+ /* Per the IEEE spec, at this point flow control
* should be disabled.
*/
hw->fc.current_mode = e1000_fc_none;
e_dbg("Flow Control = NONE.\n");
}
- /*
- * Now we need to do one last check... If we auto-
+ /* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
@@ -1218,8 +1175,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
if (duplex == HALF_DUPLEX)
hw->fc.current_mode = e1000_fc_none;
- /*
- * Now we call a subroutine to actually force the MAC
+ /* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = e1000e_force_mac_fc(hw);
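The PAUSE/ASM_DIR truth tables spread over the preceding hunks collapse into one small decision function. The sketch below encodes that IEEE 802.3 resolution, including the Rx-only quirk (full flow control had to be advertised because Rx-only cannot be); it is reconstructed from the comments, not lifted from the driver, and a half-duplex result must still be forced to none afterwards, as the code above notes.

    #include <stdbool.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Resolve flow control from local and link-partner PAUSE/ASM_DIR. */
    static enum fc_mode resolve_fc_sketch(bool loc_pause, bool loc_asm,
                                          bool lp_pause, bool lp_asm,
                                          bool wanted_rx_only)
    {
        if (loc_pause && lp_pause)
            /* Both sides advertise PAUSE; honor Rx-only if the user
             * asked for it but full had to be advertised. */
            return wanted_rx_only ? FC_RX_PAUSE : FC_FULL;
        if (!loc_pause && loc_asm && lp_pause && lp_asm)
            return FC_TX_PAUSE;         /* we transmit PAUSE only */
        if (loc_pause && loc_asm && !lp_pause && lp_asm)
            return FC_RX_PAUSE;         /* we receive PAUSE only */
        return FC_NONE;                 /* per the spec, disable */
    }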
@@ -1520,8 +1476,7 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
- /*
- * set the blink bit for each LED that's "on" (0x0E)
+ /* set the blink bit for each LED that's "on" (0x0E)
* in ledctl_mode2
*/
ledctl_blink = hw->mac.ledctl_mode2;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index bacc950fc684..6dc47beb3adc 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -143,8 +143,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
return hw->mac.tx_pkt_filtering;
}
- /*
- * If we can't read from the host interface for whatever
+ /* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
@@ -163,8 +162,7 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
- /*
- * If either the checksums or signature don't match, then
+ /* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
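The checksum that guards the cookie above is the classic 8-bit complement: every byte of a valid region, including the stored checksum byte, sums to zero modulo 256. A stand-alone equivalent (illustrative name, same arithmetic):

    #include <stdint.h>

    static uint8_t calc_checksum_sketch(const uint8_t *buf, uint32_t len)
    {
        uint8_t sum = 0;
        uint32_t i;

        for (i = 0; i < len; i++)
            sum += buf[i];
        return (uint8_t)(0 - sum);  /* region then sums to zero */
    }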
@@ -252,8 +250,7 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
/* Calculate length in DWORDs */
length >>= 2;
- /*
- * The device driver writes the relevant command block into the
+ /* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f444eb0b76d8..fbf75fdca994 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -146,9 +146,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-/*
+/**
* e1000_regdump - register printout routine
- */
+ * @hw: pointer to the HW structure
+ * @reginfo: pointer to the register info table
+ **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
int n = 0;
@@ -196,9 +198,10 @@ static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
}
}
-/*
+/**
* e1000e_dump - Print registers, Tx-ring and Rx-ring
- */
+ * @adapter: board private structure
+ **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -623,8 +626,7 @@ map_skb:
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
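The wmb() comments that recur through these Rx-allocation hunks are about ordering, not timing: every descriptor write must be globally visible before the tail write that tells hardware to fetch. A user-space sketch of the pattern, with plain volatile storage for the ring and tail and a compiler builtin standing in for the kernel's wmb():

    #include <stdint.h>

    static void publish_descriptors_sketch(volatile uint64_t *ring,
                                           unsigned int count,
                                           volatile uint32_t *tail,
                                           uint32_t next)
    {
        unsigned int i;

        for (i = 0; i < count; i++)
            ring[i] = 0;            /* stand-in for descriptor setup */

        __sync_synchronize();       /* wmb(): descriptors before doorbell */

        *tail = next;               /* hardware may fetch from here on */
    }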
@@ -692,8 +694,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
goto no_buffers;
}
}
- /*
- * Refresh the desc even if buffer_addrs
+ /* Refresh the desc even if buffer_addrs
* didn't change because each write-back
* erases this info.
*/
@@ -726,8 +727,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -817,7 +817,8 @@ check_page:
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
e1000e_update_rdt_wa(rx_ring, i);
@@ -891,8 +892,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
length = le16_to_cpu(rx_desc->wb.upper.length);
- /*
- * !EOP means multiple descriptors were used to store a single
+ /* !EOP means multiple descriptors were used to store a single
* packet, if that's the case we need to toss it. In fact, we
* need to toss every packet with the EOP bit clear and the
* next frame that _does_ have the EOP bit set, as it is by
@@ -933,8 +933,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
total_rx_bytes += length;
total_rx_packets++;
- /*
- * code added for copybreak, this should improve
+ /* code added for copybreak, this should improve
* performance for small packets with large amounts
* of reassembly being done in the stack
*/
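Copybreak, named in the comment just above, trades one small memcpy for keeping the large DMA buffer on the ring. A sketch with plain buffers instead of skbs; the threshold value is an assumption (the driver's copybreak is a tunable module parameter):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define COPYBREAK 256   /* assumed threshold */

    /* Small packets are copied so rx_buf can be recycled for DMA;
     * large packets are handed up as-is. Returns the buffer to use. */
    static const uint8_t *copybreak_sketch(const uint8_t *rx_buf, size_t len,
                                           uint8_t *small_buf, size_t cap)
    {
        if (len <= COPYBREAK && len <= cap) {
            memcpy(small_buf, rx_buf, len);
            return small_buf;
        }
        return rx_buf;
    }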
@@ -1032,15 +1031,13 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (!adapter->tx_hang_recheck &&
(adapter->flags2 & FLAG2_DMA_BURST)) {
- /*
- * May be block on write-back, flush and detect again
+ /* May be blocked on write-back; flush pending descriptor
* writebacks to memory and detect again
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
/* execute the writes immediately */
e1e_flush();
- /*
- * Due to rare timing issues, write to TIDV again to ensure
+ /* Due to rare timing issues, write to TIDV again to ensure
* the write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1169,8 +1166,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
}
if (adapter->detect_tx_hung) {
- /*
- * Detect a transmit hang in hardware, this serializes the
+ /* Detect a transmit hang in hardware; this serializes the
* check with the clearing of time_stamp and movement of i
*/
adapter->detect_tx_hung = false;
@@ -1270,14 +1266,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
skb_put(skb, length);
{
- /*
- * this looks ugly, but it seems compiler issues make
+ /* this looks ugly, but it seems compiler issues make
* it more efficient than reusing j
*/
int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
- /*
- * page alloc/put takes too long and effects small
+ /* page alloc/put takes too long and affects small
* packet throughput, so unsplit small packets and
* save the alloc/put; kmap_* is only valid to call
* in softirq (napi) context
@@ -1288,8 +1282,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
ps_page = &buffer_info->ps_pages[0];
- /*
- * there is no documentation about how to call
+ /* there is no documentation about how to call
* kmap_atomic, so we can't hold the mapping
* very long
*/
@@ -1486,14 +1479,16 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
skb_shinfo(rxtop)->nr_frags,
buffer_info->page, 0, length);
/* re-use the current skb, we only consumed the
- * page */
+ * page
+ */
buffer_info->skb = skb;
skb = rxtop;
rxtop = NULL;
e1000_consume_page(buffer_info, skb, length);
} else {
/* no chain, got EOP, this buf is the packet
- * copybreak to save the put_page/alloc_page */
+ * copybreak to save the put_page/alloc_page
+ */
if (length <= copybreak &&
skb_tailroom(skb) >= length) {
u8 *vaddr;
@@ -1502,7 +1497,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
length);
kunmap_atomic(vaddr);
/* re-use the page, so don't erase
- * buffer_info->page */
+ * buffer_info->page
+ */
skb_put(skb, length);
} else {
skb_fill_page_desc(skb, 0,
@@ -1656,22 +1652,17 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
struct e1000_hw *hw = &adapter->hw;
u32 icr = er32(ICR);
- /*
- * read ICR disables interrupts using IAM
- */
-
+ /* reading ICR disables interrupts using IAM */
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
- /*
- * 80003ES2LAN workaround-- For packet buffer work-around on
+ /* 80003ES2LAN workaround-- For packet buffer work-around on
* link down event; disable receives here in the ISR and reset
* adapter in watchdog
*/
@@ -1713,31 +1704,27 @@ static irqreturn_t e1000_intr(int irq, void *data)
if (!icr || test_bit(__E1000_DOWN, &adapter->state))
return IRQ_NONE; /* Not our interrupt */
- /*
- * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+ /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
* not set, then the adapter didn't send an interrupt
*/
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
- /*
- * Interrupt Auto-Mask...upon reading ICR,
+ /* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write
*/
if (icr & E1000_ICR_LSC) {
hw->mac.get_link_status = true;
- /*
- * ICH8 workaround-- Call gig speed drop workaround on cable
+ /* ICH8 workaround-- Call gig speed drop workaround on cable
* disconnect (LSC) before accessing any PHY registers
*/
if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
(!(er32(STATUS) & E1000_STATUS_LU)))
schedule_work(&adapter->downshift_task);
- /*
- * 80003ES2LAN workaround--
+ /* 80003ES2LAN workaround--
* For packet buffer work-around on link down event;
* disable receives here in the ISR and
* reset adapter in watchdog
@@ -2469,8 +2456,7 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
set_itr_now:
if (new_itr != adapter->itr) {
- /*
- * this attempts to bias the interrupt rate towards Bulk
+ /* this attempts to bias the interrupt rate towards Bulk
* by adding intermediate steps when interrupt rate is
* increasing
*/
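One plausible shape for the biased stepping this comment describes, reconstructed rather than copied from the driver: when the target interrupt rate rises, move up by only a quarter of the target per adjustment; drops toward Bulk apply at once. The step factor is an assumption.

    #include <stdint.h>

    /* Both rates are in interrupts/sec. */
    static uint32_t step_itr_sketch(uint32_t cur, uint32_t target)
    {
        if (target > cur) {
            uint32_t stepped = cur + (target >> 2);
            return stepped < target ? stepped : target;
        }
        return target;          /* decreases take effect immediately */
    }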
@@ -2517,7 +2503,7 @@ void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
* e1000_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
-static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
{
int size = sizeof(struct e1000_ring);
@@ -2740,8 +2726,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
manc = er32(MANC);
- /*
- * enable receiving management packets to the host. this will probably
+ /* enable receiving management packets to the host. this will probably
* generate destination unreachable messages from the host OS, but
* the packets will be handled on SMBUS
*/
@@ -2754,8 +2739,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
break;
case e1000_82574:
case e1000_82583:
- /*
- * Check if IPMI pass-through decision filter already exists;
+ /* Check if IPMI pass-through decision filter already exists;
* if so, enable it.
*/
for (i = 0, j = 0; i < 8; i++) {
@@ -2827,8 +2811,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
u32 txdctl = er32(TXDCTL(0));
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
- /*
- * set up some performance related parameters to encourage the
+ /* set up some performance-related parameters to encourage the
* hardware to use the bus more efficiently in bursts; this depends
* on tx_int_delay being enabled,
* wthresh = 1 ==> burst write is disabled to avoid Tx stalls
@@ -2845,8 +2828,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
tarc = er32(TARC(0));
- /*
- * set the speed mode bit, we'll clear it if we're not at
+ /* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
#define SPEED_MODE_BIT (1 << 21)
@@ -2967,8 +2949,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
rfctl |= E1000_RFCTL_EXTEN;
ew32(RFCTL, rfctl);
- /*
- * 82571 and greater support packet-split where the protocol
+ /* 82571 and greater support packet-split where the protocol
* header is placed in skb->data and the packet data is
* placed in pages hanging off of skb_shinfo(skb)->nr_frags.
* In the case of a non-split, skb->data is linearly filled,
@@ -3016,7 +2997,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
/* This is useful for sniffing bad packets. */
if (adapter->netdev->features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
- * in e1000e_set_rx_mode */
+ * in e1000e_set_rx_mode
+ */
rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
E1000_RCTL_BAM | /* RX All Bcast Pkts */
E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3071,8 +3053,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
usleep_range(10000, 20000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
- /*
- * set the writeback threshold (only takes effect if the RDTR
+ /* set the writeback threshold (only takes effect if the RDTR
* is set). set GRAN=1 and write back up to 0x4 worth, and
* enable prefetching of 0x20 Rx descriptors
* granularity = 01
@@ -3083,8 +3064,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
- /*
- * override the delay timers for enabling bursting, only if
+ /* override the delay timers for enabling bursting, only if
* the value was not set by the user via module options
*/
if (adapter->rx_int_delay == DEFAULT_RDTR)
@@ -3108,8 +3088,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(CTRL_EXT, ctrl_ext);
e1e_flush();
- /*
- * Setup the HW Rx Head and Tail Descriptor Pointers and
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
rdba = rx_ring->dma;
@@ -3130,8 +3109,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
ew32(RXCSUM, rxcsum);
if (adapter->hw.mac.type == e1000_pch2lan) {
- /*
- * With jumbo frames, excessive C-state transition
+ /* With jumbo frames, excessive C-state transition
* latencies result in dropped transactions.
*/
if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3216,8 +3194,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- /*
- * write the addresses in reverse order to avoid write
+ /* write the addresses in reverse order to avoid write
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
@@ -3269,8 +3246,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
} else {
- /*
- * Write addresses to the MTA, if the attempt fails
+ /* Write addresses to the MTA, if the attempt fails
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
@@ -3279,8 +3255,7 @@ static void e1000e_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- /*
- * Write addresses to available RAR registers, if there is not
+ /* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
@@ -3315,8 +3290,7 @@ static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
for (i = 0; i < 32; i++)
ew32(RETA(i), 0);
- /*
- * Disable raw packet checksumming so that RSS hash is placed in
+ /* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback.
*/
rxcsum = er32(RXCSUM);
@@ -3408,8 +3382,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
- /*
- * To maintain wire speed transmits, the Tx FIFO should be
+ /* To maintain wire speed transmits, the Tx FIFO should be
* large enough to accommodate two full transmit packets,
* rounded up to the next 1KB and expressed in KB. Likewise,
* the Rx FIFO should be large enough to accommodate at least
@@ -3421,8 +3394,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
tx_space = pba >> 16;
/* lower 16 bits has Rx packet buffer allocation size in KB */
pba &= 0xffff;
- /*
- * the Tx fifo also stores 16 bytes of information about the Tx
+ /* the Tx fifo also stores 16 bytes of information about the Tx
* packet; don't include the Ethernet FCS because hardware appends it
*/
min_tx_space = (adapter->max_frame_size +
@@ -3435,8 +3407,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
min_rx_space = ALIGN(min_rx_space, 1024);
min_rx_space >>= 10;
- /*
- * If current Tx allocation is less than the min Tx FIFO size,
+ /* If current Tx allocation is less than the min Tx FIFO size,
* and the min Tx FIFO size is less than the current Rx FIFO
* allocation, take space away from current Rx allocation
*/
@@ -3444,8 +3415,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
((min_tx_space - tx_space) < pba)) {
pba -= min_tx_space - tx_space;
- /*
- * if short on Rx space, Rx wins and must trump Tx
+ /* if short on Rx space, Rx wins and must trump Tx
* adjustment
*/
if (pba < min_rx_space)
@@ -3455,8 +3425,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
ew32(PBA, pba);
}
- /*
- * flow control settings
+ /* flow control settings
*
* The high water mark must be low enough to fit one full frame
* (or the size used for early receive) above it in the Rx FIFO.
@@ -3490,8 +3459,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
fc->low_water = fc->high_water - 8;
break;
case e1000_pchlan:
- /*
- * Workaround PCH LOM adapter hangs with certain network
+ /* Workaround PCH LOM adapter hangs with certain network
* loads. If hangs persist, try disabling Tx flow control.
*/
if (adapter->netdev->mtu > ETH_DATA_LEN) {
@@ -3516,8 +3484,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
- /*
- * Alignment of Tx data is on an arbitrary byte boundary with the
+ /* Alignment of Tx data is on an arbitrary byte boundary with the
* maximum size per Tx descriptor limited only to the transmit
* allocation of the packet buffer minus 96 bytes with an upper
* limit of 24KB due to receive synchronization limitations.
@@ -3525,8 +3492,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
24 << 10);
- /*
- * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+ /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
*/
if (adapter->itr_setting & 0x3) {
@@ -3549,8 +3515,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
- /*
- * For parts with AMT enabled, let the firmware know
+ /* For parts with AMT enabled, let the firmware know
* that the network interface is in control
*/
if (adapter->flags & FLAG_HAS_AMT)
@@ -3579,8 +3544,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
u16 phy_data = 0;
- /*
- * speed up time to link by disabling smart power down, ignore
+ /* speed up time to link by disabling smart power down, ignore
* the return value of this function because there is nothing
* different we would do if it failed
*/
@@ -3628,8 +3592,7 @@ static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
/* execute the writes immediately */
e1e_flush();
- /*
- * due to rare timing issues, write to TIDV/RDTR again to ensure the
+ /* due to rare timing issues, write to TIDV/RDTR again to ensure the
* write is successful
*/
ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -3647,8 +3610,7 @@ void e1000e_down(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 tctl, rctl;
- /*
- * signal that we're down so the interrupt handler does not
+ /* signal that we're down so the interrupt handler does not
* reschedule our watchdog timer
*/
set_bit(__E1000_DOWN, &adapter->state);
@@ -3691,8 +3653,7 @@ void e1000e_down(struct e1000_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
- /*
- * TODO: for power management, we could drop the link and
+ /* TODO: for power management, we could drop the link and
* pci_disable_device here.
*/
}
@@ -3715,7 +3676,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
+static int e1000_sw_init(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3755,8 +3716,7 @@ static irqreturn_t e1000_intr_msi_test(int irq, void *data)
e_dbg("icr is %08X\n", icr);
if (icr & E1000_ICR_RXSEQ) {
adapter->flags &= ~FLAG_MSI_TEST_FAILED;
- /*
- * Force memory writes to complete before acknowledging the
+ /* Force memory writes to complete before acknowledging the
* interrupt is handled.
*/
wmb();
@@ -3786,7 +3746,8 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
e1000e_reset_interrupt_capability(adapter);
/* Assume that the test fails, if it succeeds then the test
- * MSI irq handler will unset this flag */
+ * MSI irq handler will unset this flag
+ */
adapter->flags |= FLAG_MSI_TEST_FAILED;
err = pci_enable_msi(adapter->pdev);
@@ -3800,8 +3761,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
goto msi_test_failed;
}
- /*
- * Force memory writes to complete before enabling and firing an
+ /* Force memory writes to complete before enabling and firing an
* interrupt.
*/
wmb();
@@ -3901,8 +3861,7 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- /*
- * If AMT is enabled, let the firmware know that the network
+ /* If AMT is enabled, let the firmware know that the network
* interface is now open and reset the part to a known state.
*/
if (adapter->flags & FLAG_HAS_AMT) {
@@ -3923,8 +3882,7 @@ static int e1000_open(struct net_device *netdev)
PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
- /*
- * before we allocate an interrupt, we must be ready to handle it.
+ /* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
* as soon as we call pci_request_irq, so we have to setup our
* clean_rx handler before we do so.
@@ -3935,8 +3893,7 @@ static int e1000_open(struct net_device *netdev)
if (err)
goto err_req_irq;
- /*
- * Work around PCIe errata with MSI interrupts causing some chipsets to
+ /* Work around PCIe errata with MSI interrupts causing some chipsets to
* ignore e1000e MSI messages, which means we need to test our MSI
* interrupt now
*/
@@ -4017,16 +3974,14 @@ static int e1000_close(struct net_device *netdev)
e1000e_free_tx_resources(adapter->tx_ring);
e1000e_free_rx_resources(adapter->rx_ring);
- /*
- * kill manageability vlan ID if supported, but not if a vlan with
+ /* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
if (adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
- /*
- * If AMT is enabled, let the firmware know that the network
+ /* If AMT is enabled, let the firmware know that the network
* interface is now closed
*/
if ((adapter->flags & FLAG_HAS_AMT) &&
@@ -4065,8 +4020,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
/* activate the work around */
e1000e_set_laa_state_82571(&adapter->hw, 1);
- /*
- * Hold a copy of the LAA in RAR[14] This is done so that
+ /* Hold a copy of the LAA in RAR[14]. This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed (in e1000_watchdog), the actual LAA is in one
* of the RARs and no incoming packets directed to this port
@@ -4099,10 +4053,13 @@ static void e1000e_update_phy_task(struct work_struct *work)
e1000_get_phy_info(&adapter->hw);
}
-/*
+/**
+ * e1000_update_phy_info - timer callback to update PHY info
+ * @data: pointer to adapter cast into an unsigned long
+ *
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
- */
+ **/
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -4129,8 +4086,7 @@ static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
if (ret_val)
return;
- /*
- * A page set is expensive so check if already on desired page.
+ /* A page set is expensive so check if already on desired page.
* If not, set to the page with the PHY status registers.
*/
hw->phy.addr = 1;
@@ -4201,8 +4157,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- /*
- * Prevent stats update while adapter is being reset, or if the pci
+ /* Prevent stats update while adapter is being reset, or if the pci
* connection is down.
*/
if (adapter->link_speed == 0)
@@ -4270,8 +4225,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Rx Errors */
- /*
- * RLEC on some newer hardware can be incorrect so build
+ /* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC
*/
netdev->stats.rx_errors = adapter->stats.rxerrc +
@@ -4323,8 +4277,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
if (ret_val)
e_warn("Error reading PHY register\n");
} else {
- /*
- * Do not read PHY registers if link is not up
+ /* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
*/
phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
@@ -4362,8 +4315,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
bool link_active = false;
s32 ret_val = 0;
- /*
- * get_link_status is set on LSC (link status) interrupt or
+ /* get_link_status is set on LSC (link status) interrupt or
* Rx sequence error interrupt. get_link_status will stay
* false until the check_for_link establishes link
* for copper adapters ONLY
@@ -4415,8 +4367,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- /*
- * With 82574 controllers, PHY needs to be checked periodically
+ /* With 82574 controllers, PHY needs to be checked periodically
* for hung state and reset, if two calls return true
*/
if (e1000_check_phy_82574(hw))
@@ -4484,8 +4435,7 @@ static void e1000_watchdog_task(struct work_struct *work)
&adapter->link_speed,
&adapter->link_duplex);
e1000_print_link_info(adapter);
- /*
- * On supported PHYs, check for duplex mismatch only
+ /* On supported PHYs, check for duplex mismatch only
* if link has autonegotiated at 10/100 half
*/
if ((hw->phy.type == e1000_phy_igp_3 ||
@@ -4515,8 +4465,7 @@ static void e1000_watchdog_task(struct work_struct *work)
break;
}
- /*
- * workaround: re-program speed mode bit after
+ /* workaround: re-program speed mode bit after
* link-up event
*/
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
@@ -4527,8 +4476,7 @@ static void e1000_watchdog_task(struct work_struct *work)
ew32(TARC(0), tarc0);
}
- /*
- * disable TSO for pcie and 10/100 speeds, to avoid
+ /* disable TSO for pcie and 10/100 speeds, to avoid
* some hardware issues
*/
if (!(adapter->flags & FLAG_TSO_FORCE)) {
@@ -4549,16 +4497,14 @@ static void e1000_watchdog_task(struct work_struct *work)
}
}
- /*
- * enable transmits in the hardware, need to do this
+ /* enable transmits in the hardware, need to do this
* after setting TARC(0)
*/
tctl = er32(TCTL);
tctl |= E1000_TCTL_EN;
ew32(TCTL, tctl);
- /*
- * Perform any post-link-up configuration before
+ /* Perform any post-link-up configuration before
* reporting link up.
*/
if (phy->ops.cfg_on_link_up)
@@ -4609,8 +4555,7 @@ link_up:
if (!netif_carrier_ok(netdev) &&
(e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
- /*
- * We've lost link, so the controller stops DMA,
+ /* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
* (Do the reset outside of interrupt context).
@@ -4622,8 +4567,7 @@ link_up:
/* Simple mode for Interrupt Throttle Rate (ITR) */
if (adapter->itr_setting == 4) {
- /*
- * Symmetric Tx/Rx gets a reduced ITR=2000;
+ /* Symmetric Tx/Rx gets a reduced ITR=2000;
* Total asymmetrical Tx or Rx gets ITR=8000;
* everyone else is between 2000-8000.
*/
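
A minimal sketch of how this simple mode can map traffic symmetry onto an interrupt rate (adapter->gotc/gorc are this driver's Tx/Rx byte counters; the exact 2000/6000 scaling below is illustrative):

	u32 goc = (adapter->gotc + adapter->gorc) / 10000;
	u32 dif = (adapter->gotc > adapter->gorc ?
		   adapter->gotc - adapter->gorc :
		   adapter->gorc - adapter->gotc) / 10000;
	/* dif == 0 (symmetric) gives 2000, dif == goc (fully
	 * asymmetric) gives 8000, everything else lands in between
	 */
	u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;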
@@ -4648,8 +4592,7 @@ link_up:
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = true;
- /*
- * With 82571 controllers, LAA may be overwritten due to controller
+ /* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0]
*/
if (e1000e_get_laa_state_82571(hw))
@@ -4948,8 +4891,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
- /*
- * Force memory writes to complete before letting h/w
+ /* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
@@ -4963,8 +4905,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
else
writel(i, tx_ring->tail);
- /*
- * we need this if more than one processor can write to our tail
+ /* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
@@ -5014,15 +4955,13 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
struct e1000_adapter *adapter = tx_ring->adapter;
netif_stop_queue(adapter->netdev);
- /*
- * Herbert's original patch had:
+ /* Herbert's original patch had:
* smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it.
*/
smp_mb();
- /*
- * We need to check again in a case another CPU has just
+ /* We need to check again in case another CPU has just
* made room available.
*/
if (e1000_desc_unused(tx_ring) < size)
@@ -5067,18 +5006,26 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ /* The minimum packet size with TCTL.PSP set is 17 bytes so
+ * pad skb in order to meet this minimum size requirement
+ */
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
+ }
+
mss = skb_shinfo(skb)->gso_size;
if (mss) {
u8 hdr_len;
- /*
- * TSO Workaround for 82571/2/3 Controllers -- if skb->data
+ /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
* points to just header, pull a few bytes of payload from
* frags into skb->data
*/
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- /*
- * we do this workaround for ES2LAN, but it is un-necessary,
+ /* we do this workaround for ES2LAN, but it is unnecessary,
* avoiding it could save a lot of cycles
*/
if (skb->data_len && (hdr_len == len)) {
@@ -5109,8 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
- /*
- * need: count + 2 desc gap to keep tail from touching
+ /* need: count + 2 desc gap to keep tail from touching
* head, otherwise try next time
*/
if (e1000_maybe_stop_tx(tx_ring, count + 2))
@@ -5134,8 +5080,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
else if (e1000_tx_csum(tx_ring, skb))
tx_flags |= E1000_TX_FLAGS_CSUM;
- /*
- * Old method was to assume IPv4 packet by default if TSO was enabled.
+ /* Old method was to assume IPv4 packet by default if TSO was enabled.
* 82571 hardware supports TSO capabilities for IPv6 as well...
 * so we can no longer assume; we must check.
*/
@@ -5222,8 +5167,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
/* Rx Errors */
- /*
- * RLEC on some newer hardware can be incorrect so build
+ /* RLEC on some newer hardware can be incorrect so build
* our own version based on RUC and ROC
*/
stats->rx_errors = adapter->stats.rxerrc +
@@ -5292,8 +5236,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev))
e1000e_down(adapter);
- /*
- * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+ /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
 * means we reserve 2 more; this pushes us to allocate from the next
* larger slab size.
* i.e. RXBUFFER_2048 --> size-4096 slab
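
A rough sketch of the allocation arithmetic the note describes (the 16- and 2-byte reserves are as stated above; roundup_pow_of_two stands in for the slab allocator's power-of-two size classes):

	unsigned int rx_buffer_len = 2048;                  /* RXBUFFER_2048 */
	unsigned int alloc_size = rx_buffer_len + 16 + 2;   /* skb reserve + NET_IP_ALIGN = 2066 */
	unsigned int slab = roundup_pow_of_two(alloc_size); /* 4096, i.e. size-4096 */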
@@ -5555,8 +5498,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (adapter->hw.phy.type == e1000_phy_igp_3)
e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
e1000e_release_hw_control(adapter);
@@ -5583,8 +5525,7 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
- /*
- * The pci-e switch on some quad port adapters will report a
+ /* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
@@ -5613,8 +5554,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
#else
static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
- /*
- * Both device and parent should have the same ASPM setting.
+ /* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, state);
@@ -5708,8 +5648,7 @@ static int __e1000_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -5837,7 +5776,10 @@ static irqreturn_t e1000_intr_msix(int irq, void *data)
return IRQ_HANDLED;
}
-/*
+/**
+ * e1000_netpoll
+ * @netdev: network interface device structure
+ *
* Polling 'interrupt' - used by things like netconsole to send skbs
* without having to re-enable interrupts. It's not called while
* the interrupt routine is executing.
@@ -5962,8 +5904,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
netif_device_attach(netdev);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -6083,8 +6024,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit e1000_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
@@ -6262,14 +6202,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (e1000e_enable_mng_pass_thru(&adapter->hw))
adapter->flags |= FLAG_MNG_PT_ENABLED;
- /*
- * before reading the NVM, reset the controller to
+ /* before reading the NVM, reset the controller to
* put the device in a known good starting state
*/
adapter->hw.mac.ops.reset_hw(&adapter->hw);
- /*
- * systems with ASPM and others may see the checksum fail on the first
+ /* systems with ASPM and others may see the checksum fail on the first
* attempt. Let's give it a few tries
*/
for (i = 0;; i++) {
@@ -6324,8 +6262,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->rx_ring->count = E1000_DEFAULT_RXD;
adapter->tx_ring->count = E1000_DEFAULT_TXD;
- /*
- * Initial Wake on LAN setting - If APM wake is enabled in
+ /* Initial Wake on LAN setting - If APM wake is enabled in
* the EEPROM, enable the ACPI Magic Packet filter
*/
if (adapter->flags & FLAG_APME_IN_WUC) {
@@ -6349,8 +6286,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
- /*
- * now that we have the eeprom settings, apply the special cases
+ /* now that we have the eeprom settings, apply the special cases
* where the eeprom may be wrong or the board simply won't support
* wake on lan on a particular port
*/
@@ -6367,8 +6303,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
e1000e_reset(adapter);
- /*
- * If the controller has AMT, do not set DRV_LOAD until the interface
+ /* If the controller has AMT, do not set DRV_LOAD until the interface
* is up. For all other cases, let the f/w know that the h/w is now
* under the control of the driver.
*/
@@ -6425,14 +6360,13 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit e1000_remove(struct pci_dev *pdev)
+static void e1000_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
bool down = test_bit(__E1000_DOWN, &adapter->state);
- /*
- * The timers may be rescheduled, so explicitly disable them
+ /* The timers may be rescheduled, so explicitly disable them
* from being rescheduled.
*/
if (!down)
@@ -6457,8 +6391,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
- /*
- * Release control of h/w to f/w. If f/w is AMT enabled, this
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
e1000e_release_hw_control(adapter);
@@ -6578,7 +6511,7 @@ static struct pci_driver e1000_driver = {
.name = e1000e_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
- .remove = __devexit_p(e1000_remove),
+ .remove = e1000_remove,
#ifdef CONFIG_PM
.driver = {
.pm = &e1000_pm_ops,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a969f1af1b4e..b6468804cb2e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -279,8 +279,7 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
e1e_flush();
udelay(1);
- /*
- * Read "Status Register" repeatedly until the LSB is cleared.
+ /* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out.
@@ -321,8 +320,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u32 i, eerd = 0;
s32 ret_val = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -364,8 +362,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 ret_val;
u16 widx = 0;
- /*
- * A check for invalid values: offset too large, too many words,
+ /* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
@@ -393,8 +390,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
e1000_standby_nvm(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
+ /* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
@@ -461,8 +457,7 @@ s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
return ret_val;
}
- /*
- * if nvm_data is not ptr guard the PBA must be in legacy format which
+ /* if nvm_data is not the ptr guard, the PBA must be in legacy format, which
* means pba_ptr is actually our second data word for the PBA number
* and we can decode it into an ascii string
*/
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index dfbfa7fd98c3..89d536dd7ff5 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -32,11 +32,9 @@
#include "e1000.h"
-/*
- * This is the only thing that needs to be changed to adjust the
+/* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
-
#define E1000_MAX_NIC 32
#define OPTION_UNSET -1
@@ -49,22 +47,19 @@ module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
-/*
- * All parameters are treated the same, as an integer array of values.
+/* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
-
#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
#define E1000_PARAM(X, desc) \
- static int __devinitdata X[E1000_MAX_NIC+1] \
+ static int X[E1000_MAX_NIC+1] \
= E1000_PARAM_INIT; \
static unsigned int num_##X; \
module_param_array_named(X, X, int, &num_##X, 0); \
MODULE_PARM_DESC(X, desc);
-/*
- * Transmit Interrupt Delay in units of 1.024 microseconds
+/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non-zero
*
* Valid Range: 0-65535
@@ -74,8 +69,7 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
-/*
- * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@@ -84,8 +78,7 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
-/*
- * Receive Interrupt Delay in units of 1.024 microseconds
+/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
@@ -94,8 +87,7 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
-/*
- * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@@ -103,8 +95,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
-/*
- * Interrupt Throttle Rate (interrupts/sec)
+/* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative
*/
@@ -113,8 +104,7 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_ITR 100000
#define MIN_ITR 100
-/*
- * IntMode (Interrupt Mode)
+/* IntMode (Interrupt Mode)
*
* Valid Range: varies depending on kernel configuration & hardware support
*
@@ -132,8 +122,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
#define MAX_INTMODE 2
#define MIN_INTMODE 0
-/*
- * Enable Smart Power Down of the PHY
+/* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
@@ -141,8 +130,7 @@ E1000_PARAM(IntMode, "Interrupt Mode");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
-/*
- * Enable Kumeran Lock Loss workaround
+/* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*
@@ -150,8 +138,7 @@ E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
*/
E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
-/*
- * Write Protect NVM
+/* Write Protect NVM
*
* Valid Range: 0, 1
*
@@ -159,8 +146,7 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*/
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
-/*
- * Enable CRC Stripping
+/* Enable CRC Stripping
*
* Valid Range: 0, 1
*
@@ -186,9 +172,9 @@ struct e1000_option {
} arg;
};
-static int __devinit e1000_validate_option(unsigned int *value,
- const struct e1000_option *opt,
- struct e1000_adapter *adapter)
+static int e1000_validate_option(unsigned int *value,
+ const struct e1000_option *opt,
+ struct e1000_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -249,7 +235,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
**/
-void __devinit e1000e_check_options(struct e1000_adapter *adapter)
+void e1000e_check_options(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
int bd = adapter->bd_number;
@@ -351,8 +337,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
if (num_InterruptThrottleRate > bd) {
adapter->itr = InterruptThrottleRate[bd];
- /*
- * Make sure a message is printed for non-special
+ /* Make sure a message is printed for non-special
* values. And in case of an invalid option, display
* warning, use default and go through itr/itr_setting
* adjustment logic below
@@ -361,14 +346,12 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&adapter->itr, &opt, adapter))
adapter->itr = opt.def;
} else {
- /*
- * If no option specified, use default value and go
+ /* If no option specified, use default value and go
* through the logic below to adjust itr/itr_setting
*/
adapter->itr = opt.def;
- /*
- * Make sure a message is printed for non-special
+ /* Make sure a message is printed for non-special
* default values
*/
if (adapter->itr > 4)
@@ -400,8 +383,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
opt.name);
break;
default:
- /*
- * Save the setting, because the dynamic bits
+ /* Save the setting, because the dynamic bits
* change itr.
*
* Clear the lower two bits because
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fc62a3f3a5be..28b38ff37e84 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -193,8 +193,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -204,8 +203,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
ew32(MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
@@ -225,8 +223,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
}
*data = (u16) mdic;
- /*
- * Allow some time after each MDIC transaction to avoid
+ /* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
@@ -253,8 +250,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PARAM;
}
- /*
- * Set up Op-code, Phy Address, and register offset in the MDI
+ /* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@@ -265,8 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
ew32(MDIC, mdic);
- /*
- * Poll the ready bit to see if the MDI read completed
+ /* Poll the ready bit to see if the MDI write completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
@@ -285,8 +280,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PHY;
}
- /*
- * Allow some time after each MDIC transaction to avoid
+ /* Allow some time after each MDIC transaction to avoid
* reading duplicate data in the next MDIC transaction.
*/
if (hw->mac.type == e1000_pch2lan)
@@ -708,8 +702,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
if (ret_val)
return ret_val;
phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK;
- /*
- * Options:
+ /* Options:
* 0 - Auto (default)
* 1 - MDI mode
* 2 - MDI-X mode
@@ -754,8 +747,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if (phy->type != e1000_phy_bm)
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
- /*
- * Options:
+ /* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@@ -780,8 +772,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
break;
}
- /*
- * Options:
+ /* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@@ -818,8 +809,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
if ((phy->type == e1000_phy_m88) &&
(phy->revision < E1000_REVISION_4) &&
(phy->id != BME1000_E_PHY_ID_R2)) {
- /*
- * Force TX_CLK in the Extended PHY Specific Control Register
+ /* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -899,8 +889,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+ /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
* timeout issues when LFS is enabled.
*/
msleep(100);
@@ -936,8 +925,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
- /*
- * when autonegotiation advertisement is only 1000Mbps then we
+ /* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default.
*/
@@ -1001,16 +989,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
}
- /*
- * Need to parse both autoneg_advertised and fc and set up
+ /* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
- /*
- * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ /* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
@@ -1056,8 +1042,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
- /*
- * Check for a software override of the flow control settings, and
+ /* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -1076,15 +1061,13 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
*/
switch (hw->fc.current_mode) {
case e1000_fc_none:
- /*
- * Flow control (Rx & Tx) is completely disabled by a
+ /* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
- /*
- * Rx Flow control is enabled, and Tx Flow control is
+ /* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*
* Since there really isn't a way to advertise that we are
@@ -1096,16 +1079,14 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
- /*
- * Tx Flow control is enabled, and Rx Flow control is
+ /* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
- /*
- * Flow control (both Rx and Tx) is enabled by a software
+ /* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
@@ -1142,14 +1123,12 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
- /*
- * Perform some bounds checking on the autoneg advertisement
+ /* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
- /*
- * If autoneg_advertised is zero, we assume it was not defaulted
+ /* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (!phy->autoneg_advertised)
@@ -1163,8 +1142,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
}
e_dbg("Restarting Auto-Neg\n");
- /*
- * Restart auto-negotiation by setting the Auto Neg Enable bit and
+ /* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -1176,8 +1154,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Does the user want to wait for Auto-Neg to complete here, or
+ /* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->autoneg_wait_to_complete) {
@@ -1208,16 +1185,14 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
bool link;
if (hw->mac.autoneg) {
- /*
- * Setup autoneg and flow control advertisement and perform
+ /* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
- /*
- * PHY will be set to 10H, 10F, 100H or 100F
+ /* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
e_dbg("Forcing Speed and Duplex\n");
@@ -1228,8 +1203,7 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
}
}
- /*
- * Check link status. Wait up to 100 microseconds for link to become
+ /* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
@@ -1273,8 +1247,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Clear Auto-Crossover to force MDI manually. IGP requires MDI
+ /* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -1328,8 +1301,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+ /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1370,8 +1342,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (hw->phy.type != e1000_phy_m88) {
e_dbg("Link taking longer than expected.\n");
} else {
- /*
- * We didn't get link.
+ /* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
@@ -1398,8 +1369,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Resetting the phy means we need to re-force TX_CLK in the
+ /* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
* the reset value of 2.5MHz.
*/
@@ -1408,8 +1378,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * In addition, we must re-enable CRS on Tx for both half and full
+ /* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1573,8 +1542,7 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
if (ret_val)
return ret_val;
- /*
- * LPLU and SmartSpeed are mutually exclusive. LPLU is used
+ /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained.
@@ -1702,8 +1670,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
s32 ret_val;
u16 data, offset, mask;
- /*
- * Polarity is determined based on the speed of
+ /* Polarity is determined based on the speed of
* our connection.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
@@ -1715,8 +1682,7 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
offset = IGP01E1000_PHY_PCS_INIT_REG;
mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
- /*
- * This really only applies to 10Mbps since
+ /* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1745,8 +1711,7 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, offset, mask;
- /*
- * Polarity is determined based on the reversal feature being enabled.
+ /* Polarity is determined based on the reversal feature being enabled.
*/
if (phy->polarity_correction) {
offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -1791,8 +1756,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
msleep(100);
}
- /*
- * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+ /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
@@ -1814,15 +1778,13 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
- /*
- * Some PHYs require the PHY_STATUS register to be read
+ /* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
if (ret_val)
- /*
- * If the first read fails, another entity may have
+ /* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
@@ -1913,8 +1875,7 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
if (ret_val)
return ret_val;
- /*
- * Getting bits 15:9, which represent the combination of
+ /* Getting bits 15:9, which represent the combination of
* coarse and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length.
@@ -2285,15 +2246,13 @@ s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
e1e_wphy(hw, 0x1796, 0x0008);
/* Change cg_icount + enable integbp for channels BCD */
e1e_wphy(hw, 0x1798, 0xD008);
- /*
- * Change cg_icount + enable integbp + change prop_factor_master
+ /* Change cg_icount + enable integbp + change prop_factor_master
* to 8 for channel A
*/
e1e_wphy(hw, 0x1898, 0xD918);
/* Disable AHT in Slave mode on channel A */
e1e_wphy(hw, 0x187A, 0x0800);
- /*
- * Enable LPLU and disable AN to 1000 in non-D0a states,
+ /* Enable LPLU and disable AN to 1000 in non-D0a states,
* Enable SPD+B2B
*/
e1e_wphy(hw, 0x0019, 0x008D);
@@ -2417,8 +2376,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
e1000e_get_phy_id(hw);
phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
- /*
- * If phy_type is valid, break - we found our
+ /* If phy_type is valid, break - we found our
* PHY address
*/
if (phy_type != e1000_phy_unknown)
@@ -2478,8 +2436,7 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
- /*
- * Page select is register 31 for phy address 1 and 22 for
+ /* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
* phy address 1.
*/
@@ -2537,8 +2494,7 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
if (offset > MAX_PHY_MULTI_PAGE_REG) {
u32 page_shift, page_select;
- /*
- * Page select is register 31 for phy address 1 and 22 for
+ /* Page select is register 31 for phy address 1 and 22 for
* phy address 2 and 3. Page select is shifted only for
* phy address 1.
*/
@@ -2683,8 +2639,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
return ret_val;
}
- /*
- * Enable both PHY wakeup mode and Wakeup register page writes.
+ /* Enable both PHY wakeup mode and Wakeup register page writes.
* Prevent a power state change by disabling ME and Host PHY wakeup.
*/
temp = *phy_reg;
@@ -2698,8 +2653,7 @@ s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
return ret_val;
}
- /*
- * Select Host Wakeup Registers page - caller now able to write
+ /* Select Host Wakeup Registers page - caller now able to write
* registers on the Wakeup registers page
*/
return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
@@ -3038,8 +2992,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if (page == HV_INTC_FC_PAGE_START)
page = 0;
- /*
- * Workaround MDIO accesses being disabled after entering IEEE
+ /* Workaround MDIO accesses being disabled after entering IEEE
* Power Down (when bit 11 of the PHY Control register is set)
*/
if ((hw->phy.type == e1000_phy_82578) &&
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 97c197fd4a8e..624476cfa727 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -34,6 +34,4 @@ obj-$(CONFIG_IGB) += igb.o
igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o
-
-igb-$(CONFIG_IGB_PTP) += igb_ptp.o
+ e1000_i210.o igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index ca4641e2f748..fdaaf2709d0a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -319,6 +319,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
nvm->ops.acquire = igb_acquire_nvm_i210;
nvm->ops.release = igb_release_nvm_i210;
nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
nvm->ops.valid_led_default = igb_valid_led_default_i210;
break;
case e1000_i211:
@@ -1027,6 +1028,15 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
* continue to check for link.
*/
hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+ /* Configure Flow Control now that Auto-Neg has completed.
+ * First, we need to restore the desired flow control
+ * settings because we may have had to re-autoneg with a
+ * different link partner.
+ */
+ ret_val = igb_config_fc_after_link_up(hw);
+ if (ret_val)
+ hw_dbg("Error configuring flow control\n");
} else {
ret_val = igb_check_for_copper_link(hw);
}
@@ -1277,12 +1287,20 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
u32 ctrl;
s32 ret_val;
+ u32 phpm_reg;
ctrl = rd32(E1000_CTRL);
ctrl |= E1000_CTRL_SLU;
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
+ /* Clear Go Link Disconnect bit */
+ if (hw->mac.type >= e1000_82580) {
+ phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+ phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+ wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ }
+
ret_val = igb_setup_serdes_link_82575(hw);
if (ret_val)
goto out;
@@ -1336,7 +1354,7 @@ out:
**/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
- u32 ctrl_ext, ctrl_reg, reg;
+ u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
s32 ret_val = E1000_SUCCESS;
u16 data;
@@ -1424,27 +1442,45 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
- /*
- * We force flow control to prevent the CTRL register values from being
- * overwritten by the autonegotiated flow control values
- */
- reg |= E1000_PCS_LCTL_FORCE_FCTRL;
-
if (pcs_autoneg) {
/* Set PCS register for autoneg */
reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+ /* Disable force flow control for autoneg */
+ reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+ /* Configure flow control advertisement for autoneg */
+ anadv_reg = rd32(E1000_PCS_ANADV);
+ anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+ switch (hw->fc.requested_mode) {
+ case e1000_fc_full:
+ case e1000_fc_rx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ anadv_reg |= E1000_TXCW_PAUSE;
+ break;
+ case e1000_fc_tx_pause:
+ anadv_reg |= E1000_TXCW_ASM_DIR;
+ break;
+ default:
+ break;
+ }
+ wr32(E1000_PCS_ANADV, anadv_reg);
+
hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
} else {
/* Set PCS register for forced link */
reg |= E1000_PCS_LCTL_FSD; /* Force Speed */
+ /* Force flow control for forced link */
+ reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
}
wr32(E1000_PCS_LCTL, reg);
- if (!igb_sgmii_active_82575(hw))
+ if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
igb_force_mac_fc(hw);
return ret_val;
@@ -1918,6 +1954,12 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
hw->dev_spec._82575.global_device_reset = false;
+ /* due to hw errata, global device reset doesn't always
+ * work on 82580
+ */
+ if (hw->mac.type == e1000_82580)
+ global_device_reset = false;
+
/* Get current control state. */
ctrl = rd32(E1000_CTRL);
@@ -2233,19 +2275,16 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
/* enable or disable per user setting */
if (!(hw->dev_spec._82575.eee_disable)) {
- ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
- E1000_IPCNFG_EEE_100M_AN);
- eeer |= (E1000_EEER_TX_LPI_EN |
- E1000_EEER_RX_LPI_EN |
+ u32 eee_su = rd32(E1000_EEE_SU);
+
+ ipcnfg |= (E1000_IPCNFG_EEE_1G_AN | E1000_IPCNFG_EEE_100M_AN);
+ eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
E1000_EEER_LPI_FC);
- /* keep the LPI clock running before EEE is enabled */
- if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) {
- u32 eee_su;
- eee_su = rd32(E1000_EEE_SU);
- eee_su &= ~E1000_EEE_SU_LPI_CLK_STP;
- wr32(E1000_EEE_SU, eee_su);
- }
+ /* This bit should not be set in normal operation. */
+ if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+ hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
} else {
ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index e85c453f5428..44b76b3b6816 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index de4b41ec3c40..45dce06eff26 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -431,6 +431,10 @@
#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
#define FLOW_CONTROL_TYPE 0x8808
+/* Transmit Config Word */
+#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */
+
/* 802.1q VLAN Packet Size */
#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMA'd) */
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
@@ -539,6 +543,9 @@
/* mPHY Near End Digital Loopback Override Bit */
#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+#define E1000_PCS_LCTL_FORCE_FCTRL 0x80
+#define E1000_PCS_LSTS_AN_COMPLETE 0x10000
+
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
@@ -636,6 +643,7 @@
/* NVM Word Offsets */
#define NVM_COMPAT 0x0003
#define NVM_ID_LED_SETTINGS 0x0004 /* SERDES output amplitude */
+#define NVM_VERSION 0x0005
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
#define NVM_INIT_CONTROL3_PORT_A 0x0024
@@ -653,6 +661,19 @@
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
+/* NVM version defines */
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_BUILD_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
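
For reference, a minimal decode of a version word using the masks above (assumes the word was read from the NVM_VERSION offset; variable names are illustrative):

	u16 ver;  /* word read from NVM_VERSION via hw->nvm.ops.read() */
	u8 major = (ver & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT; /* bits 15:12 */
	u8 minor = (ver & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT; /* bits 11:4 */
	u8 build = ver & NVM_BUILD_MASK;                      /* bits 3:0 */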
#define E1000_NVM_CFG_DONE_PORT_0 0x040000 /* MNG config cycle done */
#define E1000_NVM_CFG_DONE_PORT_1 0x080000 /* ...for second port */
@@ -860,6 +881,7 @@
#define E1000_EEER_FRC_AN 0x10000000 /* Enable EEE in loopback */
#define E1000_EEER_LPI_FC 0x00040000 /* EEE Enable on FC */
#define E1000_EEE_SU_LPI_CLK_STP 0x00800000 /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG 0x20000000 /* EEE capability negotiation */
/* SerDes Control */
#define E1000_GEN_CTL_READY 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 77a5f939bc74..fbcdbebb0b5f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -35,11 +35,42 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw);
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw);
-static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw);
+/**
+ * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Acquire the HW semaphore to access the PHY or NVM
+ */
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+ s32 ret_val = E1000_SUCCESS;
+ s32 timeout = hw->nvm.word_size + 1;
+ s32 i = 0;
+
+ /* Get the FW semaphore. */
+ for (i = 0; i < timeout; i++) {
+ swsm = rd32(E1000_SWSM);
+ wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+ /* Semaphore acquired if bit latched */
+ if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+ break;
+
+ udelay(50);
+ }
+
+ if (i == timeout) {
+ /* Release semaphores */
+ igb_put_hw_semaphore(hw);
+ hw_dbg("Driver can't access the NVM\n");
+ ret_val = -E1000_ERR_NVM;
+ goto out;
+ }
+
+out:
+ return ret_val;
+}
/**
* igb_acquire_nvm_i210 - Request for access to EEPROM
@@ -68,6 +99,23 @@ void igb_release_nvm_i210(struct e1000_hw *hw)
}
/**
+ * igb_put_hw_semaphore_i210 - Release hardware semaphore
+ * @hw: pointer to the HW structure
+ *
+ * Release hardware semaphore used to access the PHY or NVM
+ */
+static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
+{
+ u32 swsm;
+
+ swsm = rd32(E1000_SWSM);
+
+ swsm &= ~E1000_SWSM_SWESMBI;
+
+ wr32(E1000_SWSM, swsm);
+}
+
+/**
* igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
* @hw: pointer to the HW structure
* @mask: specifies which semaphore to acquire
@@ -138,60 +186,6 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
}
/**
- * igb_get_hw_semaphore_i210 - Acquire hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Acquire the HW semaphore to access the PHY or NVM
- **/
-static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
- s32 ret_val = E1000_SUCCESS;
- s32 timeout = hw->nvm.word_size + 1;
- s32 i = 0;
-
- /* Get the FW semaphore. */
- for (i = 0; i < timeout; i++) {
- swsm = rd32(E1000_SWSM);
- wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
-
- /* Semaphore acquired if bit latched */
- if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
- break;
-
- udelay(50);
- }
-
- if (i == timeout) {
- /* Release semaphores */
- igb_put_hw_semaphore(hw);
- hw_dbg("Driver can't access the NVM\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
- }
-
-out:
- return ret_val;
-}
-
-/**
- * igb_put_hw_semaphore_i210 - Release hardware semaphore
- * @hw: pointer to the HW structure
- *
- * Release hardware semaphore used to access the PHY or NVM
- **/
-static void igb_put_hw_semaphore_i210(struct e1000_hw *hw)
-{
- u32 swsm;
-
- swsm = rd32(E1000_SWSM);
-
- swsm &= ~E1000_SWSM_SWESMBI;
-
- wr32(E1000_SWSM, swsm);
-}
-
-/**
* igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
* @hw: pointer to the HW structure
* @offset: offset of word in the Shadow Ram to read
@@ -229,49 +223,6 @@ s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
- * @hw: pointer to the HW structure
- * @offset: offset within the Shadow RAM to be written to
- * @words: number of words to write
- * @data: 16 bit word(s) to be written to the Shadow RAM
- *
- * Writes data to Shadow RAM at offset using EEWR register.
- *
- * If e1000_update_nvm_checksum is not called after this function , the
- * data will not be committed to FLASH and also Shadow RAM will most likely
- * contain an invalid checksum.
- *
- * If error code is returned, data and Shadow RAM may be inconsistent - buffer
- * partially written.
- **/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
-{
- s32 status = E1000_SUCCESS;
- u16 i, count;
-
- /* We cannot hold synchronization semaphores for too long,
- * because of forceful takeover procedure. However it is more efficient
- * to write in bursts than synchronizing access for each word. */
- for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
- count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
- E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
- status = igb_write_nvm_srwr(hw, offset, count,
- data + i);
- hw->nvm.ops.release(hw);
- } else {
- status = E1000_ERR_SWFW_SYNC;
- }
-
- if (status != E1000_SUCCESS)
- break;
- }
-
- return status;
-}
-
-/**
* igb_write_nvm_srwr - Write to Shadow Ram using EEWR
* @hw: pointer to the HW structure
* @offset: offset within the Shadow Ram to be written to
@@ -329,6 +280,50 @@ out:
}
/**
+ * igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ * @hw: pointer to the HW structure
+ * @offset: offset within the Shadow RAM to be written to
+ * @words: number of words to write
+ * @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ * Writes data to Shadow RAM at offset using EEWR register.
+ *
+ * If e1000_update_nvm_checksum is not called after this function, the
+ * data will not be committed to FLASH and also Shadow RAM will most likely
+ * contain an invalid checksum.
+ *
+ * If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ * partially written.
+ */
+s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
+{
+ s32 status = E1000_SUCCESS;
+ u16 i, count;
+
+ /* We cannot hold synchronization semaphores for too long,
+ * because of forceful takeover procedure. However it is more efficient
+ * to write in bursts than synchronizing access for each word.
+ */
+ for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
+ count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+ E1000_EERD_EEWR_MAX_COUNT : (words - i);
+ if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ status = igb_write_nvm_srwr(hw, offset, count,
+ data + i);
+ hw->nvm.ops.release(hw);
+ } else {
+ status = E1000_ERR_SWFW_SYNC;
+ }
+
+ if (status != E1000_SUCCESS)
+ break;
+ }
+
+ return status;
+}
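
The ternary count computation above is a clamped minimum; with the kernel's min_t helper the same burst sizing could read:

	count = min_t(u16, words - i, E1000_EERD_EEWR_MAX_COUNT);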
+
+/**
* igb_read_nvm_i211 - Read NVM wrapper function for I211
* @hw: pointer to the HW structure
* @address: the word address (aka eeprom offset) to read
@@ -350,16 +345,40 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
- case NVM_ID_LED_SETTINGS:
case NVM_INIT_CTRL_2:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_2_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_INIT_CTRL_4:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_INIT_CTRL_4_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_1_CFG:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_1_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = NVM_LED_0_2_CFG_DEFAULT_I211;
+ ret_val = E1000_SUCCESS;
+ }
break;
- case NVM_COMPAT:
- *data = ID_LED_DEFAULT_I210;
- break;
+ case NVM_ID_LED_SETTINGS:
+ ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ if (ret_val != E1000_SUCCESS) {
+ *data = ID_LED_RESERVED_FFFF;
+ ret_val = E1000_SUCCESS;
+ }
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -423,6 +442,100 @@ s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
}
/**
+ * igb_read_invm_version - Reads iNVM version and image type
+ * @hw: pointer to the HW structure
+ * @invm_ver: version structure for the version read
+ *
+ * Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver)
+{
+ u32 *record = NULL;
+ u32 *next_record = NULL;
+ u32 i = 0;
+ u32 invm_dword = 0;
+ u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+ E1000_INVM_RECORD_SIZE_IN_BYTES);
+ u32 buffer[E1000_INVM_SIZE];
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u16 version = 0;
+
+ /* Read iNVM memory */
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ buffer[i] = invm_dword;
+ }
+
+ /* Read version number */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if the first version location is unused */
+ if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+ version = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if the second version location is unused */
+ else if ((i == 1) &&
+ ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have odd version location
+ * used and it is the last one used
+ */
+ else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+ ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+ (i != 1))) {
+ version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ >> 13;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have even version location
+ * used and it is the last one used
+ */
+ else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+ ((*record & 0x3) == 0)) {
+ version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+
+ if (status == E1000_SUCCESS) {
+ invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ >> E1000_INVM_MAJOR_SHIFT;
+ invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ }
+ /* Read Image Type */
+ for (i = 1; i < invm_blocks; i++) {
+ record = &buffer[invm_blocks - i];
+ next_record = &buffer[invm_blocks - i + 1];
+
+ /* Check if the image type field in the first location is unused */
+ if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+ invm_ver->invm_img_type = 0;
+ status = E1000_SUCCESS;
+ break;
+ }
+ /* Check if we have image type in the last location used */
+ else if ((((*record & 0x3) == 0) &&
+ ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+ ((((*record & 0x3) != 0) && (i != 1)))) {
+ invm_ver->invm_img_type =
+ (*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ return status;
+}
+
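A worked example of the mask arithmetic above, assuming a version word of 0x25 was recovered from the records:

	/* version = 0x25:
	 *   invm_major = (0x25 & E1000_INVM_MAJOR_MASK) >> 4 = 0x20 >> 4 = 2
	 *   invm_minor =  0x25 & E1000_INVM_MINOR_MASK       = 5
	 * i.e. the iNVM reports version 2.5.
	 */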
+/**
* igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
* @hw: pointer to the HW structure
*
@@ -519,6 +632,28 @@ out:
}
/**
+ * igb_pool_flash_update_done_i210 - Poll FLUDONE status
+ * @hw: pointer to the HW structure
+ *
+ * Returns E1000_SUCCESS once the FLUDONE bit is set, or -E1000_ERR_NVM
+ * if the Flash update does not complete in time.
+ */
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = -E1000_ERR_NVM;
+ u32 i, reg;
+
+ for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+ reg = rd32(E1000_EECD);
+ if (reg & E1000_EECD_FLUDONE_I210) {
+ ret_val = E1000_SUCCESS;
+ break;
+ }
+ udelay(5);
+ }
+
+ return ret_val;
+}
+
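With E1000_FLUDONE_ATTEMPTS defined as 20000 elsewhere in this header (value assumed, not shown in this hunk), the loop above bounds the wait at roughly 20000 x 5 us = 100 ms before giving up with -E1000_ERR_NVM.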
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -548,28 +683,6 @@ out:
}
/**
- * igb_pool_flash_update_done_i210 - Pool FLUDONE status.
- * @hw: pointer to the HW structure
- *
- **/
-s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
-{
- s32 ret_val = -E1000_ERR_NVM;
- u32 i, reg;
-
- for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
- reg = rd32(E1000_EECD);
- if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
- break;
- }
- udelay(5);
- }
-
- return ret_val;
-}
-
-/**
* igb_valid_led_default_i210 - Verify a valid default LED config
* @hw: pointer to the HW structure
* @data: pointer to the NVM (EEPROM)
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5dc2bd3f50bc..1c89358a99ab 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -43,6 +43,8 @@ extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data);
+extern s32 igb_read_invm_version(struct e1000_hw *hw,
+ struct e1000_fw_version *invm_ver);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -65,6 +67,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS 8
#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS 1
+#define E1000_INVM_ULT_BYTES_SIZE 8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES 4
+#define E1000_INVM_VER_FIELD_ONE 0x1FF8
+#define E1000_INVM_VER_FIELD_TWO 0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD 0x1F800000
+
+#define E1000_INVM_MAJOR_MASK 0x3F0
+#define E1000_INVM_MINOR_MASK 0xF
+#define E1000_INVM_MAJOR_SHIFT 4
#define ID_LED_DEFAULT_I210 ((ID_LED_OFF1_ON2 << 8) | \
(ID_LED_OFF1_OFF2 << 4) | \
@@ -73,4 +84,10 @@ enum E1000_INVM_STRUCTURE_TYPE {
(ID_LED_DEF1_DEF2 << 4) | \
(ID_LED_DEF1_DEF2))
+/* NVM offset defaults for i211 device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211 0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211 0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 819c145ac762..101e6e4da97f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -839,6 +839,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val = 0;
+ u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
@@ -1040,6 +1041,129 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
}
}
+ /* Check for the case where we have SerDes media and auto-neg is
+ * enabled. In this case, we need to check and see if Auto-Neg
+ * has completed, and if so, how the PHY and link partner has
+ * flow control configured.
+ */
+ if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
+ mac->autoneg) {
+ /* Read the PCS_LSTS and check to see if AutoNeg
+ * has completed.
+ */
+ pcs_status_reg = rd32(E1000_PCS_LSTAT);
+
+ if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+ hw_dbg("PCS Auto Neg has not completed.\n");
+ return ret_val;
+ }
+
+ /* The AutoNeg process has completed, so we now need to
+ * read both the Auto Negotiation Advertisement
+ * Register (PCS_ANADV) and the Auto_Negotiation Base
+ * Page Ability Register (PCS_LPAB) to determine how
+ * flow control was negotiated.
+ */
+ pcs_adv_reg = rd32(E1000_PCS_ANADV);
+ pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+
+ /* Two bits in the Auto Negotiation Advertisement Register
+ * (PCS_ANADV) and two bits in the Auto Negotiation Base
+ * Page Ability Register (PCS_LPAB) determine flow control
+ * for both the PHY and the link partner. The following
+ * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+ * 1999, describes these PAUSE resolution bits and how flow
+ * control is determined based upon these settings.
+ * NOTE: DC = Don't Care
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+ *-------|---------|-------|---------|--------------------
+ * 0 | 0 | DC | DC | e1000_fc_none
+ * 0 | 1 | 0 | DC | e1000_fc_none
+ * 0 | 1 | 1 | 0 | e1000_fc_none
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ * 1 | 0 | 0 | DC | e1000_fc_none
+ * 1 | DC | 1 | DC | e1000_fc_full
+ * 1 | 1 | 0 | 0 | e1000_fc_none
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ *
+ * Are both PAUSE bits set to 1? If so, this implies
+ * Symmetric Flow Control is enabled at both ends. The
+ * ASM_DIR bits are irrelevant per the spec.
+ *
+ * For Symmetric Flow Control:
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | DC | 1 | DC | e1000_fc_full
+ *
+ */
+ if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+ /* Now we need to check if the user selected Rx ONLY
+ * of pause frames. In this case, we had to advertise
+ * FULL flow control because we could not advertise Rx
+ * ONLY. Hence, we must now check to see if we need to
+ * turn OFF the TRANSMISSION of PAUSE frames.
+ */
+ if (hw->fc.requested_mode == e1000_fc_full) {
+ hw->fc.current_mode = e1000_fc_full;
+ hw_dbg("Flow Control = FULL.\n");
+ } else {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ }
+ }
+ /* For receiving PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 0 | 1 | 1 | 1 | e1000_fc_tx_pause
+ */
+ else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_tx_pause;
+ hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+ }
+ /* For transmitting PAUSE frames ONLY.
+ *
+ * LOCAL DEVICE | LINK PARTNER
+ * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+ *-------|---------|-------|---------|--------------------
+ * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
+ */
+ else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+ (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+ !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+ (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+ hw->fc.current_mode = e1000_fc_rx_pause;
+ hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+ } else {
+ /* Per the IEEE spec, at this point flow control
+ * should be disabled.
+ */
+ hw->fc.current_mode = e1000_fc_none;
+ hw_dbg("Flow Control = NONE.\n");
+ }
+
+ /* Now we call a subroutine to actually force the MAC
+ * controller to use the correct flow control settings.
+ */
+ pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+ pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+ wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+
+ ret_val = igb_force_mac_fc(hw);
+ if (ret_val) {
+ hw_dbg("Error forcing flow control settings\n");
+ return ret_val;
+ }
+ }
out:
return ret_val;
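The PAUSE/ASM_DIR resolution spelled out in the tables above can be restated compactly. The helper below is an illustrative sketch, not driver code; note that the driver additionally downgrades the symmetric case to Rx-only when the user did not request full flow control:

	static enum e1000_fc_mode resolve_fc(bool loc_pause, bool loc_asm,
					     bool lp_pause, bool lp_asm)
	{
		if (loc_pause && lp_pause)
			return e1000_fc_full;	  /* symmetric; ASM_DIR don't care */
		if (!loc_pause && loc_asm && lp_pause && lp_asm)
			return e1000_fc_tx_pause; /* we may send PAUSE frames only */
		if (loc_pause && loc_asm && !lp_pause && lp_asm)
			return e1000_fc_rx_pause; /* we honor PAUSE frames only */
		return e1000_fc_none;
	}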
@@ -1391,6 +1515,10 @@ s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
s32 ret_val = 0;
+ /* All MDI settings are supported on 82580 and newer. */
+ if (hw->mac.type >= e1000_82580)
+ goto out;
+
if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
hw_dbg("Invalid MDI setting detected\n");
hw->phy.mdix = 1;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index cbddc4e51e30..e2b2c4b9c951 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -33,6 +33,7 @@
#include "e1000_phy.h"
#include "e1000_nvm.h"
#include "e1000_defines.h"
+#include "e1000_i210.h"
/*
* Functions that should not be called directly from drivers but can be used
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index aa5fcdf3f357..fbb7604db364 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -438,7 +438,7 @@ out:
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
- s32 ret_val;
+ s32 ret_val = -E1000_ERR_NVM;
u16 widx = 0;
/*
@@ -448,22 +448,21 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg("nvm parameter(s) out of bounds\n");
- ret_val = -E1000_ERR_NVM;
- goto out;
+ return ret_val;
}
- ret_val = hw->nvm.ops.acquire(hw);
- if (ret_val)
- goto out;
-
- msleep(10);
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
- ret_val = igb_ready_nvm_eeprom(hw);
+ ret_val = nvm->ops.acquire(hw);
if (ret_val)
- goto release;
+ return ret_val;
+
+ ret_val = igb_ready_nvm_eeprom(hw);
+ if (ret_val) {
+ nvm->ops.release(hw);
+ return ret_val;
+ }
igb_standby_nvm(hw);
@@ -497,13 +496,10 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
break;
}
}
+ usleep_range(1000, 2000);
+ nvm->ops.release(hw);
}
- msleep(10);
-release:
- hw->nvm.ops.release(hw);
-
-out:
return ret_val;
}
@@ -710,3 +706,74 @@ s32 igb_update_nvm_checksum(struct e1000_hw *hw)
out:
return ret_val;
}
+
+/**
+ * igb_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output structure
+ *
+ * Unsupported MAC types return an all-zero version structure.
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
+ u16 fw_version;
+
+ memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+ switch (hw->mac.type) {
+ case e1000_i211:
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ case e1000_82575:
+ case e1000_82576:
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ break;
+ default:
+ return;
+ }
+ /* basic eeprom version numbers */
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
+
+ /* etrack id */
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
+
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i350:
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ + 1), 1, &comb_verh);
+ hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+ 1, &comb_verl);
+
+ /* get Option ROM version if it exists and is valid */
+ if ((comb_verh && comb_verl) &&
+ ((comb_verh != NVM_VER_INVALID) &&
+ (comb_verl != NVM_VER_INVALID))) {
+
+ fw_vers->or_valid = true;
+ fw_vers->or_major =
+ comb_verl >> NVM_COMB_VER_SHFT;
+ fw_vers->or_build =
+ ((comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT));
+ fw_vers->or_patch =
+ comb_verh & NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
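A sketch of how a caller consumes the filled-in structure (igb_set_fw_version() in igb_main.c, reworked later in this patch, does essentially this); the helper name is illustrative only:

	static void example_print_fw(struct e1000_hw *hw)
	{
		struct e1000_fw_version fw;

		igb_get_fw_version(hw, &fw);
		if (fw.or_valid)
			pr_info("fw %d.%d, etrack 0x%08x, OROM %d.%d.%d\n",
				fw.eep_major, fw.eep_minor, fw.etrack_id,
				fw.or_major, fw.or_build, fw.or_patch);
		else
			pr_info("fw %d.%d, etrack 0x%08x\n",
				fw.eep_major, fw.eep_minor, fw.etrack_id);
	}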
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 825b0228cac0..7012d458c6f7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -40,4 +40,20 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
s32 igb_update_nvm_checksum(struct e1000_hw *hw);
+struct e1000_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 3404bc79f4ca..fe76004aca4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1207,20 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
- /*
- * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
- * forced whenever speed and duplex are forced.
- */
- ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
- if (ret_val)
- goto out;
+ /* I210 and I211 devices support Auto-Crossover in forced operation. */
+ if (phy->type != e1000_phy_i210) {
+ /*
+ * Clear Auto-Crossover to force MDI manually. M88E1000
+ * requires MDI forced whenever speed and duplex are forced.
+ */
+ ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ &phy_data);
+ if (ret_val)
+ goto out;
- phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
- ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
- if (ret_val)
- goto out;
+ phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+ ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+ phy_data);
+ if (ret_val)
+ goto out;
- hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+ }
ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
if (ret_val)
@@ -1710,6 +1715,26 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
switch (hw->phy.id) {
case I210_I_PHY_ID:
+ /* Get cable length from PHY Cable Diagnostics Control Reg */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ (I347AT4_PCDL + phy->addr),
+ &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Check if the unit of cable length is meters or cm */
+ ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+ I347AT4_PCDC, &phy_data2);
+ if (ret_val)
+ return ret_val;
+
+ is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+ /* Populate the phy structure with cable length in meters */
+ phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+ phy->cable_length = phy_data / (is_cm ? 100 : 1);
+ break;
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6ac3299bfcb9..ed282f877d9a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -124,6 +124,7 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define E1000_82580_PM_SPD 0x0001 /* Smart Power Down */
#define E1000_82580_PM_D0_LPLU 0x0002 /* For D0a states */
#define E1000_82580_PM_D3_LPLU 0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD 0x0020 /* Go Link Disconnect */
/* Enable flexible speed on link-up */
#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8aad230c0592..17f1686ee411 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,16 +34,16 @@
#include "e1000_mac.h"
#include "e1000_82575.h"
-#ifdef CONFIG_IGB_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IGB_PTP */
#include <linux/bitops.h>
#include <linux/if_vlan.h>
struct igb_adapter;
+#define E1000_PCS_CFG_IGN_SD 1
+
/* Interrupt defines */
#define IGB_START_ITR 648 /* ~6000 ints/sec */
#define IGB_4K_ITR 980
@@ -132,9 +132,10 @@ struct vf_data_storage {
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
/* Supported Rx Buffer Sizes */
-#define IGB_RXBUFFER_256 256
-#define IGB_RXBUFFER_16384 16384
-#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_2048 2048
+#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
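With 4096-byte pages, each page now holds exactly two IGB_RX_BUFSZ buffers, which is what the new next_to_alloc field and page-reuse scheme rely on; a sketch of the intended flip (assumed from the Rx rewrite this define belongs to):

	rx_buffer->page_offset ^= IGB_RX_BUFSZ;	/* toggle between page halves */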
/* How many Tx Descriptors do we need to call netif_wake_queue ? */
#define IGB_TX_QUEUE_WAKE 16
@@ -151,11 +152,18 @@ struct vf_data_storage {
#define IGB_MNG_VLAN_NONE -1
-#define IGB_TX_FLAGS_CSUM 0x00000001
-#define IGB_TX_FLAGS_VLAN 0x00000002
-#define IGB_TX_FLAGS_TSO 0x00000004
-#define IGB_TX_FLAGS_IPV4 0x00000008
-#define IGB_TX_FLAGS_TSTAMP 0x00000010
+enum igb_tx_flags {
+ /* cmd_type flags */
+ IGB_TX_FLAGS_VLAN = 0x01,
+ IGB_TX_FLAGS_TSO = 0x02,
+ IGB_TX_FLAGS_TSTAMP = 0x04,
+
+ /* olinfo flags */
+ IGB_TX_FLAGS_IPV4 = 0x10,
+ IGB_TX_FLAGS_CSUM = 0x20,
+};
+
+/* VLAN info */
#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
#define IGB_TX_FLAGS_VLAN_SHIFT 16
@@ -174,11 +182,9 @@ struct igb_tx_buffer {
};
struct igb_rx_buffer {
- struct sk_buff *skb;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
- u32 page_offset;
+ unsigned int page_offset;
};
struct igb_tx_queue_stats {
@@ -205,22 +211,6 @@ struct igb_ring_container {
u8 itr; /* current ITR setting for ring */
};
-struct igb_q_vector {
- struct igb_adapter *adapter; /* backlink */
- int cpu; /* CPU for DCA */
- u32 eims_value; /* EIMS mask value */
-
- struct igb_ring_container rx, tx;
-
- struct napi_struct napi;
-
- u16 itr_val;
- u8 set_itr;
- void __iomem *itr_register;
-
- char name[IFNAMSIZ + 9];
-};
-
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
@@ -232,15 +222,17 @@ struct igb_ring {
void *desc; /* descriptor ring memory */
unsigned long flags; /* ring specific flags */
void __iomem *tail; /* pointer to ring tail register */
+ dma_addr_t dma; /* phys address of the ring */
+ unsigned int size; /* length of desc. ring in bytes */
u16 count; /* number of desc. in the ring */
u8 queue_index; /* logical index of the ring*/
u8 reg_idx; /* physical index of the ring */
- u32 size; /* length of desc. ring in bytes */
/* everything past this point are written often */
- u16 next_to_clean ____cacheline_aligned_in_smp;
+ u16 next_to_clean;
u16 next_to_use;
+ u16 next_to_alloc;
union {
/* TX */
@@ -251,12 +243,30 @@ struct igb_ring {
};
/* RX */
struct {
+ struct sk_buff *skb;
struct igb_rx_queue_stats rx_stats;
struct u64_stats_sync rx_syncp;
};
};
- /* Items past this point are only used during ring alloc / free */
- dma_addr_t dma; /* phys address of the ring */
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+ struct igb_adapter *adapter; /* backlink */
+ int cpu; /* CPU for DCA */
+ u32 eims_value; /* EIMS mask value */
+
+ u16 itr_val;
+ u8 set_itr;
+ void __iomem *itr_register;
+
+ struct igb_ring_container rx, tx;
+
+ struct napi_struct napi;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
+ char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
enum e1000_ring_flags_t {
@@ -362,8 +372,6 @@ struct igb_adapter {
u32 eims_other;
/* to not mess up cache alignment, always add to the bottom */
- u32 eeprom_wol;
-
u16 tx_ring_count;
u16 rx_ring_count;
unsigned int vfs_allocated_count;
@@ -373,7 +381,6 @@ struct igb_adapter {
u32 wvbr;
u32 *shadow_vfta;
-#ifdef CONFIG_IGB_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct delayed_work ptp_overflow_work;
@@ -382,17 +389,19 @@ struct igb_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
-#endif /* CONFIG_IGB_PTP */
char fw_version[32];
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_HAS_MSI (1 << 0)
+#define IGB_FLAG_DCA_ENABLED (1 << 1)
+#define IGB_FLAG_QUAD_PORT_A (1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
+#define IGB_FLAG_DMAC (1 << 4)
+#define IGB_FLAG_PTP (1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -436,18 +445,27 @@ extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
extern void igb_power_up_link(struct igb_adapter *);
extern void igb_set_fw_version(struct igb_adapter *);
-#ifdef CONFIG_IGB_PTP
extern void igb_ptp_init(struct igb_adapter *adapter);
extern void igb_ptp_stop(struct igb_adapter *adapter);
extern void igb_ptp_reset(struct igb_adapter *adapter);
extern void igb_ptp_tx_work(struct work_struct *work);
extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
+ struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+ !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+ igb_ptp_rx_rgtstamp(q_vector, skb);
+}
+
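The two descriptor status bits separate the timestamp transports: TSIP means the timestamp was prepended to the packet buffer itself, while TS alone means it was latched in timestamp registers (hence the rgtstamp path above). A sketch of the assumed complementary call site in the Rx clean-up path, with rx_ring, va, and skb as locals there:

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);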
extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd);
-#endif /* CONFIG_IGB_PTP */
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 2ea012849825..bfe9208c4b18 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -37,6 +37,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/highmem.h>
#include "igb.h"
@@ -1623,6 +1624,20 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
reg &= ~E1000_CONNSW_ENRGSRC;
wr32(E1000_CONNSW, reg);
+ /* Unset sigdetect for SERDES loopback on
+ * 82580 and i350 devices.
+ */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_IGN_SD;
+ wr32(E1000_PCS_CFG0, reg);
+ break;
+ default:
+ break;
+ }
+
/* Set PCS register for forced speed */
reg = rd32(E1000_PCS_LCTL);
reg &= ~E1000_PCS_LCTL_AN_ENABLE; /* Disable Autoneg*/
@@ -1685,16 +1700,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
memset(&skb->data[frame_size + 12], 0xAF, 1);
}
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+ unsigned int frame_size)
{
- frame_size /= 2;
- if (*(skb->data + 3) == 0xFF) {
- if ((*(skb->data + frame_size + 10) == 0xBE) &&
- (*(skb->data + frame_size + 12) == 0xAF)) {
- return 0;
- }
- }
- return 13;
+ unsigned char *data;
+ bool match = true;
+
+ frame_size >>= 1;
+
+ data = kmap(rx_buffer->page);
+
+ if (data[3] != 0xFF ||
+ data[frame_size + 10] != 0xBE ||
+ data[frame_size + 12] != 0xAF)
+ match = false;
+
+ kunmap(rx_buffer->page);
+
+ return match;
}
static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1727,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc;
struct igb_rx_buffer *rx_buffer_info;
struct igb_tx_buffer *tx_buffer_info;
- struct netdev_queue *txq;
u16 rx_ntc, tx_ntc, count = 0;
- unsigned int total_bytes = 0, total_packets = 0;
/* initialize next to clean and descriptor values */
rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1738,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
/* check rx buffer */
rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
- /* unmap rx buffer, will be remapped by alloc_rx_buffers */
- dma_unmap_single(rx_ring->dev,
- rx_buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- rx_buffer_info->dma = 0;
+ /* sync Rx buffer for CPU read */
+ dma_sync_single_for_cpu(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
/* verify contents of skb */
- if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+ if (igb_check_lbtest_frame(rx_buffer_info, size))
count++;
+ /* sync Rx buffer for device write */
+ dma_sync_single_for_device(rx_ring->dev,
+ rx_buffer_info->dma,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
/* unmap buffer on tx side */
tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
- total_bytes += tx_buffer_info->bytecount;
- total_packets += tx_buffer_info->gso_segs;
igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
/* increment rx/tx next to clean counters */
@@ -1746,8 +1770,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
}
- txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
- netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ netdev_tx_reset_queue(txring_txq(tx_ring));
/* re-map buffers to ring, store next to clean values */
igb_alloc_rx_buffers(rx_ring, count);
@@ -1957,54 +1980,6 @@ static void igb_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int igb_wol_exclusion(struct igb_adapter *adapter,
- struct ethtool_wolinfo *wol)
-{
- struct e1000_hw *hw = &adapter->hw;
- int retval = 1; /* fail by default */
-
- switch (hw->device_id) {
- case E1000_DEV_ID_82575GB_QUAD_COPPER:
- /* WoL not supported */
- wol->supported = 0;
- break;
- case E1000_DEV_ID_82575EB_FIBER_SERDES:
- case E1000_DEV_ID_82576_FIBER:
- case E1000_DEV_ID_82576_SERDES:
- /* Wake events not supported on port B */
- if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- case E1000_DEV_ID_82576_QUAD_COPPER:
- case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
- /* quad port adapters only support WoL on port A */
- if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
- wol->supported = 0;
- break;
- }
- /* return success for non excluded adapter ports */
- retval = 0;
- break;
- default:
- /* dual port cards only support WoL on port A from now on
- * unless it was enabled in the eeprom for port B
- * so exclude FUNC_1 ports from having WoL enabled */
- if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
- !adapter->eeprom_wol) {
- wol->supported = 0;
- break;
- }
-
- retval = 0;
- }
-
- return retval;
-}
-
static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2014,10 +1989,7 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
WAKE_PHY;
wol->wolopts = 0;
- /* this function will set ->supported = 0 and return 1 if wol is not
- * supported by this hardware */
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return;
/* apply any specific unsupported masks here */
@@ -2045,8 +2017,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol) ||
- !device_can_wakeup(&adapter->pdev->dev))
+ if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
@@ -2301,7 +2272,6 @@ static int igb_get_ts_info(struct net_device *dev,
struct igb_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IGB_PTP
case e1000_82576:
case e1000_82580:
case e1000_i350:
@@ -2337,12 +2307,288 @@ static int igb_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
}
+static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ /* Report default options for RSS on igb */
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V4_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ case TCP_V6_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case UDP_V6_FLOW:
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ ret = igb_get_rss_hash_opts(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+#define UDP_RSS_FLAGS (IGB_FLAG_RSS_FIELD_IPV4_UDP | \
+ IGB_FLAG_RSS_FIELD_IPV6_UDP)
+static int igb_set_rss_hash_opt(struct igb_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ u32 flags = adapter->flags;
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ !(nfc->data & RXH_L4_B_0_1) ||
+ !(nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ case UDP_V4_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV4_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ flags &= ~IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ flags |= IGB_FLAG_RSS_FIELD_IPV6_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST) ||
+ (nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* if we changed something we need to update flags */
+ if (flags != adapter->flags) {
+ struct e1000_hw *hw = &adapter->hw;
+ u32 mrqc = rd32(E1000_MRQC);
+
+ if ((flags & UDP_RSS_FLAGS) &&
+ !(adapter->flags & UDP_RSS_FLAGS))
+ dev_err(&adapter->pdev->dev,
+ "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
+
+ adapter->flags = flags;
+
+ /* Perform hash on these packet types */
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4 |
+ E1000_MRQC_RSS_FIELD_IPV4_TCP |
+ E1000_MRQC_RSS_FIELD_IPV6 |
+ E1000_MRQC_RSS_FIELD_IPV6_TCP;
+
+ mrqc &= ~(E1000_MRQC_RSS_FIELD_IPV4_UDP |
+ E1000_MRQC_RSS_FIELD_IPV6_UDP);
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+
+ if (flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+ wr32(E1000_MRQC, mrqc);
+ }
+
+ return 0;
+}
+
+static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = igb_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
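The case fallthrough in igb_get_rss_hash_opts() above is deliberate: the TCP cases add the L4 port bits and then fall into the IP-only cases. For reference, a sketch of the request igb_set_rxnfc() receives when userspace runs something like "ethtool -N eth0 rx-flow-hash udp4 sdfn" (s/d = IP src/dst, f/n = L4 src/dst port):

	struct ethtool_rxnfc nfc = {
		.cmd       = ETHTOOL_SRXFH,
		.flow_type = UDP_V4_FLOW,
		.data      = RXH_IP_SRC | RXH_IP_DST |
			     RXH_L4_B_0_1 | RXH_L4_B_2_3,
	};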
+static int igb_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ipcnfg, eeer;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ edata->supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full);
+
+ ipcnfg = rd32(E1000_IPCNFG);
+ eeer = rd32(E1000_EEER);
+
+ /* EEE status on negotiated link */
+ if (ipcnfg & E1000_IPCNFG_EEE_1G_AN)
+ edata->advertised = ADVERTISED_1000baseT_Full;
+
+ if (ipcnfg & E1000_IPCNFG_EEE_100M_AN)
+ edata->advertised |= ADVERTISED_100baseT_Full;
+
+ if (eeer & E1000_EEER_EEE_NEG)
+ edata->eee_active = true;
+
+ edata->eee_enabled = !hw->dev_spec._82575.eee_disable;
+
+ if (eeer & E1000_EEER_TX_LPI_EN)
+ edata->tx_lpi_enabled = true;
+
+ /* Report correct negotiated EEE status for devices that
+ * wrongly report EEE at half-duplex
+ */
+ if (adapter->link_duplex == HALF_DUPLEX) {
+ edata->eee_enabled = false;
+ edata->eee_active = false;
+ edata->tx_lpi_enabled = false;
+ edata->advertised = 0;
+ }
+
+ return 0;
+}
+
+static int igb_set_eee(struct net_device *netdev,
+ struct ethtool_eee *edata)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct ethtool_eee eee_curr;
+ s32 ret_val;
+
+ if ((hw->mac.type < e1000_i350) ||
+ (hw->phy.media_type != e1000_media_type_copper))
+ return -EOPNOTSUPP;
+
+ ret_val = igb_get_eee(netdev, &eee_curr);
+ if (ret_val)
+ return ret_val;
+
+ if (eee_curr.eee_enabled) {
+ if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE tx-lpi is not supported\n");
+ return -EINVAL;
+ }
+
+ /* Tx LPI timer is not implemented currently */
+ if (edata->tx_lpi_timer) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Tx LPI timer is not supported\n");
+ return -EINVAL;
+ }
+
+ if (eee_curr.advertised != edata->advertised) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE Advertisement is not supported\n");
+ return -EINVAL;
+ }
+
+ } else if (!edata->eee_enabled) {
+ dev_err(&adapter->pdev->dev,
+ "Setting EEE options are not supported with EEE disabled\n");
+ return -EINVAL;
+ }
+
+ if (hw->dev_spec._82575.eee_disable != !edata->eee_enabled) {
+ hw->dev_spec._82575.eee_disable = !edata->eee_enabled;
+ igb_set_eee_i350(hw);
+
+ /* reset link */
+ if (!netif_running(netdev))
+ igb_reset(adapter);
+ }
+
+ return 0;
+}
+
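igb_set_eee() only honors the eee_enabled knob; advertisement, tx-lpi, and the LPI timer must match the current state or -EINVAL is returned. A sketch of the one accepted change, equivalent to "ethtool --set-eee ethN eee on" (assuming an ethtool build with EEE support):

	struct ethtool_eee edata = { .eee_enabled = 1 };	/* all else left as-is */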
static int igb_ethtool_begin(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -2383,6 +2629,10 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
.get_ts_info = igb_get_ts_info,
+ .get_rxnfc = igb_get_rxnfc,
+ .set_rxnfc = igb_set_rxnfc,
+ .get_eee = igb_get_eee,
+ .set_eee = igb_set_eee,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index e1ceb37ef12e..31cfe2ec75df 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
#include "igb.h"
#define MAJ 4
-#define MIN 0
-#define BUILD 1
+#define MIN 1
+#define BUILD 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -118,10 +118,11 @@ static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void __devexit igb_remove(struct pci_dev *pdev);
+static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
static int igb_open(struct net_device *);
static int igb_close(struct net_device *);
+static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
@@ -228,7 +229,7 @@ static struct pci_driver igb_driver = {
.name = igb_driver_name,
.id_table = igb_pci_tbl,
.probe = igb_probe,
- .remove = __devexit_p(igb_remove),
+ .remove = igb_remove,
#ifdef CONFIG_PM
.driver.pm = &igb_pm_ops,
#endif
@@ -534,31 +535,27 @@ rx_ring_summary:
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- pr_info("%s[0x%03X] %016llX %016llX -------"
- "--------- %p%s\n", "RWB", i,
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
+ "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb, next_desc);
+ next_desc);
} else {
- pr_info("%s[0x%03X] %016llX %016llX %016llX"
- " %p%s\n", "R ", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
+ "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb, next_desc);
+ next_desc);
if (netif_msg_pktdata(adapter) &&
- buffer_info->dma && buffer_info->skb) {
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS,
- 16, 1, buffer_info->skb->data,
- IGB_RX_HDR_LEN, true);
+ buffer_info->dma && buffer_info->page) {
print_hex_dump(KERN_INFO, "",
DUMP_PREFIX_ADDRESS,
16, 1,
page_address(buffer_info->page) +
buffer_info->page_offset,
- PAGE_SIZE/2, true);
+ IGB_RX_BUFSZ, true);
}
}
}
@@ -656,80 +653,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
}
}
-static void igb_free_queues(struct igb_adapter *adapter)
-{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
- adapter->num_rx_queues = 0;
- adapter->num_tx_queues = 0;
-}
-
-/**
- * igb_alloc_queues - Allocate memory for all rings
- * @adapter: board private structure to initialize
- *
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time.
- **/
-static int igb_alloc_queues(struct igb_adapter *adapter)
-{
- struct igb_ring *ring;
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->tx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* For 82575, context index must be unique per ring. */
- if (adapter->hw.mac.type == e1000_82575)
- set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
- adapter->tx_ring[i] = ring;
- }
-
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
- if (!ring)
- goto err;
- ring->count = adapter->rx_ring_count;
- ring->queue_index = i;
- ring->dev = &adapter->pdev->dev;
- ring->netdev = adapter->netdev;
- /* set flag indicating ring supports SCTP checksum offload */
- if (adapter->hw.mac.type >= e1000_82576)
- set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-
- /*
- * On i350, i210, and i211, loopback VLAN packets
- * have the tag byte-swapped.
- * */
- if (adapter->hw.mac.type >= e1000_i350)
- set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
-
- adapter->rx_ring[i] = ring;
- }
-
- igb_cache_ring_register(adapter);
-
- return 0;
-
-err:
- igb_free_queues(adapter);
-
- return -ENOMEM;
-}
-
/**
* igb_write_ivar - configure ivar for given MSI-X vector
* @hw: pointer to the HW structure
@@ -909,17 +832,18 @@ static int igb_request_msix(struct igb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
- int i, err = 0, vector = 0;
+ int i, err = 0, vector = 0, free_vector = 0;
err = request_irq(adapter->msix_entries[vector].vector,
igb_msix_other, 0, netdev->name, adapter);
if (err)
- goto out;
- vector++;
+ goto err_out;
for (i = 0; i < adapter->num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
+ vector++;
+
q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
if (q_vector->rx.ring && q_vector->tx.ring)
@@ -938,13 +862,22 @@ static int igb_request_msix(struct igb_adapter *adapter)
igb_msix_ring, 0, q_vector->name,
q_vector);
if (err)
- goto out;
- vector++;
+ goto err_free;
}
igb_configure_msix(adapter);
return 0;
-out:
+
+err_free:
+ /* free already assigned IRQs */
+ free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+ vector--;
+ for (i = 0; i < vector; i++) {
+ free_irq(adapter->msix_entries[free_vector++].vector,
+ adapter->q_vector[i]);
+ }
+err_out:
return err;
}
@@ -960,6 +893,35 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
}
/**
+ * igb_free_q_vector - Free memory allocated for specific interrupt vector
+ * @adapter: board private structure to initialize
+ * @v_idx: Index of vector to be freed
+ *
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+ if (q_vector->tx.ring)
+ adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+ if (q_vector->rx.ring)
+ adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
+
+ /*
+ * igb_get_stats64() might access the rings on this vector,
+ * so we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
+}
+
+/**
* igb_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
@@ -969,17 +931,14 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
**/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
- int v_idx;
+ int v_idx = adapter->num_q_vectors;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- adapter->q_vector[v_idx] = NULL;
- if (!q_vector)
- continue;
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- }
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
}
/**
@@ -990,7 +949,6 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
*/
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
- igb_free_queues(adapter);
igb_free_q_vectors(adapter);
igb_reset_interrupt_capability(adapter);
}
@@ -1001,11 +959,14 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
**/
-static int igb_set_interrupt_capability(struct igb_adapter *adapter)
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
int err;
int numvecs, i;
+ if (!msix)
+ goto msi_only;
+
/* Number of supported queues. */
adapter->num_rx_queues = adapter->rss_queues;
if (adapter->vfs_allocated_count)
@@ -1038,7 +999,7 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter)
adapter->msix_entries,
numvecs);
if (err == 0)
- goto out;
+ return;
igb_reset_interrupt_capability(adapter);
@@ -1068,105 +1029,183 @@ msi_only:
adapter->num_q_vectors = 1;
if (!pci_enable_msi(adapter->pdev))
adapter->flags |= IGB_FLAG_HAS_MSI;
-out:
- /* Notify the stack of the (possibly) reduced queue counts. */
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
- err = netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_queues);
- rtnl_unlock();
- return err;
+}
+
+static void igb_add_ring(struct igb_ring *ring,
+ struct igb_ring_container *head)
+{
+ head->ring = ring;
+ head->count++;
}
/**
- * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * igb_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_count: q_vectors allocated on adapter, used for ring interleaving
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: total number of Tx rings to allocate
+ * @txr_idx: index of first Tx ring to allocate
+ * @rxr_count: total number of Rx rings to allocate
+ * @rxr_idx: index of first Rx ring to allocate
*
- * We allocate one q_vector per queue interrupt. If allocation fails we
- * return -ENOMEM.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+ int v_count, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
{
struct igb_q_vector *q_vector;
- struct e1000_hw *hw = &adapter->hw;
- int v_idx;
+ struct igb_ring *ring;
+ int ring_count, size;
- for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
- q_vector = kzalloc(sizeof(struct igb_q_vector),
- GFP_KERNEL);
- if (!q_vector)
- goto err_out;
- q_vector->adapter = adapter;
- q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
- q_vector->itr_val = IGB_START_ITR;
- netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
- adapter->q_vector[v_idx] = q_vector;
+ /* igb only supports 1 Tx and/or 1 Rx queue per vector */
+ if (txr_count > 1 || rxr_count > 1)
+ return -ENOMEM;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(struct igb_q_vector) +
+ (sizeof(struct igb_ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ igb_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+
+ /* initialize work limits */
+ q_vector->tx.work_limit = adapter->tx_work_limit;
+
+ /* initialize ITR configuration */
+ q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+ q_vector->itr_val = IGB_START_ITR;
+
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+
+ if (txr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ igb_add_ring(ring, &q_vector->tx);
+
+ /* For 82575, context index must be unique per ring. */
+ if (adapter->hw.mac.type == e1000_82575)
+ set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+ /* apply Tx specific ring traits */
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = txr_idx;
+
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* push pointer to next ring */
+ ring++;
}
- return 0;
+ if (rxr_count) {
+ /* assign generic ring traits */
+ ring->dev = &adapter->pdev->dev;
+ ring->netdev = adapter->netdev;
-err_out:
- igb_free_q_vectors(adapter);
- return -ENOMEM;
-}
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
-static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /* update q_vector Rx values */
+ igb_add_ring(ring, &q_vector->rx);
- q_vector->rx.ring = adapter->rx_ring[ring_idx];
- q_vector->rx.ring->q_vector = q_vector;
- q_vector->rx.count++;
- q_vector->itr_val = adapter->rx_itr_setting;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
-}
+ /* set flag indicating ring supports SCTP checksum offload */
+ if (adapter->hw.mac.type >= e1000_82576)
+ set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
-static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
- int ring_idx, int v_idx)
-{
- struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+ /*
+ * On i350, i210, and i211, loopback VLAN packets
+ * have the tag byte-swapped.
+ */
+ if (adapter->hw.mac.type >= e1000_i350)
+ set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
- q_vector->tx.ring = adapter->tx_ring[ring_idx];
- q_vector->tx.ring->q_vector = q_vector;
- q_vector->tx.count++;
- q_vector->itr_val = adapter->tx_itr_setting;
- q_vector->tx.work_limit = adapter->tx_work_limit;
- if (q_vector->itr_val && q_vector->itr_val <= 3)
- q_vector->itr_val = IGB_START_ITR;
+ /* apply Rx specific ring traits */
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = rxr_idx;
+
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
+ }
+
+ return 0;
}
+
/**
- * igb_map_ring_to_vector - maps allocated queues to vectors
+ * igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
*
- * This function maps the recently allocated queues to vectors.
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
**/
-static int igb_map_ring_to_vector(struct igb_adapter *adapter)
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
- int i;
- int v_idx = 0;
+ int q_vectors = adapter->num_q_vectors;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
- if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
- (adapter->num_q_vectors < adapter->num_tx_queues))
- return -ENOMEM;
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++) {
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ 0, 0, 1, rxr_idx);
- if (adapter->num_q_vectors >=
- (adapter->num_rx_queues + adapter->num_tx_queues)) {
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
- } else {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- if (i < adapter->num_tx_queues)
- igb_map_tx_ring_to_vector(adapter, i, v_idx);
- igb_map_rx_ring_to_vector(adapter, i, v_idx++);
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining--;
+ rxr_idx++;
}
- for (; i < adapter->num_tx_queues; i++)
- igb_map_tx_ring_to_vector(adapter, i, v_idx++);
}
+
+ for (; v_idx < q_vectors; v_idx++) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+ err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+ tqpv, txr_idx, rqpv, rxr_idx);
+
+ if (err)
+ goto err_out;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ txr_remaining -= tqpv;
+ rxr_idx++;
+ txr_idx++;
+ }
+
return 0;
+
+err_out:
+ adapter->num_tx_queues = 0;
+ adapter->num_rx_queues = 0;
+ adapter->num_q_vectors = 0;
+
+ while (v_idx--)
+ igb_free_q_vector(adapter, v_idx);
+
+ return -ENOMEM;
}
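As a worked example of the distribution logic: with 8 q_vectors and 4 Rx plus 4 Tx queues, the first branch runs (8 >= 8) and vectors 0-3 each take one Rx ring; the second loop then computes tqpv = DIV_ROUND_UP(4, 4) = 1, so vectors 4-7 each take one Tx ring. With only 4 q_vectors for the same queues, the first branch is skipped and every vector is paired with one Rx and one Tx ring (rqpv = tqpv = 1 on each iteration).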
/**
@@ -1174,14 +1213,12 @@ static int igb_map_ring_to_vector(struct igb_adapter *adapter)
*
* This function initializes the interrupts and allocates all of the queues.
**/
-static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
struct pci_dev *pdev = adapter->pdev;
int err;
- err = igb_set_interrupt_capability(adapter);
- if (err)
- return err;
+ igb_set_interrupt_capability(adapter, msix);
err = igb_alloc_q_vectors(adapter);
if (err) {
@@ -1189,24 +1226,10 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
- err = igb_map_ring_to_vector(adapter);
- if (err) {
- dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
- goto err_map_queues;
- }
-
+ igb_cache_ring_register(adapter);
return 0;
-err_map_queues:
- igb_free_queues(adapter);
-err_alloc_queues:
- igb_free_q_vectors(adapter);
+
err_alloc_q_vectors:
igb_reset_interrupt_capability(adapter);
return err;
@@ -1229,29 +1252,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
if (!err)
goto request_done;
/* fall back to MSI */
- igb_clear_interrupt_scheme(adapter);
- if (!pci_enable_msi(pdev))
- adapter->flags |= IGB_FLAG_HAS_MSI;
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
- adapter->num_tx_queues = 1;
- adapter->num_rx_queues = 1;
- adapter->num_q_vectors = 1;
- err = igb_alloc_q_vectors(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for vectors\n");
- goto request_done;
- }
- err = igb_alloc_queues(adapter);
- if (err) {
- dev_err(&pdev->dev,
- "Unable to allocate memory for queues\n");
- igb_free_q_vectors(adapter);
+
+ igb_clear_interrupt_scheme(adapter);
+ err = igb_init_interrupt_scheme(adapter, false);
+ if (err)
goto request_done;
- }
+
igb_setup_all_tx_resources(adapter);
igb_setup_all_rx_resources(adapter);
+ igb_configure(adapter);
}
igb_assign_vector(adapter->q_vector[0], 0);
@@ -1587,8 +1598,7 @@ void igb_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_mac_info *mac = &hw->mac;
struct e1000_fc_info *fc = &hw->fc;
- u32 pba = 0, tx_space, min_tx_space, min_rx_space;
- u16 hwm;
+ u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@@ -1663,7 +1673,7 @@ void igb_reset(struct igb_adapter *adapter)
hwm = min(((pba << 10) * 9 / 10),
((pba << 10) - 2 * adapter->max_frame_size));
- fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */
+ fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */
fc->low_water = fc->high_water - 16;
fc->pause_time = 0xFFFF;
fc->send_xon = 1;
@@ -1706,10 +1716,8 @@ void igb_reset(struct igb_adapter *adapter)
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
-#ifdef CONFIG_IGB_PTP
/* Re-enable PTP, where applicable. */
igb_ptp_reset(adapter);
-#endif /* CONFIG_IGB_PTP */
igb_get_phy_info(hw);
}
@@ -1783,58 +1791,34 @@ static const struct net_device_ops igb_netdev_ops = {
void igb_set_fw_version(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 major, build, patch, fw_version;
- u32 etrack_id;
-
- hw->nvm.ops.read(hw, 5, 1, &fw_version);
- if (adapter->hw.mac.type != e1000_i211) {
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
- etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl;
-
- /* combo image version needs to be found */
- hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) &&
- (comb_offset != IGB_NVM_VER_INVALID)) {
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
- + 1), 1, &comb_verh);
- hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
- 1, &comb_verl);
-
- /* Only display Option Rom if it exists and is valid */
- if ((comb_verh && comb_verl) &&
- ((comb_verh != IGB_NVM_VER_INVALID) &&
- (comb_verl != IGB_NVM_VER_INVALID))) {
- major = comb_verl >> IGB_COMB_VER_SHFT;
- build = (comb_verl << IGB_COMB_VER_SHFT) |
- (comb_verh >> IGB_COMB_VER_SHFT);
- patch = comb_verh & IGB_COMB_VER_MASK;
- snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x, %d.%d.%d",
- (fw_version & IGB_MAJOR_MASK) >>
- IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >>
- IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK),
- etrack_id, major, build, patch);
- goto out;
- }
- }
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d, 0x%08x",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK), etrack_id);
- } else {
+ struct e1000_fw_version fw;
+
+ igb_get_fw_version(hw, &fw);
+
+ switch (hw->mac.type) {
+ case e1000_i211:
snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%d.%d%d",
- (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT,
- (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT,
- (fw_version & IGB_BUILD_MASK));
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor, fw.invm_img_type);
+ break;
+
+ default:
+ /* if option rom is valid, display its version too */
+ if (fw.or_valid) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x, %d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.etrack_id,
+ fw.or_major, fw.or_build, fw.or_patch);
+ /* no option rom */
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ }
+ break;
}
-out:
return;
}
@@ -1849,8 +1833,7 @@ out:
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit igb_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igb_adapter *adapter;
@@ -1861,7 +1844,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
int err, pci_using_dac;
- u16 eeprom_apme_mask = IGB_EEPROM_APME;
u8 part_str[E1000_PBANUM_LENGTH];
/* Catch broken hardware that put the wrong VF device ID in
@@ -2069,28 +2051,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_validate_mdi_setting(hw);
- /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
- * enable the ACPI Magic Packet filter
- */
-
+ /* By default, support wake on port A */
if (hw->bus.func == 0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type >= e1000_82580)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+ /* Check the NVM for wake support on non-port A ports */
+ if (hw->mac.type >= e1000_82580)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
&eeprom_data);
else if (hw->bus.func == 1)
hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
- if (eeprom_data & eeprom_apme_mask)
- adapter->eeprom_wol |= E1000_WUFC_MAG;
+ if (eeprom_data & IGB_EEPROM_APME)
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
/* now that we have the eeprom settings, apply the special cases where
* the eeprom may be wrong or the board simply won't support wake on
* lan on a particular port */
switch (pdev->device) {
case E1000_DEV_ID_82575GB_QUAD_COPPER:
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82575EB_FIBER_SERDES:
case E1000_DEV_ID_82576_FIBER:
@@ -2098,24 +2079,38 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* Wake events only supported on port A for dual fiber
* regardless of eeprom setting */
if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
break;
case E1000_DEV_ID_82576_QUAD_COPPER:
case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
/* if quad port adapter, disable WoL on all but port A */
if (global_quad_port_a != 0)
- adapter->eeprom_wol = 0;
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
else
adapter->flags |= IGB_FLAG_QUAD_PORT_A;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
break;
+ default:
+ /* If the device can't wake, don't set software support */
+ if (!device_can_wakeup(&adapter->pdev->dev))
+ adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
}
/* initialize the wol settings based on the eeprom settings */
- adapter->wol = adapter->eeprom_wol;
- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+ if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+ adapter->wol |= E1000_WUFC_MAG;
+
+ /* Some vendors want WoL disabled by default, but still supported */
+ if ((hw->mac.type == e1000_i350) &&
+ (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+ adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+ adapter->wol = 0;
+ }
+
+ device_set_wakeup_enable(&adapter->pdev->dev,
+ adapter->flags & IGB_FLAG_WOL_SUPPORTED);
/* reset the hardware with the new settings */
igb_reset(adapter);
@@ -2141,10 +2136,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
#endif
-#ifdef CONFIG_IGB_PTP
/* do hw tstamp init after resetting */
igb_ptp_init(adapter);
-#endif /* CONFIG_IGB_PTP */
dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
/* print bus type/speed/width info */
@@ -2212,16 +2205,14 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit igb_remove(struct pci_dev *pdev)
+static void igb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pm_runtime_get_noresume(&pdev->dev);
-#ifdef CONFIG_IGB_PTP
igb_ptp_stop(adapter);
-#endif /* CONFIG_IGB_PTP */
/*
* The watchdog timer may be rescheduled, so explicitly
@@ -2294,7 +2285,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
* more expensive time-wise to disable SR-IOV than it is to allocate and free
* the memory for the VFs.
**/
-static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
+static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
struct pci_dev *pdev = adapter->pdev;
@@ -2355,7 +2346,7 @@ out:
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit igb_sw_init(struct igb_adapter *adapter)
+static int igb_sw_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
@@ -2461,7 +2452,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
GFP_ATOMIC);
/* This call may decrease the number of queues */
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -2531,6 +2522,17 @@ static int __igb_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(adapter->netdev,
+ adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(adapter->netdev,
+ adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
/* From here on the code is the same as igb_up() */
clear_bit(__IGB_DOWN, &adapter->state);
@@ -2560,6 +2562,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
return 0;
+err_set_queues:
+ igb_free_irq(adapter);
err_req_irq:
igb_release_hw_control(adapter);
igb_power_down_link(adapter);
@@ -2637,10 +2641,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
- tx_ring->desc = dma_alloc_coherent(dev,
- tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL);
+ tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc)
goto err;
@@ -2777,18 +2779,16 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
if (!rx_ring->rx_buffer_info)
goto err;
-
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev,
- rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL);
+ rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc)
goto err;
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -2893,18 +2893,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
/* Don't need to set TUOFL or IPOFL, they default to 1 */
wr32(E1000_RXCSUM, rxcsum);
- /*
- * Generate RSS hash based on TCP port numbers and/or
- * IPv4/v6 src and dst addresses since UDP cannot be
- * hashed reliably due to IP fragmentation
- */
+ /* Generate RSS hash based on packet types, TCP/UDP
+ * port numbers and/or IPv4/v6 src and dst addresses
+ */
mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
E1000_MRQC_RSS_FIELD_IPV4_TCP |
E1000_MRQC_RSS_FIELD_IPV6 |
E1000_MRQC_RSS_FIELD_IPV6_TCP |
E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+ if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+ mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
/* If VMDq is enabled then we set the appropriate mode for that, else
* we default to RSS so that an RSS hash is calculated per packet even
* if we are only using one queue */
@@ -3106,16 +3109,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
/* set descriptor configuration */
srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
-#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
- srrctl |= IGB_RXBUFFER_16384 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#else
- srrctl |= (PAGE_SIZE / 2) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
-#endif
- srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
-#ifdef CONFIG_IGB_PTP
+ srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
if (hw->mac.type >= e1000_82580)
srrctl |= E1000_SRRCTL_TIMESTAMP;
-#endif /* CONFIG_IGB_PTP */
/* Only set Drop Enable if we are supporting multiple queues */
if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
srrctl |= E1000_SRRCTL_DROP_EN;
@@ -3305,36 +3302,27 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
unsigned long size;
u16 i;
+ if (rx_ring->skb)
+ dev_kfree_skb(rx_ring->skb);
+ rx_ring->skb = NULL;
+
if (!rx_ring->rx_buffer_info)
return;
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- if (buffer_info->dma) {
- dma_unmap_single(rx_ring->dev,
- buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
- }
- if (buffer_info->skb) {
- dev_kfree_skb(buffer_info->skb);
- buffer_info->skb = NULL;
- }
- if (buffer_info->page_dma) {
- dma_unmap_page(rx_ring->dev,
- buffer_info->page_dma,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
- if (buffer_info->page) {
- put_page(buffer_info->page);
- buffer_info->page = NULL;
- buffer_info->page_offset = 0;
- }
+ if (!buffer_info->page)
+ continue;
+
+ dma_unmap_page(rx_ring->dev,
+ buffer_info->dma,
+ PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ __free_page(buffer_info->page);
+
+ buffer_info->page = NULL;
}
size = sizeof(struct igb_rx_buffer) * rx_ring->count;
@@ -3343,6 +3331,7 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -4028,6 +4017,9 @@ static int igb_tso(struct igb_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -4148,26 +4140,32 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
}
-static __le32 igb_tx_cmd_type(u32 tx_flags)
+#define IGB_SET_FLAG(_input, _flag, _result) \
+ ((_flag <= _result) ? \
+ ((u32)(_input & _flag) * (_result / _flag)) : \
+ ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
/* set type for advanced descriptor with frame checksum insertion */
- __le32 cmd_type = cpu_to_le32(E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS |
- E1000_ADVTXD_DCMD_DEXT);
+ u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
/* set HW vlan bit if vlan is present */
- if (tx_flags & IGB_TX_FLAGS_VLAN)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_VLE);
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
+ (E1000_ADVTXD_DCMD_VLE));
+
+ /* set segmentation bits for TSO */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
+ (E1000_ADVTXD_DCMD_TSE));
-#ifdef CONFIG_IGB_PTP
/* set timestamp bit if present */
- if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP))
- cmd_type |= cpu_to_le32(E1000_ADVTXD_MAC_TSTAMP);
-#endif /* CONFIG_IGB_PTP */
+ cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
+ (E1000_ADVTXD_MAC_TSTAMP));
- /* set segmentation bits for TSO */
- if (tx_flags & IGB_TX_FLAGS_TSO)
- cmd_type |= cpu_to_le32(E1000_ADVTXD_DCMD_TSE);
+ /* insert frame checksum */
+ cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);
return cmd_type;
}
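/*
 * A standalone demo (not from this patch) of the IGB_SET_FLAG() trick used
 * above: it translates a bit in one word into a differently positioned bit
 * in another without branching, by scaling with the ratio of the two masks,
 * which the compiler folds to a shift when both are powers of two. The flag
 * values below are hypothetical, not the driver's real definitions.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))

#define SW_VLAN_FLAG	0x00000002u	/* hypothetical software tx_flag */
#define HW_VLE_BIT	0x40000000u	/* hypothetical descriptor bit */

int main(void)
{
	u32 tx_flags = SW_VLAN_FLAG;
	u32 cmd = IGB_SET_FLAG(tx_flags, SW_VLAN_FLAG, HW_VLE_BIT);

	/* prints 0x40000000: the low software bit became the high HW bit */
	printf("cmd = 0x%08x\n", (unsigned)cmd);
	return 0;
}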
@@ -4178,19 +4176,19 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
{
u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
- /* 82575 requires a unique index per ring if any offload is enabled */
- if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
- test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
olinfo_status |= tx_ring->reg_idx << 4;
/* insert L4 checksum */
- if (tx_flags & IGB_TX_FLAGS_CSUM) {
- olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_CSUM,
+ (E1000_TXD_POPTS_TXSM << 8));
- /* insert IPv4 checksum */
- if (tx_flags & IGB_TX_FLAGS_IPV4)
- olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
- }
+ /* insert IPv4 checksum */
+ olinfo_status |= IGB_SET_FLAG(tx_flags,
+ IGB_TX_FLAGS_IPV4,
+ (E1000_TXD_POPTS_IXSM << 8));
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
@@ -4209,33 +4207,37 @@ static void igb_tx_map(struct igb_ring *tx_ring,
struct sk_buff *skb = first->skb;
struct igb_tx_buffer *tx_buffer;
union e1000_adv_tx_desc *tx_desc;
+ struct skb_frag_struct *frag;
dma_addr_t dma;
- struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
- unsigned int data_len = skb->data_len;
- unsigned int size = skb_headlen(skb);
- unsigned int paylen = skb->len - hdr_len;
- __le32 cmd_type;
+ unsigned int data_len, size;
u32 tx_flags = first->tx_flags;
+ u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
u16 i = tx_ring->next_to_use;
tx_desc = IGB_TX_DESC(tx_ring, i);
- igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
- cmd_type = igb_tx_cmd_type(tx_flags);
+ igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
- /* record length, and DMA address */
- dma_unmap_len_set(first, len, size);
- dma_unmap_addr_set(first, dma, dma);
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
+ tx_buffer = first;
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ if (dma_mapping_error(tx_ring->dev, dma))
+ goto dma_error;
+
+ /* record length, and DMA address */
+ dma_unmap_len_set(tx_buffer, len, size);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
- for (;;) {
while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
tx_desc->read.cmd_type_len =
- cmd_type | cpu_to_le32(IGB_MAX_DATA_PER_TXD);
+ cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);
i++;
tx_desc++;
@@ -4243,18 +4245,18 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
dma += IGB_MAX_DATA_PER_TXD;
size -= IGB_MAX_DATA_PER_TXD;
- tx_desc->read.olinfo_status = 0;
tx_desc->read.buffer_addr = cpu_to_le64(dma);
}
if (likely(!data_len))
break;
- tx_desc->read.cmd_type_len = cmd_type | cpu_to_le32(size);
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
i++;
tx_desc++;
@@ -4262,32 +4264,22 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_desc = IGB_TX_DESC(tx_ring, 0);
i = 0;
}
+ tx_desc->read.olinfo_status = 0;
size = skb_frag_size(frag);
data_len -= size;
dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
- size, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_ring->dev, dma))
- goto dma_error;
+ size, DMA_TO_DEVICE);
tx_buffer = &tx_ring->tx_buffer_info[i];
- dma_unmap_len_set(tx_buffer, len, size);
- dma_unmap_addr_set(tx_buffer, dma, dma);
-
- tx_desc->read.olinfo_status = 0;
- tx_desc->read.buffer_addr = cpu_to_le64(dma);
-
- frag++;
}
- netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
-
/* write last descriptor with RS and EOP bits */
- cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
- if (unlikely(skb->no_fcs))
- cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
- tx_desc->read.cmd_type_len = cmd_type;
+ cmd_type |= size | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
/* set the timestamp */
first->time_stamp = jiffies;
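/*
 * A small sanity check (not from this patch) for the XOR usage in
 * igb_tx_map() above: cmd_type keeps the descriptor length bits clear, so
 * `cmd_type ^ size` writes the same value as `cmd_type | size`; XOR is used
 * because igb_tx_cmd_type() also relies on it to clear the IFCS bit when
 * skb->no_fcs is set. The bit values below are illustrative.
 */
#include <assert.h>
#include <stdint.h>

#define DCMD_IFCS	0x02000000u	/* hypothetical IFCS command bit */

int main(void)
{
	uint32_t cmd_type = 0x30000000u | DCMD_IFCS; /* length bits clear */
	uint32_t size = 1460;

	/* disjoint bit ranges: XOR and OR agree */
	assert((cmd_type ^ size) == (cmd_type | size));

	/* XOR against an already-set bit toggles it off (the no_fcs case) */
	assert(((cmd_type ^ DCMD_IFCS) & DCMD_IFCS) == 0);
	return 0;
}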
@@ -4372,9 +4364,7 @@ static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
-#ifdef CONFIG_IGB_PTP
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
-#endif /* CONFIG_IGB_PTP */
struct igb_tx_buffer *first;
int tso;
u32 tx_flags = 0;
@@ -4397,7 +4387,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
first->bytecount = skb->len;
first->gso_segs = 1;
-#ifdef CONFIG_IGB_PTP
if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
!(adapter->ptp_tx_skb))) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
@@ -4407,7 +4396,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
if (adapter->hw.mac.type == e1000_82576)
schedule_work(&adapter->ptp_tx_work);
}
-#endif /* CONFIG_IGB_PTP */
if (vlan_tx_tag_present(skb)) {
tx_flags |= IGB_TX_FLAGS_VLAN;
@@ -4467,10 +4455,11 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
* The minimum packet size with TCTL.PSP set is 17 so pad the skb
* in order to meet this minimum size requirement.
*/
- if (skb->len < 17) {
- if (skb_padto(skb, 17))
+ if (unlikely(skb->len < 17)) {
+ if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
@@ -4800,7 +4789,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -4811,7 +4799,6 @@ static irqreturn_t igb_msix_other(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
wr32(E1000_EIMS, adapter->eims_other);
@@ -4851,45 +4838,63 @@ static irqreturn_t igb_msix_ring(int irq, void *data)
}
#ifdef CONFIG_IGB_DCA
+static void igb_update_tx_dca(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
+ E1000_DCA_TXCTRL_DATA_RRO_EN |
+ E1000_DCA_TXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
+}
+
+static void igb_update_rx_dca(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ int cpu)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);
+
+ if (hw->mac.type != e1000_82575)
+ rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
+
+ /*
+ * We can enable relaxed ordering for reads, but not writes when
+ * DCA is enabled. This is due to a known issue in some chipsets
+ * which will cause the DCA tag to be cleared.
+ */
+ rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
+ E1000_DCA_RXCTRL_DESC_DCA_EN;
+
+ wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
+}
+
static void igb_update_dca(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
- struct e1000_hw *hw = &adapter->hw;
int cpu = get_cpu();
if (q_vector->cpu == cpu)
goto out_no_update;
- if (q_vector->tx.ring) {
- int q = q_vector->tx.ring->reg_idx;
- u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
- dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_TXCTRL_CPUID_SHIFT;
- }
- dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
- wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
- }
- if (q_vector->rx.ring) {
- int q = q_vector->rx.ring->reg_idx;
- u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
- if (hw->mac.type == e1000_82575) {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else {
- dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
- dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
- E1000_DCA_RXCTRL_CPUID_SHIFT;
- }
- dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
- dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
- wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
- }
+ if (q_vector->tx.ring)
+ igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);
+
+ if (q_vector->rx.ring)
+ igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);
+
q_vector->cpu = cpu;
out_no_update:
put_cpu();
@@ -5545,7 +5550,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5556,7 +5560,6 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5599,7 +5602,6 @@ static irqreturn_t igb_intr(int irq, void *data)
mod_timer(&adapter->watchdog_timer, jiffies + 1);
}
-#ifdef CONFIG_IGB_PTP
if (icr & E1000_ICR_TS) {
u32 tsicr = rd32(E1000_TSICR);
@@ -5610,7 +5612,6 @@ static irqreturn_t igb_intr(int irq, void *data)
schedule_work(&adapter->ptp_tx_work);
}
}
-#endif /* CONFIG_IGB_PTP */
napi_schedule(&q_vector->napi);
@@ -5840,6 +5841,181 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
return !!budget;
}
+/**
+ * igb_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void igb_reuse_rx_page(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *old_buff)
+{
+ struct igb_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
+
+ new_buff = &rx_ring->rx_buffer_info[nta];
+
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
+
+ /* transfer page from old buffer to new buffer */
+ memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+
+ /* sync the buffer for use by the device */
+ dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
+ old_buff->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+}
+
+/**
+ * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool igb_add_rx_frag(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+
+ if ((size <= IGB_RX_HDR_LEN) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
+
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
+ va += IGB_TS_HDR_LEN;
+ size -= IGB_TS_HDR_LEN;
+ }
+
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(page) == numa_node_id()))
+ return true;
+
+ /* this page cannot be reused so discard it */
+ put_page(page);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, IGB_RX_BUFSZ);
+
+ /* avoid re-using remote pages */
+ if (unlikely(page_to_nid(page) != numa_node_id()))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= IGB_RX_BUFSZ;
+
+ /*
+ * since we are the only owner of the page and we need to
+ * increment it, just set the value to 2 in order to avoid
+ * an unnecessary locked operation
+ */
+ atomic_set(&page->_count, 2);
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += SKB_DATA_ALIGN(size);
+
+ if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
+ return false;
+
+ /* bump ref count on page before it is given to the stack */
+ get_page(page);
+#endif
+
+ return true;
+}
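/*
 * A simplified userspace model (not from this patch) of the half-page
 * recycling above: on 4K-page systems each page is split into two
 * IGB_RX_BUFSZ halves, and XOR-ing the offset alternates between them, so
 * a page handed to the stack can immediately back the next descriptor.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SZ		4096u
#define RX_BUFSZ	(PAGE_SZ / 2)	/* stand-in for IGB_RX_BUFSZ */

int main(void)
{
	uint32_t page_offset = 0;
	int i;

	/* each reuse flips to the other half of the same page */
	for (i = 0; i < 4; i++) {
		printf("buffer %d uses offset %u\n", i, (unsigned)page_offset);
		page_offset ^= RX_BUFSZ;	/* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}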
+
+static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+
+ /*
+ * This memory barrier is needed to keep us from reading
+ * any other fields out of the rx_desc until we know the
+ * RXD_STAT_DD bit is set
+ */
+ rmb();
+
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) +
+ rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+ IGB_RX_HDR_LEN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /*
+ * we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -5889,224 +6065,389 @@ static inline void igb_rx_hash(struct igb_ring *ring,
skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
}
-static void igb_rx_vlan(struct igb_ring *ring,
- union e1000_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+/**
+ * igb_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ *
+ * This function updates next to clean and prefetches the next descriptor.
+ * If the buffer just processed was the last one of its frame (EOP) it
+ * returns false; otherwise it returns true to indicate that more buffers
+ * belonging to the same frame are still pending.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
{
- if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
- u16 vid;
- if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
- test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags))
- vid = be16_to_cpu(rx_desc->wb.upper.vlan);
- else
- vid = le16_to_cpu(rx_desc->wb.upper.vlan);
+ u32 ntc = rx_ring->next_to_clean + 1;
- __vlan_hwaccel_put_tag(skb, vid);
- }
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+ if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+ return false;
+
+ return true;
}
-static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
-{
- /* HW will not DMA in data larger than the given buffer, even if it
- * parses the (NFS, of course) header to be larger. In that case, it
- * fills the header buffer and spills the rest into the page.
+/**
+ * igb_get_headlen - determine size of header for LRO/GRO
+ * @data: pointer to the start of the headers
+ * @max_len: total length of section to find headers in
+ *
+ * This function is meant to determine the length of headers that will
+ * be recognized by hardware for LRO, and GRO offloads. The main
+ * motivation of doing this is to only perform one pull for IPv4 TCP
+ * packets so that we can do basic things like calculating the gso_size
+ * based on the average data per packet.
+ **/
+static unsigned int igb_get_headlen(unsigned char *data,
+ unsigned int max_len)
+{
+ union {
+ unsigned char *network;
+ /* l2 headers */
+ struct ethhdr *eth;
+ struct vlan_hdr *vlan;
+ /* l3 headers */
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
+ } hdr;
+ __be16 protocol;
+ u8 nexthdr = 0; /* default to not TCP */
+ u8 hlen;
+
+ /* this should never happen, but better safe than sorry */
+ if (max_len < ETH_HLEN)
+ return max_len;
+
+ /* initialize network frame pointer */
+ hdr.network = data;
+
+ /* set first protocol and move network header forward */
+ protocol = hdr.eth->h_proto;
+ hdr.network += ETH_HLEN;
+
+ /* handle any vlan tag if present */
+ if (protocol == __constant_htons(ETH_P_8021Q)) {
+ if ((hdr.network - data) > (max_len - VLAN_HLEN))
+ return max_len;
+
+ protocol = hdr.vlan->h_vlan_encapsulated_proto;
+ hdr.network += VLAN_HLEN;
+ }
+
+ /* handle L3 protocols */
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct iphdr)))
+ return max_len;
+
+ /* access ihl as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[0] & 0x0F) << 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct iphdr))
+ return hdr.network - data;
+
+ /* record next protocol if header is present */
+ if (!hdr.ipv4->frag_off)
+ nexthdr = hdr.ipv4->protocol;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
+ /* record next protocol */
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
+ } else {
+ return hdr.network - data;
+ }
+
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP */
+ if (nexthdr == IPPROTO_TCP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
+ return max_len;
+
+ /* access doff as a u8 to avoid unaligned access on ia64 */
+ hlen = (hdr.network[12] & 0xF0) >> 2;
+
+ /* verify hlen meets minimum size requirements */
+ if (hlen < sizeof(struct tcphdr))
+ return hdr.network - data;
+
+ hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
+ }
+
+ /*
+ * If everything has gone correctly hdr.network should be the
+ * data section of the packet and will be the end of the header.
+ * If not then it probably represents the end of the last recognized
+ * header.
*/
- u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
- E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
- if (hlen > IGB_RX_HDR_LEN)
- hlen = IGB_RX_HDR_LEN;
- return hlen;
+ if ((hdr.network - data) < max_len)
+ return hdr.network - data;
+ else
+ return max_len;
}
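/*
 * A worked example (not from this patch) of what igb_get_headlen() computes
 * for a plain IPv4/TCP frame: Ethernet (14) + IHL*4 + TCP data offset*4.
 * The same field extractions are mirrored below over a fabricated header.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 14-byte Ethernet header, IPv4 with IHL=5, TCP with doff=5 */
	uint8_t frame[64] = { 0 };
	unsigned int off, ihl, doff;

	frame[12] = 0x08; frame[13] = 0x00;	/* ethertype IPv4 */
	frame[14] = 0x45;			/* version 4, IHL 5 */
	frame[23] = 6;				/* protocol TCP */
	frame[14 + 20 + 12] = 0x50;		/* TCP data offset 5 */

	off = 14;				/* past Ethernet */
	ihl = (frame[off] & 0x0F) << 2;		/* as in igb_get_headlen() */
	off += ihl;
	doff = (frame[off + 12] & 0xF0) >> 2;	/* ditto */
	off += doff;

	printf("header length = %u\n", off);	/* 54 == 14 + 20 + 20 */
	return 0;
}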
-static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
+/**
+ * igb_pull_tail - igb specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an igb specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void igb_pull_tail(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
{
- struct igb_ring *rx_ring = q_vector->rx.ring;
- union e1000_adv_rx_desc *rx_desc;
- const int current_node = numa_node_id();
- unsigned int total_bytes = 0, total_packets = 0;
- u16 cleaned_count = igb_desc_unused(rx_ring);
- u16 i = rx_ring->next_to_clean;
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_desc = IGB_RX_DESC(rx_ring, i);
+ /*
+ * it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lomem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
- struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
- struct sk_buff *skb = buffer_info->skb;
- union e1000_adv_rx_desc *next_rxd;
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ /* retrieve timestamp from buffer */
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- buffer_info->skb = NULL;
- prefetch(skb->data);
+ /* update pointers to remove timestamp header */
+ skb_frag_size_sub(frag, IGB_TS_HDR_LEN);
+ frag->page_offset += IGB_TS_HDR_LEN;
+ skb->data_len -= IGB_TS_HDR_LEN;
+ skb->len -= IGB_TS_HDR_LEN;
- i++;
- if (i == rx_ring->count)
- i = 0;
+ /* move va to start of packet data */
+ va += IGB_TS_HDR_LEN;
+ }
- next_rxd = IGB_RX_DESC(rx_ring, i);
- prefetch(next_rxd);
+ /*
+ * we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
- /*
- * This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * RXD_STAT_DD bit is set
- */
- rmb();
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (!skb_is_nonlinear(skb)) {
- __skb_put(skb, igb_get_hlen(rx_desc));
- dma_unmap_single(rx_ring->dev, buffer_info->dma,
- IGB_RX_HDR_LEN,
- DMA_FROM_DEVICE);
- buffer_info->dma = 0;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
+
+/**
+ * igb_cleanup_headers - Correct corrupted or empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being fixed
+ *
+ * Address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+
+ if (unlikely((igb_test_staterr(rx_desc,
+ E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ struct net_device *netdev = rx_ring->netdev;
+ if (!(netdev->features & NETIF_F_RXALL)) {
+ dev_kfree_skb_any(skb);
+ return true;
}
+ }
- if (rx_desc->wb.upper.length) {
- u16 length = le16_to_cpu(rx_desc->wb.upper.length);
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ igb_pull_tail(rx_ring, rx_desc, skb);
- skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
- buffer_info->page,
- buffer_info->page_offset,
- length);
+ /* if skb_pad returns an error the skb was freed */
+ if (unlikely(skb->len < 60)) {
+ int pad_len = 60 - skb->len;
- skb->len += length;
- skb->data_len += length;
- skb->truesize += PAGE_SIZE / 2;
+ if (skb_pad(skb, pad_len))
+ return true;
+ __skb_put(skb, pad_len);
+ }
- if ((page_count(buffer_info->page) != 1) ||
- (page_to_nid(buffer_info->page) != current_node))
- buffer_info->page = NULL;
- else
- get_page(buffer_info->page);
+ return false;
+}
- dma_unmap_page(rx_ring->dev, buffer_info->page_dma,
- PAGE_SIZE / 2, DMA_FROM_DEVICE);
- buffer_info->page_dma = 0;
- }
+/**
+ * igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ *
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ * other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct net_device *dev = rx_ring->netdev;
- if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
- struct igb_rx_buffer *next_buffer;
- next_buffer = &rx_ring->rx_buffer_info[i];
- buffer_info->skb = next_buffer->skb;
- buffer_info->dma = next_buffer->dma;
- next_buffer->skb = skb;
- next_buffer->dma = 0;
- goto next_desc;
- }
+ igb_rx_hash(rx_ring, rx_desc, skb);
- if (unlikely((igb_test_staterr(rx_desc,
- E1000_RXDEXT_ERR_FRAME_ERR_MASK))
- && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
- dev_kfree_skb_any(skb);
- goto next_desc;
- }
+ igb_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IGB_PTP
- igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
-#endif /* CONFIG_IGB_PTP */
- igb_rx_hash(rx_ring, rx_desc, skb);
- igb_rx_checksum(rx_ring, rx_desc, skb);
- igb_rx_vlan(rx_ring, rx_desc, skb);
+ igb_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
- total_bytes += skb->len;
- total_packets++;
+ if ((dev->features & NETIF_F_HW_VLAN_RX) &&
+ igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
+ u16 vid;
+ if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
+ test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
+ vid = be16_to_cpu(rx_desc->wb.upper.vlan);
+ else
+ vid = le16_to_cpu(rx_desc->wb.upper.vlan);
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ __vlan_hwaccel_put_tag(skb, vid);
+ }
- napi_gro_receive(&q_vector->napi, skb);
+ skb_record_rx_queue(skb, rx_ring->queue_index);
- budget--;
-next_desc:
- if (!budget)
- break;
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+ struct igb_ring *rx_ring = q_vector->rx.ring;
+ struct sk_buff *skb = rx_ring->skb;
+ unsigned int total_bytes = 0, total_packets = 0;
+ u16 cleaned_count = igb_desc_unused(rx_ring);
+
+ do {
+ union e1000_adv_rx_desc *rx_desc;
- cleaned_count++;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
igb_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- /* use prefetched values */
- rx_desc = next_rxd;
- }
+ rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
- rx_ring->next_to_clean = i;
- u64_stats_update_begin(&rx_ring->rx_syncp);
- rx_ring->rx_stats.packets += total_packets;
- rx_ring->rx_stats.bytes += total_bytes;
- u64_stats_update_end(&rx_ring->rx_syncp);
- q_vector->rx.total_packets += total_packets;
- q_vector->rx.total_bytes += total_bytes;
+ if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_DD))
+ break;
- if (cleaned_count)
- igb_alloc_rx_buffers(rx_ring, cleaned_count);
+ /* retrieve a buffer from the ring */
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
- return !!budget;
-}
+ /* exit if we failed to retrieve a buffer */
+ if (!skb)
+ break;
-static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
- struct igb_rx_buffer *bi)
-{
- struct sk_buff *skb = bi->skb;
- dma_addr_t dma = bi->dma;
+ cleaned_count++;
- if (dma)
- return true;
+ /* fetch next buffer in frame if non-eop */
+ if (igb_is_non_eop(rx_ring, rx_desc))
+ continue;
- if (likely(!skb)) {
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IGB_RX_HDR_LEN);
- bi->skb = skb;
- if (!skb) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
+ /* verify the packet layout is correct */
+ if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
+ skb = NULL;
+ continue;
}
- /* initialize skb for ring */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- }
+ /* probably a little skewed due to removing CRC */
+ total_bytes += skb->len;
- dma = dma_map_single(rx_ring->dev, skb->data,
- IGB_RX_HDR_LEN, DMA_FROM_DEVICE);
+ /* populate checksum, timestamp, VLAN, and protocol */
+ igb_process_skb_fields(rx_ring, rx_desc, skb);
- if (dma_mapping_error(rx_ring->dev, dma)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ napi_gro_receive(&q_vector->napi, skb);
- bi->dma = dma;
- return true;
+ /* reset skb pointer */
+ skb = NULL;
+
+ /* update budget accounting */
+ total_packets++;
+ } while (likely(total_packets < budget));
+
+ /* place incomplete frames back on ring for completion */
+ rx_ring->skb = skb;
+
+ u64_stats_update_begin(&rx_ring->rx_syncp);
+ rx_ring->rx_stats.packets += total_packets;
+ rx_ring->rx_stats.bytes += total_bytes;
+ u64_stats_update_end(&rx_ring->rx_syncp);
+ q_vector->rx.total_packets += total_packets;
+ q_vector->rx.total_bytes += total_bytes;
+
+ if (cleaned_count)
+ igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+ return (total_packets < budget);
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t page_dma = bi->page_dma;
- unsigned int page_offset = bi->page_offset ^ (PAGE_SIZE / 2);
+ dma_addr_t dma;
- if (page_dma)
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page))
return true;
- if (!page) {
- page = __skb_alloc_page(GFP_ATOMIC, bi->skb);
- bi->page = page;
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_failed++;
- return false;
- }
+ /* alloc new page for storage */
+ page = __skb_alloc_page(GFP_ATOMIC | __GFP_COLD, NULL);
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return false;
}
- page_dma = dma_map_page(rx_ring->dev, page,
- page_offset, PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /*
+ * if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
+ */
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_page(page);
- if (dma_mapping_error(rx_ring->dev, page_dma)) {
rx_ring->rx_stats.alloc_failed++;
return false;
}
- bi->page_dma = page_dma;
- bi->page_offset = page_offset;
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
+
return true;
}
@@ -6120,22 +6461,23 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
struct igb_rx_buffer *bi;
u16 i = rx_ring->next_to_use;
+ /* nothing to do */
+ if (!cleaned_count)
+ return;
+
rx_desc = IGB_RX_DESC(rx_ring, i);
bi = &rx_ring->rx_buffer_info[i];
i -= rx_ring->count;
- while (cleaned_count--) {
- if (!igb_alloc_mapped_skb(rx_ring, bi))
- break;
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info. */
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
-
+ do {
if (!igb_alloc_mapped_page(rx_ring, bi))
break;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ /*
+ * Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc++;
bi++;
@@ -6148,17 +6490,25 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/* clear the hdr_addr for the next_to_use descriptor */
rx_desc->read.hdr_addr = 0;
- }
+
+ cleaned_count--;
+ } while (cleaned_count);
i += rx_ring->count;
if (rx_ring->next_to_use != i) {
+ /* record the next descriptor to use */
rx_ring->next_to_use = i;
- /* Force memory writes to complete before letting h/w
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = i;
+
+ /*
+ * Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
- * such as IA-64). */
+ * such as IA-64).
+ */
wmb();
writel(i, rx_ring->tail);
}
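/*
 * A userspace model (not from this patch; the in-loop wrap test is elided
 * from the hunk above and reconstructed here) of the biased-index idiom in
 * igb_alloc_rx_buffers(): `i -= rx_ring->count` before the loop and
 * `i += rx_ring->count` after let the wrap test compare against zero
 * instead of against the ring size.
 */
#include <stdio.h>
#include <stdint.h>

#define RING_COUNT 8

int main(void)
{
	uint16_t next_to_use = 6;
	uint16_t i = next_to_use;
	int fill = 5;			/* buffers to refill */

	i -= RING_COUNT;		/* bias; unsigned wrap is well defined */
	do {
		printf("fill slot %u\n", (unsigned)(uint16_t)(i + RING_COUNT));
		i++;
		if (!i)			/* hit the end: wrap back to slot 0 */
			i -= RING_COUNT;
	} while (--fill);
	i += RING_COUNT;		/* restore the real ring index */

	printf("next_to_use = %u\n", (unsigned)i);
	return 0;
}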
@@ -6207,10 +6557,8 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGMIIREG:
case SIOCSMIIREG:
return igb_mii_ioctl(netdev, ifr, cmd);
-#ifdef CONFIG_IGB_PTP
case SIOCSHWTSTAMP:
return igb_ptp_hwtstamp_ioctl(netdev, ifr, cmd);
-#endif /* CONFIG_IGB_PTP */
default:
return -EOPNOTSUPP;
}
@@ -6478,7 +6826,7 @@ static int igb_resume(struct device *dev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -6492,7 +6840,9 @@ static int igb_resume(struct device *dev)
wr32(E1000_WUS, ~0);
if (netdev->flags & IFF_UP) {
+ rtnl_lock();
err = __igb_open(netdev, true);
+ rtnl_unlock();
if (err)
return err;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ee21445157a3..ab3429729bde 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
}
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
- union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame. The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+ unsigned char *va,
+ struct sk_buff *skb)
+{
+ __le64 *regval = (__le64 *)va;
+
+ /*
+ * The timestamp is recorded in little endian format.
+ * DWORD: 0 1 2 3
+ * Field: Reserved Reserved SYSTIML SYSTIMH
+ */
+ igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ le64_to_cpu(regval[1]));
+}
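/*
 * A fabricated-buffer model (not from this patch) of the layout the comment
 * above describes: the timestamp lives in bytes 8..15 of the buffer, which
 * is why the driver reads regval[1] of an __le64 pair. On a big-endian host
 * le64_to_cpu() would additionally byte-swap; this sketch assumes LE.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	unsigned char va[16] = { 0 };
	uint64_t systim = 0x1122334455667788ull;
	uint64_t regval[2];
	int i;

	/* hardware writes SYSTIML/SYSTIMH little-endian starting at byte 8 */
	for (i = 0; i < 8; i++)
		va[8 + i] = (systim >> (8 * i)) & 0xff;

	memcpy(regval, va, sizeof(regval));
	printf("timestamp = 0x%016llx\n", (unsigned long long)regval[1]);
	return 0;
}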
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
- if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
- E1000_RXDADV_STAT_TS))
- return;
-
/*
* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
* If nothing went wrong, then it should have a shared tx_flags that we
* can turn into a skb_shared_hwtstamps.
*/
- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
- u32 *stamp = (u32 *)skb->data;
- regval = le32_to_cpu(*(stamp + 2));
- regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
- skb_pull(skb, IGB_TS_HDR_LEN);
- } else {
- if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
- return;
+ if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+ return;
- regval = rd32(E1000_RXSTMPL);
- regval |= (u64)rd32(E1000_RXSTMPH) << 32;
- }
+ regval = rd32(E1000_RXSTMPL);
+ regval |= (u64)rd32(E1000_RXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
}
@@ -532,18 +553,6 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_ALL:
- /*
- * register TSYNCRXCFG must be set, therefore it is not
- * possible to time stamp both Sync and Delay_Req messages
- * => fall back to time stamping all packets
- */
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
@@ -554,31 +563,33 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
- tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
- is_l2 = true;
- is_l4 = true;
- config.rx_filter = HWTSTAMP_FILTER_SOME;
- break;
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
is_l2 = true;
is_l4 = true;
break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_ALL:
+ /* 82576 cannot timestamp all packets, which it would need to
+ * do in order to support both V1 Sync and Delay_Req messages
+ */
+ if (hw->mac.type != e1000_82576) {
+ tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ }
+ /* fall through */
default:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -596,6 +607,9 @@ int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
if ((hw->mac.type >= e1000_82580) && tsync_rx_ctl) {
tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ is_l2 = true;
+ is_l4 = true;
if ((hw->mac.type == e1000_i210) ||
(hw->mac.type == e1000_i211)) {
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 3e18045d8f89..d9fa999b1685 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -46,6 +46,7 @@
#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */
#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define E1000_RXDEXT_STATERR_LB 0x00040000
#define E1000_RXDEXT_STATERR_CE 0x01000000
#define E1000_RXDEXT_STATERR_SE 0x02000000
#define E1000_RXDEXT_STATERR_SEQ 0x04000000
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index a895e2f7b34d..fdca7b672776 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -295,7 +295,7 @@ struct igbvf_info {
/* hardware capability, feature, and workaround flags */
#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 0ac11f527a84..277f5dfe3d90 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -47,7 +47,7 @@
#include "igbvf.h"
-#define DRV_VERSION "2.0.1-k"
+#define DRV_VERSION "2.0.2-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
@@ -107,12 +107,19 @@ static void igbvf_receive_skb(struct igbvf_adapter *adapter,
struct sk_buff *skb,
u32 status, u16 vlan)
{
+ u16 vid;
+
if (status & E1000_RXD_STAT_VP) {
- u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ if ((adapter->flags & IGBVF_FLAG_RX_LB_VLAN_BSWAP) &&
+ (status & E1000_RXDEXT_STATERR_LB))
+ vid = be16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
+ else
+ vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
if (test_bit(vid, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, vid);
}
- netif_receive_skb(skb);
+
+ napi_gro_receive(&adapter->rx_ring->napi, skb);
}
static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
@@ -184,6 +191,13 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->page_offset,
PAGE_SIZE / 2,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ buffer_info->page_dma)) {
+ __free_page(buffer_info->page);
+ buffer_info->page = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
if (!buffer_info->skb) {
@@ -197,6 +211,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
bufsz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ goto no_buffers;
+ }
}
/* Refresh the desc even if buffer_addrs didn't change because
* each write-back erases this info. */
@@ -1078,7 +1098,7 @@ out:
* igbvf_alloc_queues - Allocate memory for all rings
* @adapter: board private structure to initialize
**/
-static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
+static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -1530,7 +1550,7 @@ void igbvf_reinit_locked(struct igbvf_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
+static int igbvf_sw_init(struct igbvf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
s32 rc;
@@ -2598,8 +2618,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit igbvf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct igbvf_adapter *adapter;
@@ -2754,6 +2773,10 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */
igbvf_reset(adapter);
+ /* set hardware-specific flags */
+ if (adapter->hw.mac.type == e1000_vfadapt_i350)
+ adapter->flags |= IGBVF_FLAG_RX_LB_VLAN_BSWAP;
+
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
if (err)
@@ -2794,7 +2817,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit igbvf_remove(struct pci_dev *pdev)
+static void igbvf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2851,7 +2874,7 @@ static struct pci_driver igbvf_driver = {
.name = igbvf_driver_name,
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
- .remove = __devexit_p(igbvf_remove),
+ .remove = igbvf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = igbvf_suspend,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d99a2d51b948..ae96c10251be 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -73,7 +73,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
static int ixgb_init_module(void);
static void ixgb_exit_module(void);
static int ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-static void __devexit ixgb_remove(struct pci_dev *pdev);
+static void ixgb_remove(struct pci_dev *pdev);
static int ixgb_sw_init(struct ixgb_adapter *adapter);
static int ixgb_open(struct net_device *netdev);
static int ixgb_close(struct net_device *netdev);
@@ -125,7 +125,7 @@ static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
- .remove = __devexit_p(ixgb_remove),
+ .remove = ixgb_remove,
.err_handler = &ixgb_err_handler
};
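
These hunks are part of the tree-wide removal of the CONFIG_HOTPLUG section annotations: __devinit, __devexit, __devinitdata and the __devexit_p() wrapper all become no-ops, so probe/remove callbacks are declared plainly. A hypothetical driver now reads (example names only):

    static struct pci_driver example_driver = {
            .name     = "example",
            .id_table = example_pci_tbl,
            .probe    = example_probe,          /* formerly __devinit */
            .remove   = example_remove,         /* formerly __devexit_p(...) */
    };
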
@@ -391,7 +391,7 @@ static const struct net_device_ops ixgb_netdev_ops = {
* and a hardware reset occur.
**/
-static int __devinit
+static int
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
@@ -558,7 +558,7 @@ err_dma_mask:
* memory.
**/
-static void __devexit
+static void
ixgb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -584,7 +584,7 @@ ixgb_remove(struct pci_dev *pdev)
* OS network device settings (MTU size).
**/
-static int __devinit
+static int
ixgb_sw_init(struct ixgb_adapter *adapter)
{
struct ixgb_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_param.c b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
index 07d83ab46e21..04a60640ddda 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_param.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_param.c
@@ -47,7 +47,7 @@
#define IXGB_PARAM_INIT { [0 ... IXGB_MAX_NIC] = OPTION_UNSET }
#define IXGB_PARAM(X, desc) \
- static int __devinitdata X[IXGB_MAX_NIC+1] \
+ static int X[IXGB_MAX_NIC+1] \
= IXGB_PARAM_INIT; \
static unsigned int num_##X = 0; \
module_param_array_named(X, X, int, &num_##X, 0); \
@@ -199,7 +199,7 @@ struct ixgb_option {
} arg;
};
-static int __devinit
+static int
ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
{
if (*value == OPTION_UNSET) {
@@ -257,7 +257,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
* in a variable in the adapter structure.
**/
-void __devinit
+void
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 89f40e51fc13..f3a632bf8d96 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,11 +34,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
-ixgbe-$(CONFIG_IXGBE_PTP) += ixgbe_ptp.o
ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o
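
With ixgbe_ptp.o moved into the unconditional object list, the CONFIG_IXGBE_PTP preprocessor guards throughout the driver give way to a runtime test on a new adapter flag, along the lines of:

    if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
            ixgbe_ptp_start_cyclecounter(adapter);
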
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 30efc9f0f47a..8e786764c60e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -36,11 +36,9 @@
#include <linux/aer.h>
#include <linux/if_vlan.h>
-#ifdef CONFIG_IXGBE_PTP
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
-#endif /* CONFIG_IXGBE_PTP */
#include "ixgbe_type.h"
#include "ixgbe_common.h"
@@ -135,6 +133,7 @@ struct vf_data_storage {
u16 tx_rate;
u16 vlan_count;
u8 spoofchk_enabled;
+ unsigned int vf_api;
};
struct vf_macvlans {
@@ -482,8 +481,9 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
/* Tx fast path data */
int num_tx_queues;
@@ -571,7 +571,6 @@ struct ixgbe_adapter {
u32 interrupt_event;
u32 led_reg;
-#ifdef CONFIG_IXGBE_PTP
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
unsigned long last_overflow_check;
@@ -580,8 +579,6 @@ struct ixgbe_adapter {
struct timecounter tc;
int rx_hwtstamp_filter;
u32 base_incval;
- u32 cycle_speed;
-#endif /* CONFIG_IXGBE_PTP */
/* SR-IOV */
DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -600,6 +597,8 @@ struct ixgbe_adapter {
#ifdef CONFIG_DEBUG_FS
struct dentry *ixgbe_dbg_adapter;
#endif /*CONFIG_DEBUG_FS*/
+
+ u8 default_up;
};
struct ixgbe_fdir_filter {
@@ -691,6 +690,7 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
+extern bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -739,7 +739,6 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
-#ifdef CONFIG_IXGBE_PTP
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
@@ -751,7 +750,7 @@ extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
+extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
-#endif /* CONFIG_IXGBE_PTP */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 1077cb2b38db..1073aea5da40 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -62,7 +62,6 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -99,9 +98,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
- u32 reg_anlp1 = 0;
- u32 i = 0;
u16 list_offset, data_offset, data_value;
+ bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -137,28 +135,36 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Now restart DSP by setting Restart_AN and clearing LMS */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
- IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
- IXGBE_AUTOC_AN_RESTART));
-
- /* Wait for AN to leave state 0 */
- for (i = 0; i < 10; i++) {
- usleep_range(4000, 8000);
- reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
- if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
- break;
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires the lock as it also writes
+ * AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto setup_sfp_out;
+
+ got_lock = true;
+ }
+
+ /* Restart DSP and set SFI mode */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+ IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL));
+
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock) {
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ got_lock = false;
}
- if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
- hw_dbg(hw, "sfp module setup not complete\n");
+
+ if (ret_val) {
+ hw_dbg(hw, " sfp module setup not complete\n");
ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
goto setup_sfp_out;
}
-
- /* Restart DSP by setting Restart_AN and return to SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
- IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
- IXGBE_AUTOC_AN_RESTART));
}
setup_sfp_out:
@@ -394,14 +400,26 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
s32 status = 0;
+ bool got_lock = false;
+
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto out;
+
+ got_lock = true;
+ }
/* Restart link */
- autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
IXGBE_AUTOC_LMS_KX4_KX_KR ||
(autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
@@ -425,6 +443,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
/* Add delay to filter out noises during initial link setup */
msleep(50);
+out:
return status;
}
@@ -779,6 +798,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+ bool got_lock = false;
/* Check to see if speed passed in is supported. */
status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
@@ -836,9 +856,26 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is on,
+ * likewise reset_pipeline requires us to hold this lock as
+ * it also writes to AUTOC.
+ */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status != 0)
+ goto out;
+
+ got_lock = true;
+ }
+
/* Restart link */
- autoc |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -994,9 +1031,28 @@ mac_reset_top:
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
- if (autoc != hw->mac.orig_autoc)
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
- IXGBE_AUTOC_AN_RESTART));
+ if (autoc != hw->mac.orig_autoc) {
+ /* Need SW/FW semaphore around AUTOC writes if LESM is
+ * on, likewise reset_pipeline requires us to hold
+ * this lock as it also writes to AUTOC.
+ */
+ bool got_lock = false;
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ status = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (status)
+ goto reset_hw_out;
+
+ got_lock = true;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ }
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
@@ -1022,7 +1078,7 @@ mac_reset_top:
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
@@ -1983,7 +2039,7 @@ fw_version_out:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2080,6 +2136,50 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
return ret_val;
}
+/**
+ * ixgbe_reset_pipeline_82599 - perform pipeline reset
+ *
+ * @hw: pointer to hardware structure
+ *
+ * Reset pipeline by asserting Restart_AN together with LMS change to ensure
+ * full pipeline reset. Note - We must hold the SW/FW semaphore before writing
+ * to AUTOC, so this function assumes the semaphore is held.
+ **/
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+{
+ s32 i, autoc_reg, ret_val;
+ s32 anlp1_reg = 0;
+
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+
+ /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+
+ /* Wait for AN to leave state 0 */
+ for (i = 0; i < 10; i++) {
+ usleep_range(4000, 8000);
+ anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+ if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)
+ break;
+ }
+
+ if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) {
+ hw_dbg(hw, "auto negotiation not completed\n");
+ ret_val = IXGBE_ERR_RESET_FAILED;
+ goto reset_pipeline_out;
+ }
+
+ ret_val = 0;
+
+reset_pipeline_out:
+ /* Write AUTOC register with original LMS field and Restart_AN */
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return ret_val;
+}
+
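ixgbe_reset_pipeline_82599() deliberately does not take the SW/FW semaphore itself; every caller in this patch wraps it in the same acquire/release dance when LESM firmware is active. Condensed, the calling convention is (a sketch; status and autoc belong to the caller):

    bool got_lock = false;

    if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
            status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
            if (status)
                    goto out;
            got_lock = true;
    }

    IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);    /* new link mode */
    status = ixgbe_reset_pipeline_82599(hw);

    if (got_lock)
            hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
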
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index dbf37e4a45fd..5e68afdd502a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,13 +65,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
- return 0;
case IXGBE_DEV_ID_82599_T3_LOM:
return 0;
default:
@@ -90,6 +89,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
+ bool got_lock = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -210,8 +210,29 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
*
*/
if (hw->phy.media_type == ixgbe_media_type_backplane) {
- reg_bp |= IXGBE_AUTOC_AN_RESTART;
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on, likewise reset_pipeline requires the lock as
+ * it also writes AUTOC.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
(ixgbe_device_supports_autoneg_fc(hw) == 0)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
@@ -1762,30 +1783,6 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_validate_mac_addr - Validate MAC address
- * @mac_addr: pointer to MAC address.
- *
- * Tests a MAC address to ensure it is a valid Individual Address
- **/
-s32 ixgbe_validate_mac_addr(u8 *mac_addr)
-{
- s32 status = 0;
-
- /* Make sure it is not a multicast address */
- if (IXGBE_IS_MULTICAST(mac_addr))
- status = IXGBE_ERR_INVALID_MAC_ADDR;
- /* Not a broadcast address */
- else if (IXGBE_IS_BROADCAST(mac_addr))
- status = IXGBE_ERR_INVALID_MAC_ADDR;
- /* Reject the zero address */
- else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
- mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0)
- status = IXGBE_ERR_INVALID_MAC_ADDR;
-
- return status;
-}
-
-/**
* ixgbe_set_rar_generic - Set Rx address register
* @hw: pointer to hardware structure
* @index: Receive address register to write
@@ -1889,8 +1886,7 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
* to the permanent address.
* Otherwise, use the permanent address from the eeprom.
*/
- if (ixgbe_validate_mac_addr(hw->mac.addr) ==
- IXGBE_ERR_INVALID_MAC_ADDR) {
+ if (!is_valid_ether_addr(hw->mac.addr)) {
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
@@ -2617,6 +2613,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
/*
* Link must be up to auto-blink the LEDs;
@@ -2625,10 +2622,28 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ bool got_lock = false;
+
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
IXGBE_WRITE_FLUSH(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2637,7 +2652,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
@@ -2649,18 +2665,40 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ s32 ret_val = 0;
+ bool got_lock = false;
+
+ /* Need the SW/FW semaphore around AUTOC writes if 82599 and
+ * LESM is on.
+ */
+ if ((hw->mac.type == ixgbe_mac_82599EB) &&
+ ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+ if (ret_val)
+ goto out;
+
+ got_lock = true;
+ }
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ ixgbe_reset_pipeline_82599(hw);
+
+ if (got_lock)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
IXGBE_WRITE_FLUSH(hw);
- return 0;
+out:
+ return ret_val;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index d813d1188c36..f7a0970a251c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -78,9 +78,9 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
-s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
@@ -107,6 +107,7 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
+s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 8d3a21889099..50aa546b8c7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -37,20 +37,6 @@ static struct dentry *ixgbe_dbg_root;
static char ixgbe_dbg_reg_ops_buf[256] = "";
/**
- * ixgbe_dbg_reg_ops_open - prep the debugfs pokee data item when opened
- * @inode: inode that was opened
- * @filp: file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer where
- * we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_reg_ops_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/**
* ixgbe_dbg_reg_ops_read - read for reg_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
@@ -61,23 +47,27 @@ static ssize_t ixgbe_dbg_reg_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- char buf[256];
- int bytes_not_copied;
+ char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
- len = snprintf(buf, sizeof(buf), "%s: %s\n",
- adapter->netdev->name, ixgbe_dbg_reg_ops_buf);
- if (count < len)
+ buf = kasprintf(GFP_KERNEL, "%s: %s\n",
+ adapter->netdev->name,
+ ixgbe_dbg_reg_ops_buf);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
return -ENOSPC;
- bytes_not_copied = copy_to_user(buffer, buf, len);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- *ppos = len;
+ kfree(buf);
return len;
}
@@ -93,7 +83,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- int bytes_not_copied;
+ int len;
/* don't allow partial writes */
if (*ppos != 0)
@@ -101,14 +91,15 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_reg_ops_buf))
return -ENOSPC;
- bytes_not_copied = copy_from_user(ixgbe_dbg_reg_ops_buf, buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- else if (bytes_not_copied < count)
- count -= bytes_not_copied;
- else
- return -ENOSPC;
- ixgbe_dbg_reg_ops_buf[count] = '\0';
+ len = simple_write_to_buffer(ixgbe_dbg_reg_ops_buf,
+ sizeof(ixgbe_dbg_reg_ops_buf)-1,
+ ppos,
+ buffer,
+ count);
+ if (len < 0)
+ return len;
+
+ ixgbe_dbg_reg_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_reg_ops_buf, "write", 5) == 0) {
u32 reg, value;
@@ -142,7 +133,7 @@ static ssize_t ixgbe_dbg_reg_ops_write(struct file *filp,
static const struct file_operations ixgbe_dbg_reg_ops_fops = {
.owner = THIS_MODULE,
- .open = ixgbe_dbg_reg_ops_open,
+ .open = simple_open,
.read = ixgbe_dbg_reg_ops_read,
.write = ixgbe_dbg_reg_ops_write,
};
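
simple_open() stores inode->i_private in filp->private_data, and simple_read_from_buffer()/simple_write_to_buffer() take over the copy_{to,from}_user() and *ppos bookkeeping that the removed open/read/write code did by hand. A stripped-down read using the same helpers (hypothetical names):

    static ssize_t example_read(struct file *filp, char __user *ubuf,
                                size_t count, loff_t *ppos)
    {
            char *buf = kasprintf(GFP_KERNEL, "state: %s\n", "ok");
            ssize_t len;

            if (!buf)
                    return -ENOMEM;
            len = simple_read_from_buffer(ubuf, count, ppos, buf, strlen(buf));
            kfree(buf);
            return len;
    }

    static const struct file_operations example_fops = {
            .owner = THIS_MODULE,
            .open  = simple_open,   /* i_private -> filp->private_data */
            .read  = example_read,
    };
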
@@ -150,20 +141,6 @@ static const struct file_operations ixgbe_dbg_reg_ops_fops = {
static char ixgbe_dbg_netdev_ops_buf[256] = "";
/**
- * ixgbe_dbg_netdev_ops_open - prep the debugfs netdev_ops data item
- * @inode: inode that was opened
- * @filp: file info
- *
- * Stash the adapter pointer hiding in the inode into the file pointer
- * where we can find it later in the read and write calls
- **/
-static int ixgbe_dbg_netdev_ops_open(struct inode *inode, struct file *filp)
-{
- filp->private_data = inode->i_private;
- return 0;
-}
-
-/**
* ixgbe_dbg_netdev_ops_read - read for netdev_ops datum
* @filp: the opened file
* @buffer: where to write the data for the user to read
@@ -175,23 +152,27 @@ static ssize_t ixgbe_dbg_netdev_ops_read(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- char buf[256];
- int bytes_not_copied;
+ char *buf;
int len;
/* don't allow partial reads */
if (*ppos != 0)
return 0;
- len = snprintf(buf, sizeof(buf), "%s: %s\n",
- adapter->netdev->name, ixgbe_dbg_netdev_ops_buf);
- if (count < len)
+ buf = kasprintf(GFP_KERNEL, "%s: %s\n",
+ adapter->netdev->name,
+ ixgbe_dbg_netdev_ops_buf);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
return -ENOSPC;
- bytes_not_copied = copy_to_user(buffer, buf, len);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- *ppos = len;
+ kfree(buf);
return len;
}
@@ -207,7 +188,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
size_t count, loff_t *ppos)
{
struct ixgbe_adapter *adapter = filp->private_data;
- int bytes_not_copied;
+ int len;
/* don't allow partial writes */
if (*ppos != 0)
@@ -215,15 +196,15 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
if (count >= sizeof(ixgbe_dbg_netdev_ops_buf))
return -ENOSPC;
- bytes_not_copied = copy_from_user(ixgbe_dbg_netdev_ops_buf,
- buffer, count);
- if (bytes_not_copied < 0)
- return bytes_not_copied;
- else if (bytes_not_copied < count)
- count -= bytes_not_copied;
- else
- return -ENOSPC;
- ixgbe_dbg_netdev_ops_buf[count] = '\0';
+ len = simple_write_to_buffer(ixgbe_dbg_netdev_ops_buf,
+ sizeof(ixgbe_dbg_netdev_ops_buf)-1,
+ ppos,
+ buffer,
+ count);
+ if (len < 0)
+ return len;
+
+ ixgbe_dbg_netdev_ops_buf[len] = '\0';
if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);
@@ -238,7 +219,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp,
static const struct file_operations ixgbe_dbg_netdev_ops_fops = {
.owner = THIS_MODULE,
- .open = ixgbe_dbg_netdev_ops_open,
+ .open = simple_open,
.read = ixgbe_dbg_netdev_ops_read,
.write = ixgbe_dbg_netdev_ops_write,
};
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 116f0e901bee..326858424345 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -383,6 +383,11 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return -EINVAL;
+ /* some devices do not support autoneg of link flow control */
+ if ((pause->autoneg == AUTONEG_ENABLE) &&
+ (ixgbe_device_supports_autoneg_fc(hw) != 0))
+ return -EINVAL;
+
fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
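
ixgbe_device_supports_autoneg_fc() returns 0 only for the copper parts listed in ixgbe_common.c; every other device cannot autonegotiate link flow control, so ethtool requests for pause autoneg are rejected up front by the added check. The same test works anywhere the capability matters, e.g. when picking the flow-control default (as the sw_init hunk below does):

    /* nonzero means the device cannot autonegotiate flow control */
    if (ixgbe_device_supports_autoneg_fc(hw) != 0)
            hw->fc.disable_fc_autoneg = true;
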
@@ -887,24 +892,23 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+ struct ixgbe_ring *temp_ring;
int i, err = 0;
u32 new_rx_count, new_tx_count;
- bool need_update = false;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
- new_rx_count = max_t(u32, ring->rx_pending, IXGBE_MIN_RXD);
- new_rx_count = min_t(u32, new_rx_count, IXGBE_MAX_RXD);
- new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
-
- new_tx_count = max_t(u32, ring->tx_pending, IXGBE_MIN_TXD);
- new_tx_count = min_t(u32, new_tx_count, IXGBE_MAX_TXD);
+ new_tx_count = clamp_t(u32, ring->tx_pending,
+ IXGBE_MIN_TXD, IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring[0]->count) &&
- (new_rx_count == adapter->rx_ring[0]->count)) {
+ new_rx_count = clamp_t(u32, ring->rx_pending,
+ IXGBE_MIN_RXD, IXGBE_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring_count) &&
+ (new_rx_count == adapter->rx_ring_count)) {
/* nothing to do */
return 0;
}
@@ -922,81 +926,80 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
goto clear_reset;
}
- temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
- if (!temp_tx_ring) {
+ /* allocate temporary buffer to store rings in */
+ i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
+ temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
+
+ if (!temp_ring) {
err = -ENOMEM;
goto clear_reset;
}
+ ixgbe_down(adapter);
+
+ /*
+ * Setup new Tx resources and free the old Tx resources in that order.
+ * We can then assign the new resources to the rings via a memcpy.
+ * The advantage to this approach is that we are guaranteed to still
+ * have resources even in the case of an allocation failure.
+ */
if (new_tx_count != adapter->tx_ring_count) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
sizeof(struct ixgbe_ring));
- temp_tx_ring[i].count = new_tx_count;
- err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
+
+ temp_ring[i].count = new_tx_count;
+ err = ixgbe_setup_tx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_tx_resources(&temp_tx_ring[i]);
+ ixgbe_free_tx_resources(&temp_ring[i]);
}
- goto clear_reset;
+ goto err_setup;
}
}
- need_update = true;
- }
- temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
- if (!temp_rx_ring) {
- err = -ENOMEM;
- goto err_setup;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter->tx_ring[i]);
+
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
+
+ adapter->tx_ring_count = new_tx_count;
}
+ /* Repeat the process for the Rx rings if needed */
if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
- memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct ixgbe_ring));
- temp_rx_ring[i].count = new_rx_count;
- err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
+
+ temp_ring[i].count = new_rx_count;
+ err = ixgbe_setup_rx_resources(&temp_ring[i]);
if (err) {
while (i) {
i--;
- ixgbe_free_rx_resources(&temp_rx_ring[i]);
+ ixgbe_free_rx_resources(&temp_ring[i]);
}
goto err_setup;
}
+
}
- need_update = true;
- }
- /* if rings need to be updated, here's the place to do it in one shot */
- if (need_update) {
- ixgbe_down(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter->rx_ring[i]);
- /* tx */
- if (new_tx_count != adapter->tx_ring_count) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- ixgbe_free_tx_resources(adapter->tx_ring[i]);
- memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->tx_ring_count = new_tx_count;
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct ixgbe_ring));
}
- /* rx */
- if (new_rx_count != adapter->rx_ring_count) {
- for (i = 0; i < adapter->num_rx_queues; i++) {
- ixgbe_free_rx_resources(adapter->rx_ring[i]);
- memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
- sizeof(struct ixgbe_ring));
- }
- adapter->rx_ring_count = new_rx_count;
- }
- ixgbe_up(adapter);
+ adapter->rx_ring_count = new_rx_count;
}
- vfree(temp_rx_ring);
err_setup:
- vfree(temp_tx_ring);
+ ixgbe_up(adapter);
+ vfree(temp_ring);
clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
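
The rework collapses the two temporary arrays into one sized for max(num_tx_queues, num_rx_queues) and reorders the flow so the adapter is only ever pointed at fully allocated rings: new resources are built in the scratch array first, and only on success are the old ones freed and the ring structs copied over. Skeleton of the Tx half (a sketch; the unwind label stands in for the inline while(i) cleanup shown above):

    for (i = 0; i < adapter->num_tx_queues; i++) {
            memcpy(&temp_ring[i], adapter->tx_ring[i],
                   sizeof(struct ixgbe_ring));
            temp_ring[i].count = new_tx_count;
            if (ixgbe_setup_tx_resources(&temp_ring[i]))
                    goto unwind;            /* old rings still intact */
    }
    for (i = 0; i < adapter->num_tx_queues; i++) {
            ixgbe_free_tx_resources(adapter->tx_ring[i]);
            memcpy(adapter->tx_ring[i], &temp_ring[i],
                   sizeof(struct ixgbe_ring));
    }
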
@@ -2669,7 +2672,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
struct ixgbe_adapter *adapter = netdev_priv(dev);
switch (adapter->hw.mac.type) {
-#ifdef CONFIG_IXGBE_PTP
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -2695,7 +2697,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
-#endif /* CONFIG_IXGBE_PTP */
default:
return ethtool_op_get_ts_info(dev, info);
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index ae73ef14fdf3..252850d9a3e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
return -EINVAL;
e_info(drv, "Enabling FCoE offload features.\n");
+
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 17ecbcedd548..8c74f739011d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
- else
- cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
+#ifdef CONFIG_IXGBE_DCA
+ /* initialize CPU for DCA */
+ q_vector->cpu = -1;
+
+#endif
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
@@ -821,6 +824,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
/* initialize pointer to rings */
ring = q_vector->ring;
+ /* initialize ITR */
+ if (txr_count && !rxr_count) {
+ /* tx only vector */
+ if (adapter->tx_itr_setting == 1)
+ q_vector->itr = IXGBE_10K_ITR;
+ else
+ q_vector->itr = adapter->tx_itr_setting;
+ } else {
+ /* rx or rx/tx vector */
+ if (adapter->rx_itr_setting == 1)
+ q_vector->itr = IXGBE_20K_ITR;
+ else
+ q_vector->itr = adapter->rx_itr_setting;
+ }
+
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fa3d552e1f4a..20a5af6d87d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -44,6 +44,7 @@
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
+#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
@@ -62,11 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define MAJ 3
-#define MIN 9
-#define BUILD 15
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD) "-k"
+#define DRV_VERSION "3.11.33-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2012 Intel Corporation.";
@@ -335,11 +332,13 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
+ pr_info(" %s %s %s %s\n",
+ "Queue [NTU] [NTC] [bi(ntc)->dma ]",
+ "leng", "ntw", "timestamp");
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n",
n, tx_ring->next_to_use, tx_ring->next_to_clean,
(u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
@@ -355,13 +354,37 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
/* Transmit Descriptor Formats
*
- * Advanced Transmit Descriptor
+ * 82598 Advanced Transmit Descriptor
* +--------------------------------------------------------------+
* 0 | Buffer Address [63:0] |
* +--------------------------------------------------------------+
- * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
+ * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
* +--------------------------------------------------------------+
* 63 46 45 40 39 36 35 32 31 24 23 20 19 0
+ *
+ * 82598 Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | NXTSEQ |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
+ *
+ * 82599+ Advanced Transmit Descriptor
+ * +--------------------------------------------------------------+
+ * 0 | Buffer Address [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN |
+ * +--------------------------------------------------------------+
+ * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0
+ *
+ * 82599+ Advanced Transmit Descriptor (Write-Back Format)
+ * +--------------------------------------------------------------+
+ * 0 | RSV [63:0] |
+ * +--------------------------------------------------------------+
+ * 8 | RSV | STA | RSV |
+ * +--------------------------------------------------------------+
+ * 63 36 35 32 31 0
*/
for (n = 0; n < adapter->num_tx_queues; n++) {
@@ -369,40 +392,43 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] "
- "[PlPOIdStDDt Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("%s%s %s %s %s %s\n",
+ "T [desc] [address 63:0 ] ",
+ "[PlPOIdStDDt Ln] [bi->dma ] ",
+ "leng", "ntw", "timestamp", "bi->skb");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
tx_desc = IXGBE_TX_DESC(tx_ring, i);
tx_buffer = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
- le64_to_cpu(u0->a),
- le64_to_cpu(u0->b),
- (u64)dma_unmap_addr(tx_buffer, dma),
- dma_unmap_len(tx_buffer, len),
- tx_buffer->next_to_watch,
- (u64)tx_buffer->time_stamp,
- tx_buffer->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- pr_cont(" NTC/U\n");
- else if (i == tx_ring->next_to_use)
- pr_cont(" NTU\n");
- else if (i == tx_ring->next_to_clean)
- pr_cont(" NTC\n");
- else
- pr_cont("\n");
-
- if (netif_msg_pktdata(adapter) &&
- tx_buffer->skb)
- print_hex_dump(KERN_INFO, "",
- DUMP_PREFIX_ADDRESS, 16, 1,
- tx_buffer->skb->data,
+ if (dma_unmap_len(tx_buffer, len) > 0) {
+ pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p",
+ i,
+ le64_to_cpu(u0->a),
+ le64_to_cpu(u0->b),
+ (u64)dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
- true);
+ tx_buffer->next_to_watch,
+ (u64)tx_buffer->time_stamp,
+ tx_buffer->skb);
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ pr_cont(" NTC/U\n");
+ else if (i == tx_ring->next_to_use)
+ pr_cont(" NTU\n");
+ else if (i == tx_ring->next_to_clean)
+ pr_cont(" NTC\n");
+ else
+ pr_cont("\n");
+
+ if (netif_msg_pktdata(adapter) &&
+ tx_buffer->skb)
+ print_hex_dump(KERN_INFO, "",
+ DUMP_PREFIX_ADDRESS, 16, 1,
+ tx_buffer->skb->data,
+ dma_unmap_len(tx_buffer, len),
+ true);
+ }
}
}
@@ -422,7 +448,9 @@ rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
- /* Advanced Receive Descriptor (Read) Format
+ /* Receive Descriptor Formats
+ *
+ * 82598 Advanced Receive Descriptor (Read) Format
* 63 1 0
* +-----------------------------------------------------+
* 0 | Packet Buffer Address [63:1] |A0/NSE|
@@ -431,27 +459,52 @@ rx_ring_summary:
* +-----------------------------------------------------+
*
*
- * Advanced Receive Descriptor (Write-Back) Format
+ * 82598 Advanced Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 30 21 20 16 15 4 3 0
* +------------------------------------------------------+
- * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
- * | Checksum Ident | | | | Type | Type |
+ * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS |
+ * | Packet | IP | | | | Type | Type |
+ * | Checksum | Ident | | | | | |
* +------------------------------------------------------+
* 8 | VLAN Tag | Length | Extended Error | Extended Status |
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
+ *
+ * 82599+ Advanced Receive Descriptor (Read) Format
+ * 63 1 0
+ * +-----------------------------------------------------+
+ * 0 | Packet Buffer Address [63:1] |A0/NSE|
+ * +----------------------------------------------+------+
+ * 8 | Header Buffer Address [63:1] | DD |
+ * +-----------------------------------------------------+
+ *
+ *
+ * 82599+ Advanced Receive Descriptor (Write-Back) Format
+ *
+ * 63 48 47 32 31 30 21 20 17 16 4 3 0
+ * +------------------------------------------------------+
+ * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS |
+ * |/ RTT / PCoE_PARAM | | | CNT | Type | Type |
+ * |/ Flow Dir Flt ID | | | | | |
+ * +------------------------------------------------------+
+ * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP |
+ * +------------------------------------------------------+
+ * 63 48 47 32 31 20 19 0
*/
+
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
+ pr_info("%s%s%s",
+ "R [desc] [ PktBuf A0] ",
+ "[ HeadBuf DD] [bi->dma ] [bi->skb ] ",
"<-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
+ pr_info("%s%s%s",
+ "RWB[desc] [PcsmIpSHl PtRs] ",
+ "[vl er S cks ln] ---------------- [bi->skb ] ",
"<-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
@@ -646,6 +699,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_hw_stats *hwstats = &adapter->stats;
u32 xoff[8] = {0};
+ u8 tc;
int i;
bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
@@ -659,21 +713,26 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
/* update stats for each tc, only valid with PFC enabled */
for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+ u32 pxoffrxc;
+
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+ pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
break;
default:
- xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+ pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
}
- hwstats->pxoffrxc[i] += xoff[i];
+ hwstats->pxoffrxc[i] += pxoffrxc;
+ /* Get the TC for the given UP */
+ tc = netdev_get_prio_tc_map(adapter->netdev, i);
+ xoff[tc] += pxoffrxc;
}
/* disarm tx queues that have received xoff frames */
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- u8 tc = tx_ring->dcb_tc;
+ tc = tx_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
}
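
The XOFF registers count per user priority (UP), while hang detection is armed per traffic class (TC), so each UP count is now folded into its TC through the netdev's prio-to-tc map. Worked illustration (pxoffrxc[] here is an assumed per-UP snapshot, not the driver's scalar):

    /* illustrative mapping: UPs 0-3 -> TC0, UPs 4-7 -> TC1 */
    for (i = 0; i < 8; i++) {
            u8 tc = netdev_get_prio_tc_map(adapter->netdev, i);
            xoff[tc] += pxoffrxc[i];    /* eight UP counters fold into two TCs */
    }
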
@@ -791,10 +850,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
total_bytes += tx_buffer->bytecount;
total_packets += tx_buffer->gso_segs;
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);
-#endif
/* free the skb */
dev_kfree_skb_any(tx_buffer->skb);
@@ -967,7 +1024,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
* which will cause the DCA tag to be cleared.
*/
rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN |
- IXGBE_DCA_RXCTRL_DATA_DCA_EN |
IXGBE_DCA_RXCTRL_DESC_DCA_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
@@ -1244,6 +1300,7 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
struct vlan_hdr *vlan;
/* l3 headers */
struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
} hdr;
__be16 protocol;
u8 nexthdr = 0; /* default to not TCP */
@@ -1281,20 +1338,30 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
if (hlen < sizeof(struct iphdr))
return hdr.network - data;
+ /* record next protocol if header is present */
+ if (!hdr.ipv4->frag_off)
+ nexthdr = hdr.ipv4->protocol;
+ } else if (protocol == __constant_htons(ETH_P_IPV6)) {
+ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr)))
+ return max_len;
+
/* record next protocol */
- nexthdr = hdr.ipv4->protocol;
- hdr.network += hlen;
+ nexthdr = hdr.ipv6->nexthdr;
+ hlen = sizeof(struct ipv6hdr);
#ifdef IXGBE_FCOE
} else if (protocol == __constant_htons(ETH_P_FCOE)) {
if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN))
return max_len;
- hdr.network += FCOE_HEADER_LEN;
+ hlen = FCOE_HEADER_LEN;
#endif
} else {
return hdr.network - data;
}
- /* finally sort out TCP */
+ /* relocate pointer to start of L4 header */
+ hdr.network += hlen;
+
+ /* finally sort out TCP/UDP */
if (nexthdr == IPPROTO_TCP) {
if ((hdr.network - data) > (max_len - sizeof(struct tcphdr)))
return max_len;
@@ -1307,6 +1374,11 @@ static unsigned int ixgbe_get_headlen(unsigned char *data,
return hdr.network - data;
hdr.network += hlen;
+ } else if (nexthdr == IPPROTO_UDP) {
+ if ((hdr.network - data) > (max_len - sizeof(struct udphdr)))
+ return max_len;
+
+ hdr.network += sizeof(struct udphdr);
}
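
With IPv6 and UDP now handled, ixgbe_get_headlen()'s control flow reduces to: derive the L3 header length (variable for IPv4, a fixed 40 bytes for IPv6), advance hdr.network by it, then add the fixed L4 size for UDP. Roughly (a sketch omitting the bounds checks the real code performs at each step):

    if (protocol == htons(ETH_P_IP)) {
            hlen = hdr.ipv4->ihl * 4;               /* 20-60 bytes */
            if (!hdr.ipv4->frag_off)
                    nexthdr = hdr.ipv4->protocol;
    } else if (protocol == htons(ETH_P_IPV6)) {
            hlen = sizeof(struct ipv6hdr);          /* always 40 bytes */
            nexthdr = hdr.ipv6->nexthdr;
    }
    hdr.network += hlen;                            /* now at the L4 header */
    if (nexthdr == IPPROTO_UDP)
            hdr.network += sizeof(struct udphdr);   /* UDP header is fixed size */
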
/*
@@ -1369,9 +1441,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
-#endif
if ((dev->features & NETIF_F_HW_VLAN_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1781,7 +1851,7 @@ dma_sync:
**/
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
- int budget)
+ const int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
#ifdef IXGBE_FCOE
@@ -1832,7 +1902,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
/* populate checksum, timestamp, VLAN, and protocol */
ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
@@ -1865,8 +1934,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
- budget--;
- } while (likely(budget));
+ total_rx_packets++;
+ } while (likely(total_rx_packets < budget));
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1878,7 +1947,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
- return !!budget;
+ return (total_rx_packets < budget);
}
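
Counting processed packets up toward the budget, instead of decrementing a copy of it, lets a single variable serve both the NAPI budget check and the ring statistics, and the return value keeps its meaning: true when the ring drained before the budget ran out, i.e. NAPI may complete. In essence:

    unsigned int total_rx_packets = 0;

    do {
            /* ... fetch, assemble and deliver one frame ... */
            total_rx_packets++;
    } while (likely(total_rx_packets < budget));

    return total_rx_packets < budget;   /* true => work finished */
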
/**
@@ -1914,20 +1983,6 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
ixgbe_for_each_ring(ring, q_vector->tx)
ixgbe_set_ivar(adapter, 1, ring->reg_idx, v_idx);
- if (q_vector->tx.ring && !q_vector->rx.ring) {
- /* tx only vector */
- if (adapter->tx_itr_setting == 1)
- q_vector->itr = IXGBE_10K_ITR;
- else
- q_vector->itr = adapter->tx_itr_setting;
- } else {
- /* rx or rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
- }
-
ixgbe_write_eitr(q_vector);
}
@@ -2324,10 +2379,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
break;
}
-#ifdef CONFIG_IXGBE_PTP
if (adapter->hw.mac.type == ixgbe_mac_X540)
mask |= IXGBE_EIMS_TIMESYNC;
-#endif
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
@@ -2393,10 +2446,8 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2588,10 +2639,8 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
}
ixgbe_check_fan_failure(adapter, eicr);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
ixgbe_ptp_check_pps_event(adapter, eicr);
-#endif
/* would disable interrupts here but EIAM disabled it */
napi_schedule(&q_vector->napi);
@@ -2699,12 +2748,6 @@ static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- /* rx/tx vector */
- if (adapter->rx_itr_setting == 1)
- q_vector->itr = IXGBE_20K_ITR;
- else
- q_vector->itr = adapter->rx_itr_setting;
-
ixgbe_write_eitr(q_vector);
ixgbe_set_ivar(adapter, 0, 0, 0);
@@ -3132,14 +3175,6 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
- /* If operating in IOV mode set RLPML for X540 */
- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
- hw->mac.type == ixgbe_mac_X540) {
- rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
- rxdctl |= ((ring->netdev->mtu + ETH_HLEN +
- ETH_FCS_LEN + VLAN_HLEN) | IXGBE_RXDCTL_RLPML_EN);
- }
-
if (hw->mac.type == ixgbe_mac_82598EB) {
/*
* enable cache line friendly hardware writes:
@@ -3211,7 +3246,8 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
@@ -3234,8 +3270,6 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- /* enable Tx loopback for VF/PF communication */
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
@@ -3263,6 +3297,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
#endif /* IXGBE_FCOE */
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
mhadd &= ~IXGBE_MHADD_MFS_MASK;
@@ -3271,9 +3310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
- /* MHADD will allow an extra 4 bytes past for vlan tagged frames */
- max_frame += VLAN_HLEN;
-
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
@@ -4072,11 +4108,8 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
else
ixgbe_configure_msi_and_legacy(adapter);
- /* enable the optics for both mult-speed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.enable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* enable the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -4192,6 +4225,9 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
+
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_reset(adapter);
}
/**
@@ -4393,11 +4429,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
ixgbe_clean_all_tx_rings(adapter);
@@ -4429,11 +4462,12 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned int rss;
+ u32 fwsm;
#ifdef CONFIG_IXGBE_DCB
int j;
struct tc_configuration *tc;
@@ -4457,7 +4491,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_q_vectors = MAX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
- adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM);
+ if (fwsm & IXGBE_FWSM_TS_ENABLED)
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
adapter->max_q_vectors = MAX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
@@ -4533,7 +4569,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg = false;
+ hw->fc.disable_fc_autoneg =
+ (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
#ifdef CONFIG_PCI_IOV
/* assign number of SR-IOV VFs */
@@ -4828,14 +4865,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
/*
- * For 82599EB we cannot allow PF to change MTU greater than 1500
- * in SR-IOV mode as it may cause buffer overruns in guest VFs that
- * don't allocate and chain buffers correctly.
+ * For 82599EB we cannot allow legacy VFs to enable their receive
+ * paths when MTU greater than 1500 is configured. So display a
+ * warning that legacy VFs will be disabled.
*/
if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
(adapter->hw.mac.type == ixgbe_mac_82599EB) &&
(max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
- return -EINVAL;
+ e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -4901,6 +4938,8 @@ static int ixgbe_open(struct net_device *netdev)
if (err)
goto err_set_queues;
+ ixgbe_ptp_init(adapter);
+
ixgbe_up_complete(adapter);
return 0;
@@ -4932,6 +4971,8 @@ static int ixgbe_close(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ ixgbe_ptp_stop(adapter);
+
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
@@ -5022,14 +5063,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (wufc) {
ixgbe_set_rx_mode(netdev);
- /*
- * enable the optics for both mult-speed fiber and
- * 82599 SFP+ fiber as we can WoL.
- */
- if (hw->mac.ops.enable_tx_laser &&
- (hw->phy.multispeed_fiber ||
- (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
- hw->mac.type == ixgbe_mac_82599EB)))
+ /* enable the optics for 82599 SFP+ fiber as we can WoL */
+ if (hw->mac.ops.enable_tx_laser)
hw->mac.ops.enable_tx_laser(hw);
/* turn on all-multi mode if wake on multicast is enabled */
@@ -5442,6 +5477,23 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
adapter->link_speed = link_speed;
}
+static void ixgbe_update_default_up(struct ixgbe_adapter *adapter)
+{
+#ifdef CONFIG_IXGBE_DCB
+ struct net_device *netdev = adapter->netdev;
+ struct dcb_app app = {
+ .selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
+ .protocol = 0,
+ };
+ u8 up = 0;
+
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+ up = dcb_ieee_getapp_mask(netdev, &app);
+
+ adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0;
+#endif
+}
+
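dcb_ieee_getapp_mask() returns a bitmask of user priorities bound to the given app entry (here the ethertype-0 "default priority" entry); ffs() then selects the lowest set bit. Worked example with assumed values:

    /* suppose the IEEE DCBX default-priority app entry maps to UP 3 */
    u8 up = 0x08;                                   /* bit 3 set */
    u8 default_up = (up > 1) ? (ffs(up) - 1) : 0;   /* ffs(0x08) = 4 -> UP 3 */
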
/**
* ixgbe_watchdog_link_is_up - update netif_carrier status and
* print link up message
@@ -5482,9 +5534,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
break;
}
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
(link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5501,6 +5552,9 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
netif_carrier_on(netdev);
ixgbe_check_vf_rate_limit(adapter);
+ /* update the default user priority for VFs */
+ ixgbe_update_default_up(adapter);
+
/* ping all the active vfs to let them know link has changed */
ixgbe_ping_all_vfs(adapter);
}
@@ -5526,9 +5580,8 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_start_cyclecounter(adapter);
-#endif
+ if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
netif_carrier_off(netdev);
@@ -5833,9 +5886,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_watchdog_subtask(adapter);
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
-#ifdef CONFIG_IXGBE_PTP
ixgbe_ptp_overflow_check(adapter);
-#endif
ixgbe_service_event_complete(adapter);
}
@@ -5988,10 +6039,8 @@ static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
if (tx_flags & IXGBE_TX_FLAGS_HW_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
-#ifdef CONFIG_IXGBE_PTP
if (tx_flags & IXGBE_TX_FLAGS_TSTAMP)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_MAC_TSTAMP);
-#endif
/* set segmentation enable bits for TSO/FSO */
#ifdef IXGBE_FCOE
@@ -6393,12 +6442,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
skb_tx_timestamp(skb);
-#ifdef CONFIG_IXGBE_PTP
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
tx_flags |= IXGBE_TX_FLAGS_TSTAMP;
}
-#endif
#ifdef CONFIG_PCI_IOV
/*
@@ -6485,6 +6532,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
if (skb_pad(skb, 17 - skb->len))
return NETDEV_TX_OK;
skb->len = 17;
+ skb_set_tail_pointer(skb, 17);
}
tx_ring = adapter->tx_ring[skb->queue_mapping];
@@ -6547,10 +6595,8 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
switch (cmd) {
-#ifdef CONFIG_IXGBE_PTP
case SIOCSHWTSTAMP:
return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
-#endif
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -6910,13 +6956,16 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
return -EOPNOTSUPP;
- if (ndm->ndm_state & NUD_PERMANENT) {
+ /* Hardware does not support aging addresses, so if an
+ * ndm_state is given, only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n",
ixgbe_driver_name);
return -EINVAL;
}
- if (is_unicast_ether_addr(addr)) {
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS;
if (netdev_uc_count(dev) < rar_uc_entries)
@@ -6974,6 +7023,61 @@ static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
return idx;
}
+static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ __u16 mode;
+ u32 reg = 0;
+
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode == BRIDGE_MODE_VEPA) {
+ reg = 0;
+ adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB;
+ } else if (mode == BRIDGE_MODE_VEB) {
+ reg = IXGBE_PFDTXGSWC_VT_LBEN;
+ adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+ } else
+ return -EINVAL;
+
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg);
+
+ e_info(drv, "enabling bridge mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+ }
+
+ return 0;
+}
+
+static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u16 mode;
+
+ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
+ return 0;
+
+ if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB)
+ mode = BRIDGE_MODE_VEB;
+ else
+ mode = BRIDGE_MODE_VEPA;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode);
+}
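
With both hooks registered in ixgbe_netdev_ops below, the embedded switch mode should be reachable through the standard rtnetlink path; iproute2's `bridge link set dev <pf> hwmode {veb|vepa}` and `bridge link show` are the usual userspace front ends for the IFLA_BRIDGE_MODE attribute parsed above (command names per iproute2, not verified against this exact tree).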
+
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
.ndo_stop = ixgbe_close,
@@ -7013,6 +7117,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fdb_add = ixgbe_ndo_fdb_add,
.ndo_fdb_del = ixgbe_ndo_fdb_del,
.ndo_fdb_dump = ixgbe_ndo_fdb_dump,
+ .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
+ .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
};
/**
@@ -7042,6 +7148,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
break;
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
+ case IXGBE_SUBDEV_ID_82599_ECNA_DP:
is_wol_supported = 1;
break;
}
@@ -7079,8 +7186,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit ixgbe_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbe_adapter *adapter = NULL;
@@ -7340,7 +7446,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len);
- if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
+ if (!is_valid_ether_addr(netdev->perm_addr)) {
e_dev_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
@@ -7364,10 +7470,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_init(adapter);
-#endif /* CONFIG_IXGBE_PTP*/
-
/* save off EEPROM version number */
hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh);
hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl);
@@ -7420,11 +7522,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_register;
- /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
- if (hw->mac.ops.disable_tx_laser &&
- ((hw->phy.multispeed_fiber) ||
- ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
- (hw->mac.type == ixgbe_mac_82599EB))))
+ /* power down the optics for 82599 SFP+ fiber */
+ if (hw->mac.ops.disable_tx_laser)
hw->mac.ops.disable_tx_laser(hw);
/* carrier off reporting is important to ethtool even BEFORE open */
@@ -7493,7 +7592,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit ixgbe_remove(struct pci_dev *pdev)
+static void ixgbe_remove(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -7505,9 +7604,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
set_bit(__IXGBE_DOWN, &adapter->state);
cancel_work_sync(&adapter->service_task);
-#ifdef CONFIG_IXGBE_PTP
- ixgbe_ptp_stop(adapter);
-#endif
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7736,7 +7832,7 @@ static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
- .remove = __devexit_p(ixgbe_remove),
+ .remove = ixgbe_remove,
#ifdef CONFIG_PM
.suspend = ixgbe_suspend,
.resume = ixgbe_resume,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 310bdd961075..42dd65e6ac97 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -62,12 +62,39 @@
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+ ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
+ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
+ /* This value should always be last */
+ ixgbe_mbox_api_unknown, /* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
#define IXGBE_VF_RESET 0x01 /* VF requests reset */
#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
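
To make the negotiation flow concrete, here is a minimal VF-side sketch (not the in-tree ixgbevf code) that uses these message IDs; it assumes the VF-side mailbox ops write_posted/read_posted and the IXGBE_VT_MSGTYPE_ACK/NACK bits defined elsewhere in this header.

static int example_negotiate_and_get_queues(struct ixgbe_hw *hw,
					    u32 *num_tcs, u32 *def_q)
{
	u32 msg[5] = { IXGBE_VF_API_NEGOTIATE, ixgbe_mbox_api_11, 0 };
	int err;

	/* propose API 1.1; the PF echoes the request with ACK or NACK */
	err = hw->mbx.ops.write_posted(hw, msg, 3);
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 3);
	if (err || (msg[0] & IXGBE_VT_MSGTYPE_NACK))
		return -EINVAL;

	/* on 1.1, ask for the queue layout; replies land at the
	 * IXGBE_VF_*_QUEUES / TRANS_VLAN / DEF_QUEUE indices above
	 */
	msg[0] = IXGBE_VF_GET_QUEUES;
	err = hw->mbx.ops.write_posted(hw, msg, 5);
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 5);
	if (err)
		return err;

	*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
	*def_q = msg[IXGBE_VF_DEF_QUEUE];
	return 0;
}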
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index d9291316ee9f..1a751c9d09c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -387,6 +387,15 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
struct ixgbe_hw *hw = &adapter->hw;
struct ptp_clock_event event;
+ event.type = PTP_CLOCK_PPS;
+
+ /* this check is necessary in case the interrupt was enabled via some
+ * alternative means (e.g. debugfs). Better to check here than
+ * everywhere that calls this function.
+ */
+ if (!adapter->ptp_clock)
+ return;
+
switch (hw->mac.type) {
case ixgbe_mac_X540:
ptp_clock_event(adapter->ptp_clock, &event);
@@ -411,7 +420,7 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
struct timespec ts;
- if ((adapter->flags2 & IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED) &&
+ if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
(elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
adapter->last_overflow_check = jiffies;
@@ -554,12 +563,14 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
adapter = q_vector->adapter;
hw = &adapter->hw;
+ if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ return;
+
tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
/* Check if we have a valid timestamp and make sure the skb should
* have been timestamped */
- if (likely(!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID) ||
- !ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
+ if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
return;
/*
@@ -622,8 +633,7 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
- u32 tsync_rx_mtrl = 0;
- bool is_l4 = false;
+ u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
@@ -646,16 +656,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
+ tsync_rx_mtrl = 0;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
- is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- is_l4 = true;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -668,7 +677,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- is_l4 = true;
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
@@ -693,42 +701,15 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
/* Store filter value for later use */
adapter->rx_hwtstamp_filter = config.rx_filter;
- /* define ethertype filter for timestamped packets */
+ /* define ethertype filter for timestamping L2 packets */
if (is_l2)
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(3),
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
(IXGBE_ETQF_FILTER_EN | /* enable filter */
IXGBE_ETQF_1588 | /* enable timestamping */
ETH_P_1588)); /* 1588 eth protocol type */
else
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(3), 0);
-
-#define PTP_PORT 319
- /* L4 Queue Filter[3]: filter by destination port and protocol */
- if (is_l4) {
- u32 ftqf = (IXGBE_FTQF_PROTOCOL_UDP /* UDP */
- | IXGBE_FTQF_POOL_MASK_EN /* Pool not compared */
- | IXGBE_FTQF_QUEUE_ENABLE);
-
- ftqf |= ((IXGBE_FTQF_PROTOCOL_COMP_MASK /* protocol check */
- & IXGBE_FTQF_DEST_PORT_MASK /* dest check */
- & IXGBE_FTQF_SOURCE_PORT_MASK) /* source check */
- << IXGBE_FTQF_5TUPLE_MASK_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
- IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(3),
- (3 << IXGBE_IMIR_RX_QUEUE_SHIFT_82599 |
- IXGBE_IMIR_SIZE_BP_82599));
-
- /* enable port check */
- IXGBE_WRITE_REG(hw, IXGBE_SDPQF(3),
- (htons(PTP_PORT) |
- htons(PTP_PORT) << 16));
-
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), ftqf);
-
- tsync_rx_mtrl |= PTP_PORT << 16;
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_FTQF(3), 0);
- }
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
@@ -759,58 +740,20 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
* ixgbe_ptp_start_cyclecounter - create the cycle counter from hw
* @adapter: pointer to the adapter structure
*
- * this function initializes the timecounter and cyclecounter
- * structures for use in generated a ns counter from the arbitrary
- * fixed point cycles registers in the hardware.
- *
- * A change in link speed impacts the frequency of the DMA clock on
- * the device, which is used to generate the cycle counter
- * registers. Therefor this function is called whenever the link speed
- * changes.
- *
- * This function also turns on the SDP pin for clock out feature (X540
- * only), because this is where the shift is first calculated.
+ * This function should be called to set the proper values for the TIMINCA
+ * register and tell the cyclecounter structure what the tick rate of SYSTIME
+ * is. It does not directly modify SYSTIME registers or the timecounter
+ * structure. It should be called whenever a new TIMINCA value is necessary,
+ * such as during initialization or when the link speed changes.
*/
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 incval = 0;
- u32 timinca = 0;
u32 shift = 0;
- u32 cycle_speed;
unsigned long flags;
/**
- * Determine what speed we need to set the cyclecounter
- * for. It should be different for 100Mb, 1Gb, and 10Gb. Treat
- * unknown speeds as 10Gb. (Hence why we can't just copy the
- * link_speed.
- */
- switch (adapter->link_speed) {
- case IXGBE_LINK_SPEED_100_FULL:
- case IXGBE_LINK_SPEED_1GB_FULL:
- case IXGBE_LINK_SPEED_10GB_FULL:
- cycle_speed = adapter->link_speed;
- break;
- default:
- /* cycle speed should be 10Gb when there is no link */
- cycle_speed = IXGBE_LINK_SPEED_10GB_FULL;
- break;
- }
-
- /*
- * grab the current TIMINCA value from the register so that it can be
- * double checked. If the register value has been cleared, it must be
- * reset to the correct value for generating a cyclecounter. If
- * TIMINCA is zero, the SYSTIME registers do not increment at all.
- */
- timinca = IXGBE_READ_REG(hw, IXGBE_TIMINCA);
-
- /* Bail if the cycle speed didn't change and TIMINCA is non-zero */
- if (adapter->cycle_speed == cycle_speed && timinca)
- return;
-
- /**
* Scale the NIC cycle counter by a large factor so that
* relatively small corrections to the frequency can be added
* or subtracted. The drawbacks of a large factor include
@@ -819,8 +762,12 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* to nanoseconds using only a multiplier and a right-shift,
* and (c) the value must fit within the timinca register space
* => math based on internal DMA clock rate and available bits
+ *
+ * Note that when there is no link, internal DMA clock is same as when
+ * link speed is 10Gb. Set the registers correctly even when link is
+ * down to preserve the clock setting
*/
- switch (cycle_speed) {
+ switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
incval = IXGBE_INCVAL_100;
shift = IXGBE_INCVAL_SHIFT_100;
@@ -830,6 +777,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
shift = IXGBE_INCVAL_SHIFT_1GB;
break;
case IXGBE_LINK_SPEED_10GB_FULL:
+ default:
incval = IXGBE_INCVAL_10GB;
shift = IXGBE_INCVAL_SHIFT_10GB;
break;
@@ -857,18 +805,11 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
return;
}
- /* reset the system time registers */
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
- IXGBE_WRITE_FLUSH(hw);
-
- /* store the new cycle speed */
- adapter->cycle_speed = cycle_speed;
-
+ /* update the base incval used to calculate frequency adjustment */
ACCESS_ONCE(adapter->base_incval) = incval;
smp_mb();
- /* grab the ptp lock */
+ /* need lock to prevent incorrect read while modifying cyclecounter */
spin_lock_irqsave(&adapter->tmreg_lock, flags);
memset(&adapter->cc, 0, sizeof(adapter->cc));
@@ -877,6 +818,31 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
adapter->cc.shift = shift;
adapter->cc.mult = 1;
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+}
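
For a feel of the fixed-point math: with cc.mult = 1 and cc.shift = shift as set above, SYSTIME effectively counts in units of 2^-shift ns, and hardware adds incval per DMA clock tick. A stand-alone sketch using the 10GbE constants as I read them from ixgbe_ptp.c (incval 0x66666666, shift 28, i.e. a 156.25 MHz clock):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t cycles = 156250000ULL;		/* one second of 156.25 MHz ticks */
	uint64_t incval = 0x66666666ULL;	/* added to SYSTIME per tick */
	uint32_t shift = 28, mult = 1;

	uint64_t systime = cycles * incval;	/* accumulated SYSTIME */
	uint64_t ns = (systime * mult) >> shift;

	printf("elapsed: %llu ns\n", (unsigned long long)ns);	/* ~1e9, one second */
	return 0;
}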
+
+/**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+ *
+ * When the MAC resets, all timesync features are reset. This function should be
+ * called to re-enable the PTP clock structure. It will re-init the timecounter
+ * structure based on the kernel time as well as setup the cycle counter data.
+ */
+void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ /* set SYSTIME registers to 0 just in case */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
+ IXGBE_WRITE_FLUSH(hw);
+
+ ixgbe_ptp_start_cyclecounter(adapter);
+
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
/* reset the ns time counter */
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
@@ -904,7 +870,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -918,7 +884,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
+ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -942,11 +908,6 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->tmreg_lock);
- ixgbe_ptp_start_cyclecounter(adapter);
-
- /* (Re)start the overflow check */
- adapter->flags2 |= IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED;
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -955,6 +916,11 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ ixgbe_ptp_reset(adapter);
+
+ /* set the flag that PTP has been enabled */
+ adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+
return;
}
@@ -967,7 +933,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
/* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED |
+ adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
IXGBE_FLAG2_PTP_PPS_ENABLED);
ixgbe_ptp_setup_sdp(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index dce48bf64d96..85cddac673ef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -117,6 +117,10 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
}
}
+ /* Initialize default switching mode VEB */
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB;
+
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
@@ -150,16 +154,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
IXGBE_FLAG2_RSC_ENABLED);
-#ifdef IXGBE_FCOE
- /*
- * When SR-IOV is enabled 82599 cannot support jumbo frames
- * so we must disable FCoE because we cannot support FCoE MTU.
- */
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
- adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED |
- IXGBE_FLAG_FCOE_CAPABLE);
-#endif
-
/* enable spoof checking for all VFs */
for (i = 0; i < adapter->num_vfs; i++)
adapter->vfinfo[i].spoofchk_enabled = true;
@@ -265,8 +259,11 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
- int entries, u16 *hash_list, u32 vf)
+ u32 *msgbuf, u32 vf)
{
+ int entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ u16 *hash_list = (u16 *)&msgbuf[1];
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
struct ixgbe_hw *hw = &adapter->hw;
int i;
@@ -353,31 +350,89 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- int new_mtu = msgbuf[1];
+ int max_frame = msgbuf[1];
u32 max_frs;
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- /* Only X540 supports jumbo frames in IOV mode */
- if (adapter->hw.mac.type != ixgbe_mac_X540)
- return;
+ /*
+ * For 82599EB we have to keep all PFs and VFs operating with
+ * the same max_frame value in order to avoid sending an oversize
+ * frame to a VF. In order to guarantee this is handled correctly
+ * for all cases we have several special exceptions to take into
+ * account before we can enable the VF for receive
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+ u32 reg_offset, vf_shift, vfre;
+ s32 err = 0;
+
+#ifdef IXGBE_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* IXGBE_FCOE */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_11:
+ /*
+ * Version 1.1 supports jumbo frames on VFs if PF has
+ * jumbo frames enabled which means legacy VFs are
+ * disabled
+ */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ break;
+ default:
+ /*
+ * If the PF or VF are running w/ jumbo frames enabled
+ * we need to shut down the VF Rx path as we cannot
+ * support jumbo frames on legacy VFs
+ */
+ if ((pf_max_frame > ETH_FRAME_LEN) ||
+ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)))
+ err = -EINVAL;
+ break;
+ }
+
+ /* determine VF receive enable location */
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable or disable receive depending on error */
+ vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ if (err)
+ vfre &= ~(1 << vf_shift);
+ else
+ vfre |= 1 << vf_shift;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
+
+ if (err) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return err;
+ }
+ }
/* MTU < 68 is an error and causes problems on some kernels */
- if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) {
- e_err(drv, "VF mtu %d out of range\n", new_mtu);
- return;
+ if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) {
+ e_err(drv, "VF max_frame %d out of range\n", max_frame);
+ return -EINVAL;
}
- max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
- IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
- if (max_frs < new_mtu) {
- max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
+ /* pull current max frame size from hardware */
+ max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+ max_frs &= IXGBE_MHADD_MFS_MASK;
+ max_frs >>= IXGBE_MHADD_MFS_SHIFT;
+
+ if (max_frs < max_frame) {
+ max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
}
- e_info(hw, "VF requests change max MTU to %d\n", new_mtu);
+ e_info(hw, "VF requests change max MTU to %d\n", max_frame);
+
+ return 0;
}
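
The reg_offset/vf_shift split above is plain 32-bits-per-register addressing (each VFRE register holds enable bits for 32 VFs); a throwaway example:

#include <stdio.h>

int main(void)
{
	unsigned int vf = 45;
	unsigned int reg_offset = vf / 32;	/* VFRE register index: 1 */
	unsigned int vf_shift = vf % 32;	/* enable bit within it: 13 */

	printf("VF %u -> VFRE(%u), bit %u\n", vf, reg_offset, vf_shift);
	return 0;
}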
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
@@ -392,35 +447,47 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
}
-static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
+static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
+ u16 vid, u16 qos, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT;
- if (vid)
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
- (vid | IXGBE_VMVIR_VLANA_DEFAULT));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir);
}
+static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
+}
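
Worked example of the VMVIR encoding above: with VLAN_PRIO_SHIFT being 13 (linux/if_vlan.h) and IXGBE_VMVIR_VLANA_DEFAULT being 0x40000000, a PF-assigned VLAN 100 at QoS 5 programs 0x40000000 | (5 << 13) | 100 = 0x4000a064.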
static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
+ u8 num_tcs = netdev_get_num_tc(adapter->netdev);
+
+ /* add PF assigned VLAN or VLAN 0 */
+ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf);
/* reset offloads to defaults */
- if (adapter->vfinfo[vf].pf_vlan) {
- ixgbe_set_vf_vlan(adapter, true,
- adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter,
- (adapter->vfinfo[vf].pf_vlan |
- (adapter->vfinfo[vf].pf_qos <<
- VLAN_PRIO_SHIFT)), vf);
- ixgbe_set_vmolr(hw, vf, false);
+ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan);
+
+ /* set outgoing tags for VFs */
+ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) {
+ ixgbe_clear_vmvir(adapter, vf);
} else {
- ixgbe_set_vf_vlan(adapter, true, 0, vf);
- ixgbe_set_vmvir(adapter, 0, vf);
- ixgbe_set_vmolr(hw, vf, true);
+ if (vfinfo->pf_qos || !num_tcs)
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ vfinfo->pf_qos, vf);
+ else
+ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan,
+ adapter->default_up, vf);
+
+ if (vfinfo->spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
}
/* reset multicast table array for vf */
@@ -430,6 +497,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_set_rx_mode(adapter->netdev);
hw->mac.ops.clear_rar(hw, rar_entry);
+
+ /* reset VF api back to unknown */
+ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
}
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
@@ -521,30 +591,221 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
return 0;
}
-static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg;
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u32 reg, msgbuf[4];
u32 reg_offset, vf_shift;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ e_info(probe, "VF Reset msg received from vf %d\n", vf);
+
+ /* reset the filters for the device */
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* set vf mac address */
+ ixgbe_set_vf_mac(adapter, vf, vf_mac);
vf_shift = vf % 32;
reg_offset = vf / 32;
- /* enable transmit and receive for vf */
+ /* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+ /* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= (reg | (1 << vf_shift));
+ reg |= 1 << vf_shift;
+ /*
+ * The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
+ * For more info take a look at ixgbe_set_vf_lpe
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+ struct net_device *dev = adapter->netdev;
+ int pf_max_frame = dev->mtu + ETH_HLEN;
+
+#ifdef IXGBE_FCOE
+ if (dev->features & NETIF_F_FCOE_MTU)
+ pf_max_frame = max_t(int, pf_max_frame,
+ IXGBE_FCOE_JUMBO_FRAME_SIZE);
+
+#endif /* IXGBE_FCOE */
+ if (pf_max_frame > ETH_FRAME_LEN)
+ reg &= ~(1 << vf_shift);
+ }
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+ /* enable VF mailbox for further messages */
+ adapter->vfinfo[vf].clear_to_send = true;
+
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
reg |= (1 << vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
- ixgbe_vf_reset_event(adapter, vf);
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, ETH_ALEN);
+
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return 0;
+}
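
Note the reply assembled here matches the IXGBE_VF_PERMADDR_MSG_LEN layout (4 words) from ixgbe_mbx.h: word 0 is IXGBE_VF_RESET with the ACK bit set, words 1-2 carry the 6-byte permanent MAC, and word 3 piggybacks mc_filter_type so the VF hashes multicast addresses the same way the PF does.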
+
+static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ if (adapter->vfinfo[vf].pf_set_mac &&
+ memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac,
+ ETH_ALEN)) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set MAC address\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
+}
+
+static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
+ int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ int err;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+
+ if (adapter->vfinfo[vf].pf_vlan || tcs) {
+ e_warn(drv,
+ "VF %d attempted to override administratively set VLAN configuration\n"
+ "Reload the VF driver to resume operations\n",
+ vf);
+ return -1;
+ }
+
+ if (add)
+ adapter->vfinfo[vf].vlan_count++;
+ else if (adapter->vfinfo[vf].vlan_count)
+ adapter->vfinfo[vf].vlan_count--;
+
+ err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ if (!err && adapter->vfinfo[vf].spoofchk_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+
+ return err;
+}
+
+static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ int err;
+
+ if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
+ e_warn(drv,
+ "VF %d requested MACVLAN filter but is administratively denied\n",
+ vf);
+ return -1;
+ }
+
+ /* A non-zero index indicates the VF is setting a filter */
+ if (index) {
+ if (!is_valid_ether_addr(new_mac)) {
+ e_warn(drv, "VF %d attempted to set invalid mac\n", vf);
+ return -1;
+ }
+
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives.
+ */
+ if (adapter->vfinfo[vf].spoofchk_enabled)
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ }
+
+ err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
+ if (err == -ENOSPC)
+ e_warn(drv,
+ "VF %d has requested a MACVLAN filter but there is no space for it\n",
+ vf);
+
+ return err < 0;
+}
+
+static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ int api = msgbuf[1];
+
+ switch (api) {
+ case ixgbe_mbox_api_10:
+ case ixgbe_mbox_api_11:
+ adapter->vfinfo[vf].vf_api = api;
+ return 0;
+ default:
+ break;
+ }
+
+ e_info(drv, "VF %d requested invalid api version %u\n", vf, api);
+
+ return -1;
+}
+
+static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter,
+ u32 *msgbuf, u32 vf)
+{
+ struct net_device *dev = adapter->netdev;
+ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
+ unsigned int default_tc = 0;
+ u8 num_tcs = netdev_get_num_tc(dev);
+
+ /* verify the PF is supporting the correct APIs */
+ switch (adapter->vfinfo[vf].vf_api) {
+ case ixgbe_mbox_api_20:
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return -1;
+ }
+
+ /* only allow 1 Tx queue for bandwidth limiting */
+ msgbuf[IXGBE_VF_TX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+ msgbuf[IXGBE_VF_RX_QUEUES] = __ALIGN_MASK(1, ~vmdq->mask);
+
+ /* if TCs > 1 determine which TC belongs to default user priority */
+ if (num_tcs > 1)
+ default_tc = netdev_get_prio_tc_map(dev, adapter->default_up);
+
+ /* notify VF of need for VLAN tag stripping, and correct queue */
+ if (num_tcs)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = num_tcs;
+ else if (adapter->vfinfo[vf].pf_vlan || adapter->vfinfo[vf].pf_qos)
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 1;
+ else
+ msgbuf[IXGBE_VF_TRANS_VLAN] = 0;
+
+ /* notify VF of default queue */
+ msgbuf[IXGBE_VF_DEF_QUEUE] = default_tc;
+
+ return 0;
}
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
@@ -553,10 +814,6 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
- int entries;
- u16 *hash_list;
- int add, vid, index;
- u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -572,39 +829,13 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* flush the ack before we write any messages back */
IXGBE_WRITE_FLUSH(hw);
+ if (msgbuf[0] == IXGBE_VF_RESET)
+ return ixgbe_vf_reset_msg(adapter, vf);
+
/*
* until the vf completes a virtual function reset it should not be
* allowed to start any configuration.
*/
-
- if (msgbuf[0] == IXGBE_VF_RESET) {
- unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- new_mac = (u8 *)(&msgbuf[1]);
- e_info(probe, "VF Reset msg received from vf %d\n", vf);
- adapter->vfinfo[vf].clear_to_send = false;
- ixgbe_vf_reset_msg(adapter, vf);
- adapter->vfinfo[vf].clear_to_send = true;
-
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac)
- ixgbe_set_vf_mac(adapter, vf, vf_mac);
- else
- ixgbe_set_vf_mac(adapter,
- vf, adapter->vfinfo[vf].vf_mac_addresses);
-
- /* reply to reset with ack and vf mac address */
- msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, ETH_ALEN);
- /*
- * Piggyback the multicast filter type so VF can compute the
- * correct vectors
- */
- msgbuf[3] = hw->mac.mc_filter_type;
- ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
-
- return retval;
- }
-
if (!adapter->vfinfo[vf].clear_to_send) {
msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
ixgbe_write_mbx(hw, msgbuf, 1, vf);
@@ -613,70 +844,25 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
switch ((msgbuf[0] & 0xFFFF)) {
case IXGBE_VF_SET_MAC_ADDR:
- new_mac = ((u8 *)(&msgbuf[1]));
- if (is_valid_ether_addr(new_mac) &&
- !adapter->vfinfo[vf].pf_set_mac) {
- ixgbe_set_vf_mac(adapter, vf, new_mac);
- } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
- new_mac, ETH_ALEN)) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set MAC address\nReload "
- "the VF driver to resume operations\n", vf);
- retval = -1;
- }
+ retval = ixgbe_set_vf_mac_addr(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MULTICAST:
- entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- hash_list = (u16 *)&msgbuf[1];
- retval = ixgbe_set_vf_multicasts(adapter, entries,
- hash_list, vf);
- break;
- case IXGBE_VF_SET_LPE:
- ixgbe_set_vf_lpe(adapter, msgbuf);
+ retval = ixgbe_set_vf_multicasts(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_VLAN:
- add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
- >> IXGBE_VT_MSGINFO_SHIFT;
- vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
- if (adapter->vfinfo[vf].pf_vlan) {
- e_warn(drv, "VF %d attempted to override "
- "administratively set VLAN configuration\n"
- "Reload the VF driver to resume operations\n",
- vf);
- retval = -1;
- } else {
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
- retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (!retval && adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- }
+ retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf);
break;
case IXGBE_VF_SET_MACVLAN:
- index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
- IXGBE_VT_MSGINFO_SHIFT;
- if (adapter->vfinfo[vf].pf_set_mac && index > 0) {
- e_warn(drv, "VF %d requested MACVLAN filter but is "
- "administratively denied\n", vf);
- retval = -1;
- break;
- }
- /*
- * If the VF is allowed to set MAC filters then turn off
- * anti-spoofing to avoid false positives. An index
- * greater than 0 will indicate the VF is setting a
- * macvlan MAC filter.
- */
- if (index > 0 && adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
- retval = ixgbe_set_vf_macvlan(adapter, vf, index,
- (unsigned char *)(&msgbuf[1]));
- if (retval == -ENOSPC)
- e_warn(drv, "VF %d has requested a MACVLAN filter "
- "but there is no space for it\n", vf);
+ retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_API_NEGOTIATE:
+ retval = ixgbe_negotiate_vf_api(adapter, msgbuf, vf);
+ break;
+ case IXGBE_VF_GET_QUEUES:
+ retval = ixgbe_get_vf_queues(adapter, msgbuf, vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -692,7 +878,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
- ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ ixgbe_write_mbx(hw, msgbuf, mbx_size, vf);
return retval;
}
@@ -783,7 +969,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
- ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
if (adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
@@ -803,7 +989,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
} else {
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan, vf);
- ixgbe_set_vmvir(adapter, vlan, vf);
+ ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
if (adapter->vfinfo[vf].vlan_count)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0722f3368092..9cd8a13711d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -56,6 +56,7 @@
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
+#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -1833,15 +1834,6 @@ enum {
/* Number of 100 microseconds we wait for PCI Express master disable */
#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
-/* Check whether address is multicast. This is little-endian specific check.*/
-#define IXGBE_IS_MULTICAST(Address) \
- (bool)(((u8 *)(Address))[0] & ((u8)0x01))
-
-/* Check whether an address is broadcast. */
-#define IXGBE_IS_BROADCAST(Address) \
- ((((u8 *)(Address))[0] == ((u8)0xff)) && \
- (((u8 *)(Address))[1] == ((u8)0xff)))
-
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
#define IXGBE_RAH_VIND_SHIFT 18
@@ -1962,6 +1954,8 @@ enum {
#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
#define IXGBE_MRQC_L3L4TXSWEN 0x00008000
+#define IXGBE_FWSM_TS_ENABLED 0x1
+
/* Queue Drop Enable */
#define IXGBE_QDE_ENABLE 0x00000001
#define IXGBE_QDE_IDX_MASK 0x00007F00
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index de4da5219b71..c73b92993391 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -152,7 +152,7 @@ mac_reset_top:
hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
/* Add the SAN MAC address to the RAR only if it's a valid address */
- if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+ if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
hw->mac.san_addr, 0, IXGBE_RAH_AV);
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index da17ccf5c09d..3147795bd135 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,8 +33,11 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_VF_IRQ_CLEAR_MASK 7
-#define IXGBE_VF_MAX_TX_QUEUES 1
-#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_VF_MAX_TX_QUEUES 8
+#define IXGBE_VF_MAX_RX_QUEUES 8
+
+/* DCB define */
+#define IXGBE_VF_MAX_TRAFFIC_CLASS 8
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 4a9c9c285685..fc0af9a3bb35 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
struct ixgbevf_ring *next;
struct net_device *netdev;
struct device *dev;
- struct ixgbevf_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
u64 total_bytes;
u64 total_packets;
struct u64_stats_sync syncp;
+ u64 hw_csum_rx_error;
+ u64 hw_csum_rx_good;
u16 head;
u16 tail;
@@ -89,8 +90,8 @@ struct ixgbevf_ring {
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
-#define MAX_RX_QUEUES 1
-#define MAX_TX_QUEUES 1
+#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
+#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_DEFAULT_TXD 1024
#define IXGBEVF_DEFAULT_RXD 512
@@ -101,10 +102,10 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
-#define IXGBEVF_RXBUFFER_3K 3072
-#define IXGBEVF_RXBUFFER_7K 7168
-#define IXGBEVF_RXBUFFER_15K 15360
-#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+#define IXGBEVF_RXBUFFER_2K 2048
+#define IXGBEVF_RXBUFFER_4K 4096
+#define IXGBEVF_RXBUFFER_8K 8192
+#define IXGBEVF_RXBUFFER_10K 10240
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
@@ -229,6 +230,7 @@ struct ixgbevf_adapter {
*/
u32 flags;
#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
/* OS defined structs */
struct net_device *netdev;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de1ad506665d..257357ae66c3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -58,7 +58,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.6.0-k"
+#define DRV_VERSION "2.7.12-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -99,6 +99,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/* forward decls */
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
struct ixgbevf_ring *rx_ring,
@@ -120,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
* @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to
* @msix_vector: the vector to map to the corresponding queue
- *
*/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector)
@@ -287,17 +287,19 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
if (is_vlan && test_bit(tag & VLAN_VID_MASK, adapter->active_vlans))
__vlan_hwaccel_put_tag(skb, tag);
- napi_gro_receive(&q_vector->napi, skb);
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
+ napi_gro_receive(&q_vector->napi, skb);
+ else
+ netif_rx(skb);
}
/**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
* @status_err: hardware indication of status of receive
* @skb: skb currently being received and modified
**/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
- struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
u32 status_err, struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -309,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
/* if IP and error */
if ((status_err & IXGBE_RXD_STAT_IPCS) &&
(status_err & IXGBE_RXDADV_ERR_IPE)) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
@@ -317,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
return;
if (status_err & IXGBE_RXDADV_ERR_TCPE) {
- adapter->hw_csum_rx_error++;
+ ring->hw_csum_rx_error++;
return;
}
/* It must be a TCP or UDP packet with a valid checksum */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- adapter->hw_csum_rx_good++;
+ ring->hw_csum_rx_good++;
}
/**
@@ -337,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbevf_rx_buffer *bi;
- struct sk_buff *skb;
unsigned int i = rx_ring->next_to_use;
bi = &rx_ring->rx_buffer_info[i];
while (cleaned_count--) {
rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
- skb = bi->skb;
- if (!skb) {
+
+ if (!bi->skb) {
+ struct sk_buff *skb;
+
skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
rx_ring->rx_buf_len);
if (!skb) {
@@ -353,11 +356,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
goto no_buffers;
}
bi->skb = skb;
- }
- if (!bi->dma) {
+
bi->dma = dma_map_single(&pdev->dev, skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ dev_kfree_skb(skb);
+ bi->skb = NULL;
+ dev_err(&pdev->dev, "RX DMA map failed\n");
+ break;
+ }
}
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
@@ -370,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
-
ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
}
}
@@ -454,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
goto next_desc;
}
- ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+ ixgbevf_rx_checksum(rx_ring, staterr, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -471,6 +478,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* Workaround hardware that can't do proper VEPA multicast
+ * source pruning.
+ */
+ if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
+ !(compare_ether_addr(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source))) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
ixgbevf_receive_skb(q_vector, skb, staterr, rx_desc);
next_desc:
@@ -533,9 +550,11 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
else
per_ring_budget = budget;
+ adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbevf_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
+ adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
@@ -743,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
return IRQ_HANDLED;
}
-
/**
* ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
* @irq: unused
@@ -1065,20 +1083,20 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter)
max_frame += VLAN_HLEN;
/*
- * Make best use of allocation by using all but 1K of a
- * power of 2 allocation that will be used for skb->head.
+ * Allocate buffer sizes that fit well into 32K and
+ * take into account max frame size of 9.5K
*/
if ((hw->mac.type == ixgbe_mac_X540_vf) &&
(max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- else if (max_frame <= IXGBEVF_RXBUFFER_3K)
- rx_buf_len = IXGBEVF_RXBUFFER_3K;
- else if (max_frame <= IXGBEVF_RXBUFFER_7K)
- rx_buf_len = IXGBEVF_RXBUFFER_7K;
- else if (max_frame <= IXGBEVF_RXBUFFER_15K)
- rx_buf_len = IXGBEVF_RXBUFFER_15K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_2K)
+ rx_buf_len = IXGBEVF_RXBUFFER_2K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_4K)
+ rx_buf_len = IXGBEVF_RXBUFFER_4K;
+ else if (max_frame <= IXGBEVF_RXBUFFER_8K)
+ rx_buf_len = IXGBEVF_RXBUFFER_8K;
else
- rx_buf_len = IXGBEVF_MAX_RXBUFFER;
+ rx_buf_len = IXGBEVF_RXBUFFER_10K;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i].rx_buf_len = rx_buf_len;
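
Stripped of the X540 small-frame special case, the ladder above reduces to a simple threshold walk; 10K both covers the 9.5K max frame and still lets a few buffers pack into a 32K allocation region. A stand-alone sketch:

#include <stdio.h>

/* mirrors the IXGBEVF_RXBUFFER_{2K,4K,8K,10K} ladder above */
static unsigned int pick_rx_buf_len(unsigned int max_frame)
{
	if (max_frame <= 2048)
		return 2048;
	if (max_frame <= 4096)
		return 4096;
	if (max_frame <= 8192)
		return 8192;
	return 10240;
}

int main(void)
{
	printf("%u\n", pick_rx_buf_len(1522));	/* 2048 */
	printf("%u\n", pick_rx_buf_len(9018));	/* 10240 */
	return 0;
}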
@@ -1128,15 +1146,12 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err;
- if (!hw->mac.ops.set_vfta)
- return -EOPNOTSUPP;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* add VID to filter table */
err = hw->mac.ops.set_vfta(hw, vid, 0, true);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
/* translate error return types so error makes sense */
if (err == IXGBE_ERR_MBX)
@@ -1156,13 +1171,12 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct ixgbe_hw *hw = &adapter->hw;
int err = -EOPNOTSUPP;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* remove VID from filter table */
- if (hw->mac.ops.set_vfta)
- err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+ err = hw->mac.ops.set_vfta(hw, vid, 0, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(vid, adapter->active_vlans);
@@ -1206,27 +1220,27 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
}
/**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
* @netdev: network interface device structure
*
* The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
**/
static void ixgbevf_set_rx_mode(struct net_device *netdev)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
/* reprogram multicast list */
- if (hw->mac.ops.update_mc_addr_list)
- hw->mac.ops.update_mc_addr_list(hw, netdev);
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
ixgbevf_write_uc_addr_list(netdev);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1290,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
"not set within the polling period\n", rxr);
}
- ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+ adapter->rx_ring[rxr].count - 1);
}
static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1335,11 +1349,12 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_10,
+ int api[] = { ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
ixgbe_mbox_api_unknown };
int err = 0, idx = 0;
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
err = ixgbevf_negotiate_api_version(hw, api[idx]);
@@ -1348,7 +1363,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
idx++;
}
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
}
static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
@@ -1389,16 +1404,14 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
ixgbevf_configure_msix(adapter);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar) {
- if (is_valid_ether_addr(hw->mac.addr))
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- else
- hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- }
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
clear_bit(__IXGBEVF_DOWN, &adapter->state);
ixgbevf_napi_enable_all(adapter);
@@ -1413,12 +1426,87 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
mod_timer(&adapter->watchdog_timer, jiffies);
}
+static int ixgbevf_reset_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+
+ /* allocate resources on the ring */
+ err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ return err;
+ }
+ }
+
+ /* free the existing rings and queues */
+ ixgbevf_free_all_rx_resources(adapter);
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ /* reset ring to vector mapping */
+ ixgbevf_reset_q_vectors(adapter);
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ return 0;
+}
+
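ixgbevf_reset_queues() follows an allocate-then-swap pattern: the replacement ring array is fully allocated and its resources set up before the old array is freed, so a mid-sequence failure leaves the adapter's existing rings untouched. A reduced sketch of the pattern, with hypothetical stand-ins for the driver's types and helpers:

#include <linux/slab.h>

/* hypothetical stand-ins for the driver's ring type and helpers */
struct my_ring { int count; };
struct my_adapter { struct my_ring *rings; unsigned int num_rings; };
extern int  my_setup_ring(struct my_adapter *ad, struct my_ring *r);
extern void my_free_ring(struct my_adapter *ad, struct my_ring *r);
extern void my_free_all_rings(struct my_adapter *ad);

static int swap_rings(struct my_adapter *ad, unsigned int want)
{
	struct my_ring *rings;
	unsigned int i;
	int err;

	if (ad->num_rings == want)
		return 0;			/* nothing to do */

	rings = kcalloc(want, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < want; i++) {
		err = my_setup_ring(ad, &rings[i]);
		if (err) {
			while (i--)		/* unwind what we set up */
				my_free_ring(ad, &rings[i]);
			kfree(rings);
			return err;		/* old rings still intact */
		}
	}

	my_free_all_rings(ad);			/* only now drop the old set */
	kfree(ad->rings);
	ad->rings = rings;
	ad->num_rings = want;
	return 0;
}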
void ixgbevf_up(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
ixgbevf_negotiate_api(adapter);
+ ixgbevf_reset_queues(adapter);
+
ixgbevf_configure(adapter);
ixgbevf_up_complete(adapter);
@@ -1497,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
return;
/* Free all the Tx ring sk_buffs */
-
for (i = 0; i < tx_ring->count; i++) {
tx_buffer_info = &tx_ring->tx_buffer_info[i];
ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1593,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
- /*
- * Check if PF is up before re-init. If not then skip until
- * later when the PF is up and ready to service requests from
- * the VF via mailbox. If the VF is up and running then the
- * watchdog task will continue to schedule reset tasks until
- * the PF is up and running.
- */
ixgbevf_down(adapter);
ixgbevf_up(adapter);
@@ -1611,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- spin_lock(&adapter->mbx_lock);
-
if (hw->mac.ops.reset_hw(hw))
hw_dbg(hw, "PF still resetting\n");
else
hw->mac.ops.init_hw(hw);
- spin_unlock(&adapter->mbx_lock);
-
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
memcpy(netdev->dev_addr, adapter->hw.mac.addr,
netdev->addr_len);
@@ -1628,10 +1704,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
- int vectors)
+static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
{
- int err, vector_threshold;
+ int err = 0;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -1647,21 +1724,18 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
- if (!err) /* Success in acquiring all requested vectors. */
+ if (!err || err < 0) /* Success or a nasty failure. */
break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
- if (vectors < vector_threshold) {
- /* Can't allocate enough MSI-X interrupts? Oh well.
- * This just means we'll go with either a single MSI
- * vector or fall back to legacy interrupts.
- */
- hw_dbg(&adapter->hw,
- "Unable to allocate MSI-X interrupts\n");
+ if (vectors < vector_threshold)
+ err = -ENOMEM;
+
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
@@ -1672,6 +1746,8 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
*/
adapter->num_msix_vectors = vectors;
}
+
+ return err;
}
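The rewritten loop leans on the pci_enable_msix() contract of this era: 0 means all requested vectors were granted, a negative value is a hard error, and a positive value is the number of vectors that could still be granted, which the caller may retry with. A sketch of that retry idiom in isolation:

#include <linux/pci.h>

/* sketch of the pci_enable_msix() retry idiom used above */
static int acquire_vectors(struct pci_dev *pdev,
			   struct msix_entry *entries, int vectors,
			   int threshold)
{
	int err = 0;

	while (vectors >= threshold) {
		err = pci_enable_msix(pdev, entries, vectors);
		if (err <= 0)		/* 0: got them all; <0: hard failure */
			break;
		vectors = err;		/* >0: retry with what's available */
	}

	if (vectors < threshold)
		err = -ENOMEM;		/* couldn't meet the minimum */

	return err;
}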
/**
@@ -1717,6 +1793,7 @@ static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
adapter->tx_ring[i].count = adapter->tx_ring_count;
adapter->tx_ring[i].queue_index = i;
+ /* reg_idx may be remapped later by DCB config */
adapter->tx_ring[i].reg_idx = i;
adapter->tx_ring[i].dev = &adapter->pdev->dev;
adapter->tx_ring[i].netdev = adapter->netdev;
@@ -1774,7 +1851,9 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
- ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
+ if (err)
+ goto out;
err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
if (err)
@@ -1834,18 +1913,13 @@ err_out:
**/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors;
- int napi_vectors;
-
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- napi_vectors = adapter->num_rx_queues;
+ int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
adapter->q_vector[q_idx] = NULL;
- if (q_idx < napi_vectors)
- netif_napi_del(&q_vector->napi);
+ netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
}
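Teardown now mirrors setup exactly: every q_vector had netif_napi_add() called on it at allocation time, so every q_vector gets netif_napi_del(), independent of the Rx queue count. A reduced sketch of the symmetric free path, with a hypothetical vector type:

#include <linux/netdevice.h>
#include <linux/slab.h>

struct qv { struct napi_struct napi; };	/* hypothetical stand-in */

static void free_vectors(struct qv *vecs[], int n)
{
	int q_idx;

	for (q_idx = 0; q_idx < n; q_idx++) {
		struct qv *q = vecs[q_idx];

		vecs[q_idx] = NULL;
		netif_napi_del(&q->napi);	/* every vector has napi */
		kfree(q);
	}
}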
@@ -1935,7 +2009,7 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
-static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
@@ -1950,8 +2024,11 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
hw->subsystem_device_id = pdev->subsystem_device;
hw->mbx.ops.init_params(hw);
- hw->mac.max_tx_queues = MAX_TX_QUEUES;
- hw->mac.max_rx_queues = MAX_RX_QUEUES;
+
+ /* assume legacy case in which PF would only give VF 2 queues */
+ hw->mac.max_tx_queues = 2;
+ hw->mac.max_rx_queues = 2;
+
err = hw->mac.ops.reset_hw(hw);
if (err) {
dev_info(&pdev->dev,
@@ -1966,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
goto out;
}
memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
- adapter->netdev->addr_len);
+ adapter->netdev->addr_len);
}
/* lock to protect mailbox accesses */
@@ -2016,6 +2093,7 @@ out:
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
adapter->stats.vfgprc);
@@ -2029,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
adapter->stats.vfgotc);
UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
adapter->stats.vfmprc);
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->hw_csum_rx_error +=
+ adapter->rx_ring[i].hw_csum_rx_error;
+ adapter->hw_csum_rx_good +=
+ adapter->rx_ring[i].hw_csum_rx_good;
+ adapter->rx_ring[i].hw_csum_rx_error = 0;
+ adapter->rx_ring[i].hw_csum_rx_good = 0;
+ }
}
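The new loop folds the per-ring checksum counters into the adapter-wide totals and clears the ring copies, so each watchdog pass accumulates only the deltas since the previous pass. A reduced sketch of this accumulate-and-clear idiom, with stand-in struct types:

/* field names mirror the driver; the structs are stand-ins */
struct ring  { unsigned long hw_csum_rx_error, hw_csum_rx_good; };
struct stats { unsigned long hw_csum_rx_error, hw_csum_rx_good; };

static void fold_ring_stats(struct stats *total, struct ring *rings, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		total->hw_csum_rx_error += rings[i].hw_csum_rx_error;
		total->hw_csum_rx_good  += rings[i].hw_csum_rx_good;
		rings[i].hw_csum_rx_error = 0;	/* next pass adds deltas only */
		rings[i].hw_csum_rx_good  = 0;
	}
}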
/**
@@ -2103,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
struct ixgbe_hw *hw = &adapter->hw;
u32 link_speed = adapter->link_speed;
bool link_up = adapter->link_up;
+ s32 need_reset;
adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2110,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
* Always check the link on the watchdog because we have
* no LSC interrupt
*/
- if (hw->mac.ops.check_link) {
- s32 need_reset;
-
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- need_reset = hw->mac.ops.check_link(hw, &link_speed,
- &link_up, false);
+ need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
- if (need_reset) {
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- schedule_work(&adapter->reset_task);
- goto pf_has_reset;
- }
- } else {
- /* always assume link is up, if no check link
- * function */
- link_speed = IXGBE_LINK_SPEED_10GB_FULL;
- link_up = true;
+ if (need_reset) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
}
adapter->link_up = link_up;
adapter->link_speed = link_speed;
@@ -2377,6 +2455,63 @@ static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
&adapter->rx_ring[i]);
}
+static int ixgbevf_setup_queues(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ unsigned int def_q = 0;
+ unsigned int num_tcs = 0;
+ unsigned int num_rx_queues = 1;
+ int err, i;
+
+ spin_lock_bh(&adapter->mbx_lock);
+
+ /* fetch queue configuration from the PF */
+ err = ixgbevf_get_queues(hw, &num_tcs, &def_q);
+
+ spin_unlock_bh(&adapter->mbx_lock);
+
+ if (err)
+ return err;
+
+ if (num_tcs > 1) {
+ /* update default Tx ring register index */
+ adapter->tx_ring[0].reg_idx = def_q;
+
+ /* we need as many queues as traffic classes */
+ num_rx_queues = num_tcs;
+ }
+
+ /* nothing to do if we have the correct number of queues */
+ if (adapter->num_rx_queues == num_rx_queues)
+ return 0;
+
+ /* allocate new rings */
+ rx_ring = kcalloc(num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return -ENOMEM;
+
+ /* setup ring fields */
+ for (i = 0; i < num_rx_queues; i++) {
+ rx_ring[i].count = adapter->rx_ring_count;
+ rx_ring[i].queue_index = i;
+ rx_ring[i].reg_idx = i;
+ rx_ring[i].dev = &adapter->pdev->dev;
+ rx_ring[i].netdev = adapter->netdev;
+ }
+
+ /* free the existing ring and queues */
+ adapter->num_rx_queues = 0;
+ kfree(adapter->rx_ring);
+
+ /* move new rings into position on the adapter struct */
+ adapter->rx_ring = rx_ring;
+ adapter->num_rx_queues = num_rx_queues;
+
+ return 0;
+}
+
/**
* ixgbevf_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -2413,6 +2548,11 @@ static int ixgbevf_open(struct net_device *netdev)
ixgbevf_negotiate_api(adapter);
+ /* setup queue reg_idx and Rx queue count */
+ err = ixgbevf_setup_queues(adapter);
+ if (err)
+ goto err_setup_queues;
+
/* allocate transmit descriptors */
err = ixgbevf_setup_all_tx_resources(adapter);
if (err)
@@ -2451,6 +2591,7 @@ err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
err_setup_tx:
ixgbevf_free_all_tx_resources(adapter);
+err_setup_queues:
ixgbevf_reset(adapter);
err_setup_reset:
@@ -2562,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags)
{
-
-
-
u32 vlan_macip_lens = 0;
u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
@@ -2678,10 +2816,10 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
tx_buffer_info->dma =
skb_frag_dma_map(tx_ring->dev, frag,
offset, size, DMA_TO_DEVICE);
- tx_buffer_info->mapped_as_page = true;
if (dma_mapping_error(tx_ring->dev,
tx_buffer_info->dma))
goto dma_error;
+ tx_buffer_info->mapped_as_page = true;
tx_buffer_info->next_to_watch = i;
len -= size;
@@ -2754,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
if (tx_flags & IXGBE_TX_FLAGS_IPV4)
olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
-
}
/*
@@ -2823,6 +2960,11 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
unsigned short f;
#endif
+ u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
+ if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
tx_ring = &adapter->tx_ring[r_idx];
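The new check drops frames addressed to the IEEE 802.1 reserved link-local block (01:80:c2:00:00:00 through :0f, which covers LLDP and friends) before they ever reach a Tx queue, since a VF must not transmit those. A sketch of the destination test, assuming is_link_local_ether_addr() matches that block as defined in etherdevice.h:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* matches 01:80:c2:00:00:00 .. 01:80:c2:00:00:0f; the in-kernel
 * is_link_local_ether_addr() is an optimized form of this test
 */
static bool is_link_local(const uint8_t *addr)
{
	static const uint8_t base[5] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };

	return memcmp(addr, base, 5) == 0 && (addr[5] & 0xf0) == 0x00;
}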
@@ -2902,12 +3044,11 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- spin_lock(&adapter->mbx_lock);
+ spin_lock_bh(&adapter->mbx_lock);
- if (hw->mac.ops.set_rar)
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
- spin_unlock(&adapter->mbx_lock);
+ spin_unlock_bh(&adapter->mbx_lock);
return 0;
}
@@ -2925,8 +3066,15 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
- if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ switch (adapter->hw.api_version) {
+ case ixgbe_mbox_api_11:
max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ default:
+ if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
+ max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
+ break;
+ }
/* MTU < 68 is an error and causes problems on some kernels */
if ((new_mtu < 68) || (max_frame > max_possible_frame))
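max_frame adds the Ethernet header and FCS on top of the requested MTU, and the jumbo cap is only unlocked once API 1.1 has been negotiated (or on an X540 VF). A small worked sketch of the bounds check; the constant values are illustrative and the caps are left as parameters rather than spelling out the driver's defines:

#include <errno.h>
#include <stdbool.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame check sequence */

static int mtu_ok(int new_mtu, bool api_11, int std_cap, int jumbo_cap)
{
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
	int cap = api_11 ? jumbo_cap : std_cap;

	/* e.g. new_mtu = 1500 gives max_frame = 1518, within the
	 * standard VLAN-sized cap; larger MTUs need the jumbo cap
	 */
	return (new_mtu >= 68 && max_frame <= cap) ? 0 : -EINVAL;
}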
@@ -3094,8 +3242,7 @@ static void ixgbevf_assign_netdev_ops(struct net_device *dev)
* The OS initialization, configuring of the adapter private structure,
* and a hardware reset occur.
**/
-static int __devinit ixgbevf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct ixgbevf_adapter *adapter = NULL;
@@ -3223,10 +3370,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- /* pick up the PCI bus settings for reporting later */
- if (hw->mac.ops.get_bus_info)
- hw->mac.ops.get_bus_info(hw);
-
strcpy(netdev->name, "eth%d");
err = register_netdev(netdev);
@@ -3270,7 +3413,7 @@ err_dma:
* Hot-Plug event, or because the driver is going to be removed from
* memory.
**/
-static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+static void ixgbevf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3384,7 +3527,7 @@ static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
.id_table = ixgbevf_pci_tbl,
.probe = ixgbevf_probe,
- .remove = __devexit_p(ixgbevf_remove),
+ .remove = ixgbevf_remove,
#ifdef CONFIG_PM
/* Power Management Hooks */
.suspend = ixgbevf_suspend,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index 946ce86f337f..0bc30058ff82 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -85,6 +85,7 @@
enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */
ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */
+ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -100,6 +101,15 @@ enum ixgbe_pfvf_api_rev {
#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */
+/* mailbox API, version 1.1 VF requests */
+#define IXGBE_VF_GET_QUEUE 0x09 /* get queue configuration */
+
+/* GET_QUEUES return data indices within the mailbox */
+#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
+#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
+#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */
+#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
+
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
/* word in permanent address message with the current multicast type */
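The GET_QUEUE reply is a five-word mailbox message: word 0 echoes the message type plus status bits, and words 1..4 carry the values at the indices defined above. A sketch of decoding such a reply into a small struct, with hypothetical names:

#include <stdint.h>

/* word indices within the 5-word GET_QUEUE reply, as defined above */
enum { MSG_TYPE = 0, TX_QUEUES = 1, RX_QUEUES = 2, TRANS_VLAN = 3,
       DEF_QUEUE = 4 };

struct vf_queue_info {
	uint32_t max_tx, max_rx, port_vlan, def_queue;
};

/* sketch: pull the payload words out of a received reply buffer */
static void decode_get_queue(const uint32_t msg[5],
			     struct vf_queue_info *qi)
{
	qi->max_tx    = msg[TX_QUEUES];
	qi->max_rx    = msg[RX_QUEUES];
	qi->port_vlan = msg[TRANS_VLAN];	/* nonzero: port VLAN / DCB */
	qi->def_queue = msg[DEF_QUEUE];
}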
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c7447e6fcc8..0c94557b53df 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -331,6 +331,9 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
netdev_for_each_mc_addr(ha, netdev) {
if (i == cnt)
break;
+ if (is_link_local_ether_addr(ha->addr))
+ continue;
+
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
@@ -513,6 +516,64 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ u32 msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUE;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+ err = hw->mbx.ops.write_posted(hw, msg, 5);
+
+ if (!err)
+ err = hw->mbx.ops.read_posted(hw, msg, 5);
+
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+	/*
+	 * if we didn't get an ACK there must have been
+	 * some sort of mailbox error, so treat it as such
+	 */
+ if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
+
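Word 0 of every PF reply carries the original message type with status bits ORed in; the function strips IXGBE_VT_MSGTYPE_CTS and then requires the ACK-tagged echo of IXGBE_VF_GET_QUEUE before trusting the payload. A sketch of that reply-validation step, with illustrative bit values (the real ones live in the driver's mbx.h):

#include <stdint.h>

/* illustrative status bits; the driver's mbx.h defines the real ones */
#define MSGTYPE_ACK	0x80000000u
#define MSGTYPE_NAK	0x40000000u
#define MSGTYPE_CTS	0x20000000u
#define VF_GET_QUEUE	0x09u

/* accept the reply only if it is the ACK-tagged echo of the request;
 * anything else is treated as a mailbox error
 */
static int reply_is_acked(uint32_t word0)
{
	word0 &= ~MSGTYPE_CTS;		/* CTS may ride along */
	return word0 == (VF_GET_QUEUE | MSGTYPE_ACK);
}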
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 47f11a584d8c..7b1f502d1716 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -174,5 +174,7 @@ struct ixgbevf_info {
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */