Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  |  553
1 file changed, 402 insertions(+), 151 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index aed8d029b23d..c4003a88bbf6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -65,9 +65,6 @@
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
#include "ixgbe_sriov.h"
-#ifdef CONFIG_IXGBE_VXLAN
-#include <net/vxlan.h>
-#endif
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
@@ -175,6 +172,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static struct workqueue_struct *ixgbe_wq;
+
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
@@ -316,7 +315,7 @@ static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
!test_bit(__IXGBE_REMOVING, &adapter->state) &&
!test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
- schedule_work(&adapter->service_task);
+ queue_work(ixgbe_wq, &adapter->service_task);
}
static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
@@ -1484,7 +1483,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring,
return;
if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) {
- ring->rx_stats.csum_err++;
+ skb->ip_summed = CHECKSUM_NONE;
return;
}
/* If we checked the outer header let the stack know */
@@ -1635,6 +1634,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
struct sk_buff *skb)
{
struct net_device *dev = rx_ring->netdev;
+ u32 flags = rx_ring->q_vector->adapter->flags;
ixgbe_update_rsc_stats(rx_ring, skb);
@@ -1642,8 +1642,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
ixgbe_rx_checksum(rx_ring, rx_desc, skb);
- if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);
+ if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED))
+ ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -1659,6 +1659,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb)
{
+ skb_mark_napi_id(skb, &q_vector->napi);
if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb);
else
@@ -2123,7 +2124,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
#endif /* IXGBE_FCOE */
- skb_mark_napi_id(skb, &q_vector->napi);
ixgbe_rx_skb(q_vector, skb);
/* update budget accounting */
@@ -2741,7 +2741,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
- ixgbe_ptp_check_pps_event(adapter, eicr);
+ ixgbe_ptp_check_pps_event(adapter);
/* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
@@ -2757,7 +2757,7 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
/* EIAM disabled interrupts (on this vector) for us */
if (q_vector->rx.ring || q_vector->tx.ring)
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
return IRQ_HANDLED;
}
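
Note: napi_schedule() wraps the schedule in local_irq_save()/local_irq_restore(); napi_schedule_irqoff() omits that, which is safe here because MSI-X vector handlers run in hardirq context with interrupts already disabled. A minimal sketch of the call-site pattern, with illustrative names rather than this driver's:

    /* Sketch: scheduling NAPI from a hardirq handler (illustrative names).
     * In hardirq context interrupts are already off, so the _irqoff
     * variant avoids a redundant local_irq_save()/local_irq_restore(). */
    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t example_msix_handler(int irq, void *data)
    {
            struct napi_struct *napi = data;

            napi_schedule_irqoff(napi);     /* IRQs known to be disabled */
            return IRQ_HANDLED;
    }
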
@@ -2786,7 +2786,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
- if (!ixgbe_qv_lock_napi(q_vector))
+ /* Exit if we are called by netpoll or busy polling is active */
+ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
return budget;
/* attempt to distribute budget to each queue fairly, but don't allow
@@ -2947,10 +2948,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (unlikely(eicr & IXGBE_EICR_TIMESYNC))
- ixgbe_ptp_check_pps_event(adapter, eicr);
+ ixgbe_ptp_check_pps_event(adapter);
/* would disable interrupts here but EIAM disabled it */
- napi_schedule(&q_vector->napi);
+ napi_schedule_irqoff(&q_vector->napi);
/*
* re-enable link(maybe) and non-queue interrupts, no flush.
@@ -3315,8 +3316,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
}
/**
- * Return a number of entries in the RSS indirection table
- *
+ * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries
* @adapter: device handle
*
* - 82598/82599/X540: 128
@@ -3334,8 +3334,7 @@ u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter)
}
/**
- * Write the RETA table to HW
- *
+ * ixgbe_store_reta - Write the RETA table to HW
* @adapter: device handle
*
* Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
@@ -3374,8 +3373,7 @@ void ixgbe_store_reta(struct ixgbe_adapter *adapter)
}
/**
- * Write the RETA table to HW (for x550 devices in SRIOV mode)
- *
+ * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode)
* @adapter: device handle
*
* Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW.
@@ -3621,6 +3619,9 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
+ /* Force flushing of IXGBE_RDLEN to prevent MDD */
+ IXGBE_WRITE_FLUSH(hw);
+
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx);
@@ -3704,6 +3705,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
/* Map PF MAC address in RAR Entry 0 to first pool following VFs */
hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0));
+ /* clear VLAN promisc flag so VFTA will be updated if necessary */
+ adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
/*
* Set up VF register offsets for selected VT Mode,
* i.e. 32 or 64 VFs for SR-IOV
@@ -3901,12 +3905,56 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
set_bit(vid, adapter->active_vlans);
return 0;
}
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ int idx;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (idx = IXGBE_VLVF_ENTRIES; --idx;) {
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ return idx;
+}
+
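
Note: the countdown loop `for (idx = IXGBE_VLVF_ENTRIES; --idx;)` deliberately never tests entry 0, which is reserved for VLAN 0, so a failed search falls out with idx == 0 and the caller can treat 0 as "not found". A runnable userspace demonstration of the idiom (table contents invented):

    /* Demo: countdown search that skips slot 0 (invented data). */
    #include <stdio.h>

    #define ENTRIES 8

    int main(void)
    {
            unsigned int table[ENTRIES] = { 0, 0, 100, 0, 42, 0, 0, 0 };
            int idx;

            /* --idx in the condition: idx runs 7..1, and the loop exits
             * with idx == 0 when nothing matched */
            for (idx = ENTRIES; --idx;)
                    if (table[idx] == 42)
                            break;

            printf("found at idx=%d (0 means not found)\n", idx);
            return 0;
    }
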
+void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bits, word;
+ int idx;
+
+ idx = ixgbe_find_vlvf_entry(hw, vid);
+ if (!idx)
+ return;
+
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ word = idx * 2 + (VMDQ_P(0) / 32);
+ bits = ~(1 << (VMDQ_P(0) % 32));
+ bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+
+ /* Disable the filter so this falls into the default pool. */
+ if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) {
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
+ }
+}
+
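
Note: every VLVF entry owns two consecutive 32-bit VLVFB pool-enable words (64 pool bits total), so the word holding a pool's bit is idx * 2 + pool / 32 and the bit is pool % 32; word ^ 1 addresses the sibling word when checking whether any pool at all is still enabled. A runnable demonstration of the arithmetic (values invented):

    /* Demo: VLVF/VLVFB index arithmetic (invented values). */
    #include <stdio.h>

    int main(void)
    {
            int idx = 5;    /* VLVF filter entry */
            int pool = 40;  /* pool (VF) number, 0..63 */

            int word = idx * 2 + pool / 32; /* 32-bit VLVFB word */
            int bit = pool % 32;            /* bit inside that word */

            printf("pool %d of entry %d -> VLVFB word %d, bit %d\n",
                   pool, idx, word, bit);
            printf("sibling word: %d\n", word ^ 1);
            return 0;
    }
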
static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid)
{
@@ -3914,7 +3962,11 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false);
+ if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+ ixgbe_update_pf_promisc_vlvf(adapter, vid);
+ else
+ hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
+
clear_bit(vid, adapter->active_vlans);
return 0;
@@ -3992,6 +4044,129 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
}
}
+static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl, i;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ default:
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+ break;
+ /* fall through */
+ case ixgbe_mac_82598EB:
+ /* legacy case, we can just disable VLAN filtering */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ return;
+ }
+
+ /* We are already in VLAN promisc, nothing to do */
+ if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
+ return;
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+
+ /* Add PF to all active pools */
+ for (i = IXGBE_VLVF_ENTRIES; --i;) {
+ u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
+ u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
+
+ vlvfb |= 1 << (VMDQ_P(0) % 32);
+ IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
+ }
+
+ /* Set all bits in the VLAN filter table array */
+ for (i = hw->mac.vft_size; i--;)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U);
+}
+
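
Note: the VFTA is a flat bit array covering all 4096 VLAN IDs across vft_size 32-bit registers, so writing ~0U everywhere admits every VLAN, which is exactly what VLAN promiscuous mode means on this hardware. The register/bit split for a single VID is the usual vid / 32 and vid % 32, as the runnable demonstration below shows (VID invented):

    /* Demo: mapping a VLAN ID to its VFTA register and bit (invented VID). */
    #include <stdio.h>

    int main(void)
    {
            unsigned int vid = 1234;     /* 0..4095 */
            unsigned int reg = vid / 32; /* VFTA register index */
            unsigned int bit = vid % 32; /* bit within the register */

            printf("VID %u -> VFTA[%u] bit %u (mask 0x%08x)\n",
                   vid, reg, bit, 1u << bit);
            return 0;
    }
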
+#define VFTA_BLOCK_SIZE 8
+static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
+ u32 vid_start = vfta_offset * 32;
+ u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
+ u32 i, vid, word, bits;
+
+ for (i = IXGBE_VLVF_ENTRIES; --i;) {
+ u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
+
+ /* pull VLAN ID from VLVF */
+ vid = vlvf & VLAN_VID_MASK;
+
+ /* only concern ourselves with a certain range */
+ if (vid < vid_start || vid >= vid_end)
+ continue;
+
+ if (vlvf) {
+ /* record VLAN ID in VFTA */
+ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+
+ /* if PF is part of this then continue */
+ if (test_bit(vid, adapter->active_vlans))
+ continue;
+ }
+
+ /* remove PF from the pool */
+ word = i * 2 + VMDQ_P(0) / 32;
+ bits = ~(1 << (VMDQ_P(0) % 32));
+ bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
+ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
+ }
+
+ /* extract values from active_vlans and write back to VFTA */
+ for (i = VFTA_BLOCK_SIZE; i--;) {
+ vid = (vfta_offset + i) * 32;
+ word = vid / BITS_PER_LONG;
+ bits = vid % BITS_PER_LONG;
+
+ vfta[i] |= adapter->active_vlans[word] >> bits;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]);
+ }
+}
+
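
Note: active_vlans is a bitmap of unsigned long while the VFTA registers are 32 bits wide, so the write-back loop re-slices the bitmap with word = vid / BITS_PER_LONG and bits = vid % BITS_PER_LONG; on a 64-bit kernel every other register comes from the upper half of the same long. A runnable re-slicing demonstration (bitmap contents invented; output depends on sizeof(long)):

    /* Demo: slicing a long-based bitmap into 32-bit register values
     * (invented bitmap contents). */
    #include <stdio.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    int main(void)
    {
            unsigned long active_vlans[128 / BITS_PER_LONG] =
                    { 0x80000001UL, 0x5UL };
            unsigned int i;

            for (i = 0; i < 4; i++) {   /* four 32-bit registers */
                    unsigned int vid = i * 32;
                    unsigned int reg = (unsigned int)
                            (active_vlans[vid / BITS_PER_LONG] >>
                             (vid % BITS_PER_LONG));

                    printf("VFTA[%u] = 0x%08x\n", i, reg);
            }
            return 0;
    }
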
+static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vlnctrl, i;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ default:
+ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
+ break;
+ /* fall through */
+ case ixgbe_mac_82598EB:
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+ return;
+ }
+
+ /* We are not in VLAN promisc, nothing to do */
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ return;
+
+ /* Set flag so we don't redo unnecessary work */
+ adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC;
+
+ for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE)
+ ixgbe_scrub_vfta(adapter, i);
+}
+
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
u16 vid;
@@ -4034,124 +4209,156 @@ static int ixgbe_write_mc_addr_list(struct net_device *netdev)
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
- hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
- adapter->mac_table[i].queue,
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
+
+ if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ mac_table->addr,
+ mac_table->pool,
IXGBE_RAH_AV);
else
hw->mac.ops.clear_rar(hw, i);
-
- adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
}
}
-#endif
+#endif
static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
- if (adapter->mac_table[i].state &
- IXGBE_MAC_STATE_IN_USE)
- hw->mac.ops.set_rar(hw, i,
- adapter->mac_table[i].addr,
- adapter->mac_table[i].queue,
- IXGBE_RAH_AV);
- else
- hw->mac.ops.clear_rar(hw, i);
- adapter->mac_table[i].state &=
- ~(IXGBE_MAC_STATE_MODIFIED);
- }
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED))
+ continue;
+
+ mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED;
+
+ if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ mac_table->addr,
+ mac_table->pool,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
}
}
static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
{
- int i;
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- eth_zero_addr(adapter->mac_table[i].addr);
- adapter->mac_table[i].queue = 0;
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
+ mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
}
+
ixgbe_sync_mac_table(adapter);
}
-static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool)
{
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i, count = 0;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state == 0)
- count++;
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ /* do not count default RAR as available */
+ if (mac_table->state & IXGBE_MAC_STATE_DEFAULT)
+ continue;
+
+ /* only count entries that are unused or belong to this pool */
+ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) {
+ if (mac_table->pool != pool)
+ continue;
+ }
+
+ count++;
}
+
return count;
}
/* this function destroys the first RAR entry */
-static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
- u8 *addr)
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter)
{
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
- memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
- adapter->mac_table[0].queue = VMDQ_P(0);
- adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
- IXGBE_MAC_STATE_IN_USE);
- hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
- adapter->mac_table[0].queue,
+ memcpy(&mac_table->addr, hw->mac.addr, ETH_ALEN);
+ mac_table->pool = VMDQ_P(0);
+
+ mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE;
+
+ hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool,
IXGBE_RAH_AV);
}
-int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ const u8 *addr, u16 pool)
{
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
int i;
if (is_zero_ether_addr(addr))
return -EINVAL;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ if (mac_table->state & IXGBE_MAC_STATE_IN_USE)
continue;
- adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
- IXGBE_MAC_STATE_IN_USE);
- ether_addr_copy(adapter->mac_table[i].addr, addr);
- adapter->mac_table[i].queue = queue;
+
+ ether_addr_copy(mac_table->addr, addr);
+ mac_table->pool = pool;
+
+ mac_table->state |= IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE;
+
ixgbe_sync_mac_table(adapter);
+
return i;
}
+
return -ENOMEM;
}
-int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ const u8 *addr, u16 pool)
{
- /* search table for addr, if found, set to 0 and sync */
- int i;
+ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0];
struct ixgbe_hw *hw = &adapter->hw;
+ int i;
if (is_zero_ether_addr(addr))
return -EINVAL;
- for (i = 0; i < hw->mac.num_rar_entries; i++) {
- if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
- adapter->mac_table[i].queue == queue) {
- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
- eth_zero_addr(adapter->mac_table[i].addr);
- adapter->mac_table[i].queue = 0;
- ixgbe_sync_mac_table(adapter);
- return 0;
- }
+ /* search table for addr, if found clear IN_USE flag and sync */
+ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) {
+ /* we can only delete an entry if it is in use */
+ if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE))
+ continue;
+ /* we only care about entries that belong to the given pool */
+ if (mac_table->pool != pool)
+ continue;
+ /* we only care about a specific MAC address */
+ if (!ether_addr_equal(addr, mac_table->addr))
+ continue;
+
+ mac_table->state |= IXGBE_MAC_STATE_MODIFIED;
+ mac_table->state &= ~IXGBE_MAC_STATE_IN_USE;
+
+ ixgbe_sync_mac_table(adapter);
+
+ return 0;
}
+
return -ENOMEM;
}
/**
@@ -4169,7 +4376,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
@@ -4183,6 +4390,25 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
return count;
}
+static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret;
+
+ ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0));
+
+ return min_t(int, ret, 0);
+}
+
+static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0));
+
+ return 0;
+}
+
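
Note: __dev_uc_sync() walks the netdev unicast list and calls the sync callback once per newly added address and unsync once per removed one; sync must return 0 or a negative errno, and since ixgbe_add_mac_filter() returns the RAR index on success, min_t(int, ret, 0) flattens any index to 0 while letting -EINVAL/-ENOMEM through. A hedged sketch of the callback contract; the example_* helpers are hypothetical stand-ins, not driver functions:

    /* Sketch: __dev_uc_sync() callback contract (illustrative names). */
    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    /* hypothetical driver helpers, assumed for this sketch */
    static int example_add_filter(struct net_device *dev,
                                  const unsigned char *addr);
    static void example_del_filter(struct net_device *dev,
                                   const unsigned char *addr);

    static int example_uc_sync(struct net_device *dev,
                               const unsigned char *addr)
    {
            int ret = example_add_filter(dev, addr); /* index or -errno */

            return min_t(int, ret, 0); /* index -> 0, errors unchanged */
    }

    static int example_uc_unsync(struct net_device *dev,
                                 const unsigned char *addr)
    {
            example_del_filter(dev, addr); /* best effort */
            return 0;
    }

    /* in the rx_mode path:
     *      if (__dev_uc_sync(dev, example_uc_sync, example_uc_unsync))
     *              fall back to unicast promiscuous mode
     */
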
/**
* ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
@@ -4197,12 +4423,10 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
- u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -4212,25 +4436,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
- /* Only disable hardware filter vlans in promiscuous mode
- * if SR-IOV and VMDQ are disabled - otherwise ensure
- * that hardware VLAN filters remain enabled.
- */
- if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
- IXGBE_FLAG_SRIOV_ENABLED))
- vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ ixgbe_vlan_promisc_enable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
+ ixgbe_vlan_promisc_disable(adapter);
}
/*
@@ -4238,8 +4455,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
- if (count < 0) {
+ if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
}
@@ -4275,7 +4491,6 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -5042,7 +5257,6 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
int err;
- u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -5078,10 +5292,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
- /* do not flush user set addresses */
- memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+
+ /* flush entries out of MAC table */
ixgbe_flush_sw_mac_table(adapter);
- ixgbe_mac_set_default_filter(adapter, old_addr);
+ __dev_uc_unsync(netdev, NULL);
+
+ /* do not flush user set addresses */
+ ixgbe_mac_set_default_filter(adapter);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5331,6 +5548,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
GFP_ATOMIC);
+ if (!adapter->mac_table)
+ return -ENOMEM;
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
@@ -6616,10 +6835,8 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *vfdev;
+ unsigned int vf;
u32 gpc;
- int pos;
- unsigned short vf_id;
if (!(netif_carrier_ok(adapter->netdev)))
return;
@@ -6636,26 +6853,17 @@ static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter)
if (!pdev)
return;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (!pos)
- return;
-
- /* get the device ID for the VF */
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id);
-
/* check status reg for all VFs owned by this PF */
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && (vfdev->physfn == pdev)) {
- u16 status_reg;
-
- pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
- if (status_reg & PCI_STATUS_REC_MASTER_ABORT)
- /* issue VFLR */
- ixgbe_issue_vf_flr(adapter, vfdev);
- }
+ for (vf = 0; vf < adapter->num_vfs; ++vf) {
+ struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev;
+ u16 status_reg;
- vfdev = pci_get_device(pdev->vendor, vf_id, vfdev);
+ if (!vfdev)
+ continue;
+ pci_read_config_word(vfdev, PCI_STATUS, &status_reg);
+ if (status_reg != IXGBE_FAILED_READ_CFG_WORD &&
+ status_reg & PCI_STATUS_REC_MASTER_ABORT)
+ ixgbe_issue_vf_flr(adapter, vfdev);
}
}
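
Note: the rewritten loop walks the VF pci_dev pointers already cached in adapter->vfinfo instead of rescanning the bus with pci_get_device(), and an all-ones config read (IXGBE_FAILED_READ_CFG_WORD) now identifies a surprise-removed VF that must not be mistaken for a master abort. A hedged sketch of that status probe; everything outside the PCI core API is illustrative:

    /* Sketch: probing a VF's PCI status word (illustrative wrapper).
     * 0xFFFF from config space usually means the device is gone and
     * must be filtered out before testing individual status bits. */
    #include <linux/pci.h>

    static void example_check_vf(struct pci_dev *vfdev)
    {
            u16 status;

            if (!vfdev)
                    return;
            pci_read_config_word(vfdev, PCI_STATUS, &status);
            if (status == 0xFFFF)   /* removed device: ignore */
                    return;
            if (status & PCI_STATUS_REC_MASTER_ABORT)
                    pr_warn("VF wedged, needs a function-level reset\n");
    }
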
@@ -7024,6 +7232,7 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct tcphdr *tcphdr;
u8 *raw;
} transport_hdr;
+ __be16 frag_off;
if (skb->encapsulation) {
network_hdr.raw = skb_inner_network_header(skb);
@@ -7047,13 +7256,17 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
case 6:
vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
l4_hdr = network_hdr.ipv6->nexthdr;
+ if (likely((transport_hdr.raw - network_hdr.raw) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but version=%d\n",
- network_hdr.ipv4->version);
- }
+ break;
}
switch (l4_hdr) {
@@ -7074,16 +7287,18 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, version=%d, l4 proto=%x\n",
+ network_hdr.ipv4->version, l4_hdr);
}
- break;
+ skb_checksum_help(skb);
+ goto no_csum;
}
/* update TX checksum flag */
first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
+no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
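
Note: when extension headers follow the fixed IPv6 header, nexthdr in that fixed header names the first extension header rather than the L4 protocol, so the code walks the chain with ipv6_skip_exthdr(); a nonzero frag_off identifies a fragment, for which the hardware cannot insert an L4 checksum. A hedged sketch of the walk (helper and context illustrative; skb assumed valid):

    /* Sketch: resolving the L4 protocol behind IPv6 extension headers
     * (illustrative helper, not driver code). */
    #include <linux/ipv6.h>
    #include <net/ipv6.h>

    static u8 example_ipv6_l4_proto(struct sk_buff *skb, u8 *network_hdr)
    {
            u8 l4_hdr = ((struct ipv6hdr *)network_hdr)->nexthdr;
            __be16 frag_off;

            ipv6_skip_exthdr(skb, (int)(network_hdr - skb->data +
                                        sizeof(struct ipv6hdr)),
                             &l4_hdr, &frag_off);
            if (frag_off)   /* fragment: no L4 offload possible */
                    return NEXTHDR_FRAGMENT;
            return l4_hdr;  /* e.g. IPPROTO_TCP */
    }
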
@@ -7358,7 +7573,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
- if (skb->encapsulation) {
+ if (!skb->encapsulation) {
+ th = tcp_hdr(skb);
+ } else {
#ifdef CONFIG_IXGBE_VXLAN
struct ixgbe_adapter *adapter = q_vector->adapter;
@@ -7377,14 +7594,34 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
#else
return;
#endif /* CONFIG_IXGBE_VXLAN */
- } else {
- /* Currently only IPv4/IPv6 with TCP is supported */
- if ((first->protocol != htons(ETH_P_IPV6) ||
- hdr.ipv6->nexthdr != IPPROTO_TCP) &&
- (first->protocol != htons(ETH_P_IP) ||
- hdr.ipv4->protocol != IPPROTO_TCP))
+ }
+
+ /* Currently only IPv4/IPv6 with TCP is supported */
+ switch (hdr.ipv4->version) {
+ case IPVERSION:
+ if (hdr.ipv4->protocol != IPPROTO_TCP)
return;
- th = tcp_hdr(skb);
+ break;
+ case 6:
+ if (likely((unsigned char *)th - hdr.network ==
+ sizeof(struct ipv6hdr))) {
+ if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ return;
+ } else {
+ __be16 frag_off;
+ u8 l4_hdr;
+
+ ipv6_skip_exthdr(skb, hdr.network - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ return;
+ if (l4_hdr != IPPROTO_TCP)
+ return;
+ }
+ break;
+ default:
+ return;
}
/* skip this packet since it is invalid or the socket is closing */
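
Note: switching on hdr.ipv4->version is valid before the family is known, because the 4-bit version field occupies the same leading nibble in IPv4 and IPv6 headers; IPVERSION expands to 4, while 6 has no equivalent constant here. A runnable demonstration of reading that nibble from raw header bytes (bytes invented):

    /* Demo: the version nibble sits in the same place in IPv4 and
     * IPv6 headers (invented first header bytes). */
    #include <stdio.h>

    int main(void)
    {
            unsigned char v4_first_byte = 0x45; /* version 4, IHL 5 */
            unsigned char v6_first_byte = 0x60; /* version 6, TC 0 */

            printf("packet 1: IP version %u\n", v4_first_byte >> 4);
            printf("packet 2: IP version %u\n", v6_first_byte >> 4);
            return 0;
    }
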
@@ -7419,10 +7656,12 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source;
- if (first->protocol == htons(ETH_P_IP)) {
+ switch (hdr.ipv4->version) {
+ case IPVERSION:
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
- } else {
+ break;
+ case 6:
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
hdr.ipv6->saddr.s6_addr32[1] ^
@@ -7432,6 +7671,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.ipv6->daddr.s6_addr32[1] ^
hdr.ipv6->daddr.s6_addr32[2] ^
hdr.ipv6->daddr.s6_addr32[3];
+ break;
+ default:
+ break;
}
#ifdef CONFIG_IXGBE_VXLAN
@@ -7659,17 +7901,16 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
- int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
- return ret > 0 ? 0 : ret;
+ ixgbe_mac_set_default_filter(adapter);
+
+ return 0;
}
static int
@@ -8155,7 +8396,10 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
/* guarantee we can provide a unique filter for the unicast address */
if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
- if (IXGBE_MAX_PF_MACVLANS <= netdev_uc_count(dev))
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ u16 pool = VMDQ_P(0);
+
+ if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool))
return -ENOMEM;
}
@@ -8387,7 +8631,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
IXGBE_MAX_TUNNEL_HDR_LEN))
- return features & ~NETIF_F_ALL_CSUM;
+ return features & ~NETIF_F_CSUM_MASK;
return features;
}
@@ -8784,8 +9028,8 @@ skip_sriov:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- netdev->features |= NETIF_F_SCTP_CSUM;
- netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ netdev->features |= NETIF_F_SCTP_CRC;
+ netdev->hw_features |= NETIF_F_SCTP_CRC |
NETIF_F_NTUPLE;
break;
default:
@@ -8801,8 +9045,7 @@ skip_sriov:
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
- netdev->hw_enc_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
@@ -8811,9 +9054,7 @@ skip_sriov:
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- netdev->hw_enc_features |= NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM;
+ netdev->hw_enc_features |= NETIF_F_RXCSUM;
break;
default:
break;
@@ -8873,7 +9114,7 @@ skip_sriov:
goto err_sw_init;
}
- ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+ ixgbe_mac_set_default_filter(adapter);
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -9328,6 +9569,12 @@ static int __init ixgbe_init_module(void)
pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version);
pr_info("%s\n", ixgbe_copyright);
+ ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name);
+ if (!ixgbe_wq) {
+ pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name);
+ return -ENOMEM;
+ }
+
ixgbe_dbg_init();
ret = pci_register_driver(&ixgbe_driver);
@@ -9359,6 +9606,10 @@ static void __exit ixgbe_exit_module(void)
pci_unregister_driver(&ixgbe_driver);
ixgbe_dbg_exit();
+ if (ixgbe_wq) {
+ destroy_workqueue(ixgbe_wq);
+ ixgbe_wq = NULL;
+ }
}
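
Note: the private workqueue is created before pci_register_driver() and destroyed only after pci_unregister_driver(), so it exists for the whole lifetime of every bound device; this is the counterpart of the queue_work() switch in ixgbe_service_event_schedule() above. A hedged mini-module sketch of the same lifecycle, with illustrative names:

    /* Sketch: module-lifetime private workqueue (illustrative names,
     * not a drop-in module). */
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static int __init example_init(void)
    {
            /* single-threaded: all service tasks serialize here instead
             * of competing on the shared system workqueue */
            example_wq = create_singlethread_workqueue("example");
            if (!example_wq)
                    return -ENOMEM;
            /* ... pci_register_driver() would go here ... */
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* ... pci_unregister_driver() first, so nothing can queue ... */
            destroy_workqueue(example_wq);
            example_wq = NULL;
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
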
#ifdef CONFIG_IXGBE_DCA