Diffstat (limited to 'drivers/net/ixgbe')
-rw-r--r--  drivers/net/ixgbe/ixgbe.h            102
-rw-r--r--  drivers/net/ixgbe/ixgbe_82598.c       43
-rw-r--r--  drivers/net/ixgbe/ixgbe_82599.c      645
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.c     244
-rw-r--r--  drivers/net/ixgbe/ixgbe_common.h       5
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.c         10
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb.h          7
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.c   43
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82598.h    3
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.c  119
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_82599.h   14
-rw-r--r--  drivers/net/ixgbe/ixgbe_dcb_nl.c     129
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c    589
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.c       279
-rw-r--r--  drivers/net/ixgbe/ixgbe_fcoe.h        12
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c      1354
-rw-r--r--  drivers/net/ixgbe/ixgbe_sriov.c       16
-rw-r--r--  drivers/net/ixgbe/ixgbe_type.h        99
-rw-r--r--  drivers/net/ixgbe/ixgbe_x540.c         2
19 files changed, 2024 insertions, 1691 deletions
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index e467b20ed1f0..e04a8e49e6dc 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -131,6 +131,13 @@ struct vf_macvlans {
u8 vf_macvlan[ETH_ALEN];
};
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
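The TXD_USE_COUNT()/DESC_NEEDED macros added above size the worst-case per-skb descriptor reservation. A standalone sketch of the arithmetic, assuming PAGE_SIZE = 4096 and MAX_SKB_FRAGS = 17 (both are kernel-configuration values, so the numbers are illustrative only):

    /* Sketch of the worst-case Tx descriptor math; PAGE_SIZE and
     * MAX_SKB_FRAGS values below are assumptions, not from this patch. */
    #include <stdio.h>

    #define MAX_DATA_PER_TXD   (1 << 14)  /* 16384 bytes per descriptor */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define TXD_USE_COUNT(S)   DIV_ROUND_UP((S), MAX_DATA_PER_TXD)

    int main(void)
    {
        unsigned page_size = 4096, max_skb_frags = 17;
        /* one descriptor per 16K chunk of each frag, plus 4 slots of
         * slack for the context descriptor, head, and wrap handling */
        unsigned needed = max_skb_frags * TXD_USE_COUNT(page_size) + 4;
        printf("DESC_NEEDED = %u\n", needed); /* prints 21 */
        return 0;
    }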
@@ -207,12 +214,10 @@ struct ixgbe_ring {
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
- u8 atr_sample_rate;
- u8 atr_count;
+ u8 __iomem *tail;
+
u16 count; /* amount of descriptors */
u16 rx_buf_len;
- u16 next_to_use;
- u16 next_to_clean;
u8 queue_index; /* needed for multiqueue queue management */
u8 reg_idx; /* holds the special value that gets
@@ -220,15 +225,13 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
- u8 dcb_tc;
-
- u16 work_limit; /* max work per interrupt */
-
- u8 __iomem *tail;
+ u8 atr_sample_rate;
+ u8 atr_count;
- unsigned int total_bytes;
- unsigned int total_packets;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
union {
@@ -244,7 +247,6 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
- RING_F_DCB,
RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
@@ -255,7 +257,6 @@ enum ixgbe_ring_f_enum {
RING_F_ARRAY_SIZE /* must be last in enum set */
};
-#define IXGBE_MAX_DCB_INDICES 64
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
@@ -272,6 +273,18 @@ struct ixgbe_ring_feature {
int mask;
} ____cacheline_internodealigned_in_smp;
+struct ixgbe_ring_container {
+#if MAX_RX_QUEUES > MAX_TX_QUEUES
+ DECLARE_BITMAP(idx, MAX_RX_QUEUES);
+#else
+ DECLARE_BITMAP(idx, MAX_TX_QUEUES);
+#endif
+ unsigned int total_bytes; /* total bytes processed this int */
+ unsigned int total_packets; /* total packets processed this int */
+ u16 work_limit; /* total work allowed per interrupt */
+ u8 count; /* total number of rings in vector */
+ u8 itr; /* current ITR setting for ring */
+};
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
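The new ixgbe_ring_container above folds the per-direction ring bitmap, byte/packet totals, and ITR bookkeeping into one structure, so each q_vector carries a matched rx/tx pair (see the q_vector hunk below). A hedged sketch of how a vector's Rx rings can then be walked with the kernel's for_each_set_bit() from <linux/bitops.h>; the loop body is illustrative, not code from this patch:

    /* Illustrative traversal of one container's ring bitmap. */
    unsigned long r_idx;

    for_each_set_bit(r_idx, q_vector->rx.idx, MAX_RX_QUEUES) {
        struct ixgbe_ring *ring = adapter->rx_ring[r_idx];
        /* clean the ring, accumulate q_vector->rx.total_bytes ... */
    }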
@@ -289,12 +302,7 @@ struct ixgbe_q_vector {
int cpu; /* CPU for DCA */
#endif
struct napi_struct napi;
- DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
- DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
- u8 rxr_count; /* Rx ring count assigned to this vector */
- u8 txr_count; /* Tx ring count assigned to this vector */
- u8 tx_itr;
- u8 rx_itr;
+ struct ixgbe_ring_container rx, tx;
u32 eitr;
cpumask_var_t affinity_mask;
char name[IFNAMSIZ + 9];
@@ -308,9 +316,13 @@ struct ixgbe_q_vector {
((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
-#define IXGBE_DESC_UNUSED(R) \
- ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
- (R)->next_to_clean - (R)->next_to_use - 1)
+static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
+{
+ u16 ntc = ring->next_to_clean;
+ u16 ntu = ring->next_to_use;
+
+ return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
+}
#define IXGBE_RX_DESC_ADV(R, i) \
(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
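ixgbe_desc_unused() above replaces the old IXGBE_DESC_UNUSED macro; it keeps one slot permanently unused so that next_to_use == next_to_clean can only mean an empty ring. A quick standalone check of the wraparound arithmetic on a hypothetical 512-entry ring:

    /* Standalone check of the unused-descriptor math; not patch code. */
    #include <assert.h>
    #include <stdint.h>

    static uint16_t desc_unused(uint16_t count, uint16_t ntc, uint16_t ntu)
    {
        return ((ntc > ntu) ? 0 : count) + ntc - ntu - 1;
    }

    int main(void)
    {
        assert(desc_unused(512, 10, 500) == 21);  /* clean behind use */
        assert(desc_unused(512, 500, 10) == 489); /* wrapped case */
        assert(desc_unused(512, 0, 0) == 511);    /* empty; one slot lost */
        return 0;
    }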
@@ -404,6 +416,9 @@ struct ixgbe_adapter {
u16 eitr_low;
u16 eitr_high;
+ /* Work limits */
+ u16 tx_work_limit;
+
/* TX */
struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
@@ -484,6 +499,17 @@ struct ixgbe_adapter {
struct vf_macvlans vf_mvs;
struct vf_macvlans *mv_list;
bool antispoofing_enabled;
+
+ struct hlist_head fdir_filter_list;
+ union ixgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+};
+
+struct ixgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union ixgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
};
enum ixbge_state_t {
@@ -545,31 +571,35 @@ extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
-extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
-extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
+extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
union ixgbe_atr_hash_dword input,
union ixgbe_atr_hash_dword common,
u8 queue);
-extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *input_masks,
- u16 soft_id, u8 queue);
-extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
+extern s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask);
+extern s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id);
+extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
+extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb);
+ union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb,
+ u32 staterr);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
struct scatterlist *sgl, unsigned int sgc);
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 8179e5060a18..0d4e38264492 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -1242,6 +1242,47 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
}
}
+/**
+ * ixgbe_set_rxpba_82598 - Configure packet buffers
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ *
+ * Configure packet buffers.
+ */
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+ int strategy)
+{
+ u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+ u8 i = 0;
+
+ if (!num_pb)
+ return;
+
+ /* Setup Rx packet buffer sizes */
+ switch (strategy) {
+ case PBA_STRATEGY_WEIGHTED:
+ /* Setup the first four at 80KB */
+ rxpktsize = IXGBE_RXPBSIZE_80KB;
+ for (; i < 4; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Setup the last four at 48KB...don't re-init i */
+ rxpktsize = IXGBE_RXPBSIZE_48KB;
+ /* Fall Through */
+ case PBA_STRATEGY_EQUAL:
+ default:
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ }
+
+ /* Setup Tx packet buffer sizes */
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+}
+
static struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
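For reference on ixgbe_set_rxpba_82598() above: the weighted strategy gives the first four packet buffers 80 KB each and the last four 48 KB each, consuming the same 512 KB as eight equal 64 KB buffers. A standalone compile-time check of that identity (values in KB; not part of the patch):

    /* Sanity check of the 82598 Rx packet buffer totals, in KB. */
    _Static_assert(4 * 80 + 4 * 48 == 8 * 64,
                   "weighted and equal strategies consume the same total");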
@@ -1257,6 +1298,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
.setup_link = &ixgbe_setup_mac_link_82598,
+ .set_rxpba = &ixgbe_set_rxpba_82598,
.check_link = &ixgbe_check_mac_link_82598,
.get_link_capabilities = &ixgbe_get_link_capabilities_82598,
.led_on = &ixgbe_led_on_generic,
@@ -1274,6 +1316,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 8ee661245af3..3b3dd4df4c5c 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1107,153 +1107,87 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * ixgbe_set_fdir_rxpba_82599 - Initialize Flow Director Rx packet buffer
* @hw: pointer to hardware structure
* @pballoc: which mode to allocate filters with
**/
-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
+static s32 ixgbe_set_fdir_rxpba_82599(struct ixgbe_hw *hw, const u32 pballoc)
{
- u32 fdirctrl = 0;
- u32 pbsize;
+ u32 fdir_pbsize = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+ u32 current_rxpbsize = 0;
int i;
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * initialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
-
+ /* reserve space for Flow Director filters */
switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 8k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+ case IXGBE_FDIR_PBALLOC_256K:
+ fdir_pbsize -= 256 << IXGBE_RXPBSIZE_SHIFT;
break;
case IXGBE_FDIR_PBALLOC_128K:
- /* 16k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+ fdir_pbsize -= 128 << IXGBE_RXPBSIZE_SHIFT;
break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 32k - 1 signature filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+ case IXGBE_FDIR_PBALLOC_64K:
+ fdir_pbsize -= 64 << IXGBE_RXPBSIZE_SHIFT;
break;
+ case IXGBE_FDIR_PBALLOC_NONE:
default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
+ return IXGBE_ERR_PARAM;
+ }
+ /* determine current RX packet buffer size */
+ for (i = 0; i < 8; i++)
+ current_rxpbsize += IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
- /* Prime the keys for hashing */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+ /* if there is already room for the filters do nothing */
+ if (current_rxpbsize <= fdir_pbsize)
+ return 0;
- /*
- * Poll init-done after we write the register. Estimated times:
- * 10G: PBALLOC = 11b, timing is 60us
- * 1G: PBALLOC = 11b, timing is 600us
- * 100M: PBALLOC = 11b, timing is 6ms
- *
- * Multiple these timings by 4 if under full Rx load
- *
- * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
- * 1 msec per poll time. If we're at line rate and drop to 100M, then
- * this might not finish in our poll time, but we can live with that
- * for now.
- */
- IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
- IXGBE_WRITE_FLUSH(hw);
- for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
- if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
- break;
- usleep_range(1000, 2000);
+ if (current_rxpbsize > hw->mac.rx_pb_size) {
+ /*
+ * if the total rxpbsize is greater than the HW max, the Rx buffer
+ * sizes are unconfigured or misconfigured, since the HW default is
+ * to give the full buffer to each traffic class, resulting in a
+ * total size that is 8x the actual buffer size
+ *
+ * This assumes no DCB since the RXPBSIZE registers appear to
+ * be unconfigured.
+ */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), fdir_pbsize);
+ for (i = 1; i < 8; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ } else {
+ /*
+ * Since the Rx packet buffer appears to have already been
+ * configured we need to shrink each packet buffer by enough
+ * to make room for the filters. As such we take each rxpbsize
+ * value and multiply it by a fraction representing the size
+ * needed over the size we currently have.
+ *
+ * We need to reduce fdir_pbsize and current_rxpbsize to
+ * 1/1024 of their original values in order to avoid
+ * overflowing the u32 being used to store rxpbsize.
+ */
+ fdir_pbsize >>= IXGBE_RXPBSIZE_SHIFT;
+ current_rxpbsize >>= IXGBE_RXPBSIZE_SHIFT;
+ for (i = 0; i < 8; i++) {
+ u32 rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+ rxpbsize *= fdir_pbsize;
+ rxpbsize /= current_rxpbsize;
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
+ }
}
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
return 0;
}
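A worked example of the proportional-shrink branch above, assuming a 512 KB packet buffer already split into eight equal 64 KB regions and a 64 KB flow-director reservation; all numbers are illustrative and in KB, after the >> IXGBE_RXPBSIZE_SHIFT scaling:

    /* Hypothetical numbers for the proportional shrink; not patch code. */
    #include <assert.h>

    int main(void)
    {
        unsigned fdir_pbsize = 512 - 64;    /* 448 KB left after reservation */
        unsigned current_rxpbsize = 8 * 64; /* 512 KB currently programmed */
        unsigned rxpbsize = 64;             /* one TC's buffer before scaling */

        rxpbsize = rxpbsize * fdir_pbsize / current_rxpbsize;
        assert(rxpbsize == 56);             /* 8 * 56 = 448 KB now fits */
        return 0;
    }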
/**
- * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
* @hw: pointer to hardware structure
- * @pballoc: which mode to allocate filters with
+ * @fdirctrl: value to write to flow director control register
**/
-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- u32 fdirctrl = 0;
- u32 pbsize;
int i;
- /*
- * Before enabling Flow Director, the Rx Packet Buffer size
- * must be reduced. The new value is the current size minus
- * flow director memory usage size.
- */
- pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc));
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
- (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
-
- /*
- * The defaults in the HW for RX PB 1-7 are not zero and so should be
- * initialized to zero for non DCB mode otherwise actual total RX PB
- * would be bigger than programmed and filter space would run into
- * the PB 0 region.
- */
- for (i = 1; i < 8; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
-
- /* Send interrupt when 64 filters are left */
- fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
-
- /* Initialize the drop queue to Rx queue 127 */
- fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
-
- switch (pballoc) {
- case IXGBE_FDIR_PBALLOC_64K:
- /* 2k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
- break;
- case IXGBE_FDIR_PBALLOC_128K:
- /* 4k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
- break;
- case IXGBE_FDIR_PBALLOC_256K:
- /* 8k - 1 perfect filters */
- fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
- break;
- default:
- /* bad value */
- return IXGBE_ERR_CONFIG;
- };
-
- /* Turn perfect match filtering on */
- fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
- fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
-
- /* Move the flexible bytes to use the ethertype - shift 6 words */
- fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
-
/* Prime the keys for hashing */
IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
@@ -1271,10 +1205,6 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
* this might not finish in our poll time, but we can live with that
* for now.
*/
-
- /* Set the maximum length per hash bucket to 0xA filters */
- fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT);
-
IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
@@ -1283,101 +1213,77 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
break;
usleep_range(1000, 2000);
}
- if (i >= IXGBE_FDIR_INIT_DONE_POLL)
- hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
- return 0;
+ if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+ hw_dbg(hw, "Flow Director poll time exceeded!\n");
}
-
/**
- * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR
- * @stream: input bitstream to compute the hash on
- * @key: 32-bit hash key
+ * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
**/
-static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
- u32 key)
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
- /*
- * The algorithm is as follows:
- * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
- * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
- * and A[n] x B[n] is bitwise AND between same length strings
- *
- * K[n] is 16 bits, defined as:
- * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
- * for n modulo 32 < 15, K[n] =
- * K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
- *
- * S[n] is 16 bits, defined as:
- * for n >= 15, S[n] = S[n:n - 15]
- * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
- *
- * To simplify for programming, the algorithm is implemented
- * in software this way:
- *
- * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
- *
- * for (i = 0; i < 352; i+=32)
- * hi_hash_dword[31:0] ^= Stream[(i+31):i];
- *
- * lo_hash_dword[15:0] ^= Stream[15:0];
- * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
- * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
- *
- * hi_hash_dword[31:0] ^= Stream[351:320];
- *
- * if(key[0])
- * hash[15:0] ^= Stream[15:0];
- *
- * for (i = 0; i < 16; i++) {
- * if (key[i])
- * hash[15:0] ^= lo_hash_dword[(i+15):i];
- * if (key[i + 16])
- * hash[15:0] ^= hi_hash_dword[(i+15):i];
- * }
- *
- */
- __be32 common_hash_dword = 0;
- u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
- u32 hash_result = 0;
- u8 i;
+ s32 err;
- /* record the flow_vm_vlan bits as they are a key part to the hash */
- flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
+ /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+ err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+ if (err)
+ return err;
- /* generate common hash dword */
- for (i = 10; i; i -= 2)
- common_hash_dword ^= atr_input->dword_stream[i] ^
- atr_input->dword_stream[i - 1];
+ /*
+ * Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
- hi_hash_dword = ntohl(common_hash_dword);
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
- /* low dword is word swapped version of common */
- lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+ return 0;
+}
- /* apply flow ID/VM pool/VLAN ID bits to hash words */
- hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+/**
+ * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ * @hw: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register, initially
+ * contains just the value of the Rx packet buffer allocation
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+ s32 err;
- /* Process bits 0 and 16 */
- if (key & 0x0001) hash_result ^= lo_hash_dword;
- if (key & 0x00010000) hash_result ^= hi_hash_dword;
+ /* Before enabling Flow Director, verify the Rx Packet Buffer size */
+ err = ixgbe_set_fdir_rxpba_82599(hw, fdirctrl);
+ if (err)
+ return err;
/*
- * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
- * delay this because bit 0 of the stream should not be processed
- * so we do not add the vlan until after bit 0 was processed
+ * Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
*/
- lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+ fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
+ IXGBE_FDIRCTRL_REPORT_STATUS |
+ (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
+ (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
+ (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+ (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+ /* write hashes and fdirctrl register, poll for completion */
+ ixgbe_fdir_enable_82599(hw, fdirctrl);
- /* process the remaining 30 bits in the key 2 bits at a time */
- for (i = 15; i; i-- ) {
- if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
- if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
- }
-
- return hash_result & IXGBE_ATR_HASH_MASK;
+ return 0;
}
/*
@@ -1514,7 +1420,6 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
*/
fdirhashcmd = (u64)fdircmd << 32;
fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
-
IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
@@ -1522,6 +1427,101 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
return 0;
}
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
+
+/**
+ * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ * @atr_input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input, resulting in a cleaned-up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+ union ixgbe_atr_input *input_mask)
+{
+
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+
+ /* Apply masks to input data */
+ input->dword_stream[0] &= input_mask->dword_stream[0];
+ input->dword_stream[1] &= input_mask->dword_stream[1];
+ input->dword_stream[2] &= input_mask->dword_stream[2];
+ input->dword_stream[3] &= input_mask->dword_stream[3];
+ input->dword_stream[4] &= input_mask->dword_stream[4];
+ input->dword_stream[5] &= input_mask->dword_stream[5];
+ input->dword_stream[6] &= input_mask->dword_stream[6];
+ input->dword_stream[7] &= input_mask->dword_stream[7];
+ input->dword_stream[8] &= input_mask->dword_stream[8];
+ input->dword_stream[9] &= input_mask->dword_stream[9];
+ input->dword_stream[10] &= input_mask->dword_stream[10];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(input->dword_stream[1] ^
+ input->dword_stream[2] ^
+ input->dword_stream[3] ^
+ input->dword_stream[4] ^
+ input->dword_stream[5] ^
+ input->dword_stream[6] ^
+ input->dword_stream[7] ^
+ input->dword_stream[8] ^
+ input->dword_stream[9] ^
+ input->dword_stream[10]);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /*
+ * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+ * delay this because bit 0 of the stream should not be processed
+ * so we do not add the vlan until after bit 0 was processed
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+ IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+ /*
+ * Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
/**
* ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
* @input_mask: mask to be bit swapped
@@ -1531,11 +1531,11 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* generate a correctly swapped value we need to bit swap the mask and that
* is what is accomplished by this function.
**/
-static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
- u32 mask = ntohs(input_masks->dst_port_mask);
+ u32 mask = ntohs(input_mask->formatted.dst_port);
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
- mask |= ntohs(input_masks->src_port_mask);
+ mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
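The mask-and-shift cascade above is the standard logarithmic bit reversal (this hunk ends before the final byte-level swaps). A self-contained 16-bit sketch for reference; the real code operates on the packed 32-bit FDIRTCPM value holding both port masks:

    /* Standalone 16-bit bit reversal using the same swap cascade;
     * illustrative only, not code from this patch. */
    #include <assert.h>
    #include <stdint.h>

    static uint16_t bitrev16(uint16_t v)
    {
        v = (uint16_t)(((v & 0x5555) << 1) | ((v & 0xAAAA) >> 1)); /* adjacent bits */
        v = (uint16_t)(((v & 0x3333) << 2) | ((v & 0xCCCC) >> 2)); /* 2-bit groups */
        v = (uint16_t)(((v & 0x0F0F) << 4) | ((v & 0xF0F0) >> 4)); /* nibbles */
        return (uint16_t)((v << 8) | (v >> 8));                    /* bytes */
    }

    int main(void)
    {
        assert(bitrev16(0x0001) == 0x8000);
        assert(bitrev16(0x8000) == 0x0001);
        assert(bitrev16(0xF000) == 0x000F);
        return 0;
    }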
@@ -1557,52 +1557,14 @@ static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
#define IXGBE_STORE_AS_BE16(_value) \
- (((u16)(_value) >> 8) | ((u16)(_value) << 8))
+ ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))
-/**
- * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
- * @hw: pointer to hardware structure
- * @input: input bitstream
- * @input_masks: bitwise masks for relevant fields
- * @soft_id: software index into the silicon hash tables for filter storage
- * @queue: queue index to direct traffic to
- *
- * Note that the caller to this function must lock before calling, since the
- * hardware writes must be protected from one another.
- **/
-s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_input *input,
- struct ixgbe_atr_input_masks *input_masks,
- u16 soft_id, u8 queue)
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input_mask)
{
- u32 fdirhash;
- u32 fdircmd;
- u32 fdirport, fdirtcpm;
- u32 fdirvlan;
- /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
- u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
- IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
-
- /*
- * Check flow_type formatting, and bail out before we touch the hardware
- * if there's a configuration issue
- */
- switch (input->formatted.flow_type) {
- case IXGBE_ATR_FLOW_TYPE_IPV4:
- /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
- fdirm |= IXGBE_FDIRM_L4P;
- case IXGBE_ATR_FLOW_TYPE_SCTPV4:
- if (input_masks->dst_port_mask || input_masks->src_port_mask) {
- hw_dbg(hw, " Error on src/dst port mask\n");
- return IXGBE_ERR_CONFIG;
- }
- case IXGBE_ATR_FLOW_TYPE_TCPV4:
- case IXGBE_ATR_FLOW_TYPE_UDPV4:
- break;
- default:
- hw_dbg(hw, " Error on flow type input\n");
- return IXGBE_ERR_CONFIG;
- }
+ /* mask IPv6 since it is currently not supported */
+ u32 fdirm = IXGBE_FDIRM_DIPv6;
+ u32 fdirtcpm;
/*
* Program the relevant mask registers. If src/dst_port or src/dst_addr
@@ -1614,41 +1576,71 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
* point in time.
*/
- /* Program FDIRM */
- switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
- case 0xEFFF:
- /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
- fdirm &= ~IXGBE_FDIRM_VLANID;
- case 0xE000:
- /* Unmask VLAN prio - bit 1 */
- fdirm &= ~IXGBE_FDIRM_VLANP;
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ hw_dbg(hw, " bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_POOL;
+ case 0x7F:
break;
- case 0x0FFF:
- /* Unmask VLAN ID - bit 0 */
- fdirm &= ~IXGBE_FDIRM_VLANID;
+ default:
+ hw_dbg(hw, " Error on vm pool mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= IXGBE_FDIRM_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ hw_dbg(hw, " Error on src/dst port mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+ case IXGBE_ATR_L4TYPE_MASK:
break;
+ default:
+ hw_dbg(hw, " Error on flow type mask\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
case 0x0000:
- /* do nothing, vlans already masked */
+ /* mask VLAN ID, fall through to mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0x0FFF:
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
+ case 0xE000:
+ /* mask VLAN ID only, fall through */
+ fdirm |= IXGBE_FDIRM_VLANID;
+ case 0xEFFF:
+ /* no VLAN fields masked */
break;
default:
hw_dbg(hw, " Error on VLAN mask\n");
return IXGBE_ERR_CONFIG;
}
- if (input_masks->flex_mask & 0xFFFF) {
- if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
- hw_dbg(hw, " Error on flexible byte mask\n");
- return IXGBE_ERR_CONFIG;
- }
- /* Unmask Flex Bytes - bit 4 */
- fdirm &= ~IXGBE_FDIRM_FLEX;
+ switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes, fall through */
+ fdirm |= IXGBE_FDIRM_FLEX;
+ case 0xFFFF:
+ break;
+ default:
+ hw_dbg(hw, " Error on flexible byte mask\n");
+ return IXGBE_ERR_CONFIG;
}
/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
/* store the TCP/UDP port masks, bit reversed from port layout */
- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
+ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
/* write both the same so that UDP and TCP use the same mask */
IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
@@ -1656,24 +1648,32 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* store source and destination IP masks (big-endian) */
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
- ~input_masks->src_ip_mask[0]);
+ ~input_mask->formatted.src_ip[0]);
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
- ~input_masks->dst_ip_mask[0]);
+ ~input_mask->formatted.dst_ip[0]);
- /* Apply masks to input data */
- input->formatted.vlan_id &= input_masks->vlan_id_mask;
- input->formatted.flex_bytes &= input_masks->flex_mask;
- input->formatted.src_port &= input_masks->src_port_mask;
- input->formatted.dst_port &= input_masks->dst_port_mask;
- input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
- input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
+ return 0;
+}
- /* record vlan (little-endian) and flex_bytes(big-endian) */
- fdirvlan =
- IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
- fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
- fdirvlan |= ntohs(input->formatted.vlan_id);
- IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+ input->formatted.src_ip[0]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+ input->formatted.src_ip[1]);
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+ input->formatted.src_ip[2]);
+
+ /* record the source address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+ /* record the first 32 bits of the destination address (big-endian) */
+ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
/* record source and destination port (little-endian)*/
fdirport = ntohs(input->formatted.dst_port);
@@ -1681,29 +1681,80 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
fdirport |= ntohs(input->formatted.src_port);
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
- /* record the first 32 bits of the destination address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+ /* record vlan (little-endian) and flex_bytes(big-endian) */
+ fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+ fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
- /* record the source address (big-endian) */
- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /*
+ * flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ IXGBE_WRITE_FLUSH(hw);
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ if (queue == IXGBE_FDIR_DROP_QUEUE)
+ fdircmd |= IXGBE_FDIRCMD_DROP;
fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+ fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
- /* we only want the bucket hash so drop the upper 16 bits */
- fdirhash = ixgbe_atr_compute_hash_82599(input,
- IXGBE_ATR_BUCKET_HASH_KEY);
- fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
-
- IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
return 0;
}
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+ union ixgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash;
+ u32 fdircmd = 0;
+ u32 retry_count;
+ s32 err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = input->formatted.bkt_hash;
+ fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+ /* flush hash to HW */
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Query if filter is present */
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+ for (retry_count = 10; retry_count; retry_count--) {
+ /* allow 10us for query to process */
+ udelay(10);
+ /* verify query completed successfully */
+ fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+ if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+ break;
+ }
+
+ if (!retry_count)
+ err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+ IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+ }
+
+ return err;
+}
+
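A hypothetical sequence tying the new perfect-filter helpers together; input, input_mask, soft_id, and queue are assumed to come from the caller (for example an ethtool ntuple request) and are not taken from this patch:

    /* Hypothetical usage sketch; error handling omitted. */
    ixgbe_fdir_set_input_mask_82599(hw, &input_mask);
    ixgbe_atr_compute_perfect_hash_82599(&input, &input_mask);
    ixgbe_fdir_write_perfect_filter_82599(hw, &input, soft_id, queue);
    /* ... later, retire the same filter by bkt_hash + soft_id ... */
    ixgbe_fdir_erase_perfect_filter_82599(hw, &input, soft_id);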
/**
* ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
* @hw: pointer to hardware structure
@@ -2146,6 +2197,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_link_capabilities_82599,
.led_on = &ixgbe_led_on_generic,
@@ -2163,6 +2215,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index b894b42a741c..777051f54e53 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1292,7 +1292,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
udelay(5);
ixgbe_standby_eeprom(hw);
- };
+ }
/*
* On some parts, SPI write time could vary from 0-20mSec on 3.3V
@@ -1374,7 +1374,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* EEPROM
*/
mask = mask >> 1;
- };
+ }
/* We leave the "DI" bit set to "0" when we leave this routine. */
eec &= ~IXGBE_EEC_DI;
@@ -3267,3 +3267,243 @@ s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
return 0;
}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw,
+ int num_pb,
+ u32 headroom,
+ int strategy)
+{
+ u32 pbsize = hw->mac.rx_pb_size;
+ int i = 0;
+ u32 rxpktsize, txpktsize, txpbthresh;
+
+ /* Reserve headroom */
+ pbsize -= headroom;
+
+ if (!num_pb)
+ num_pb = 1;
+
+ /* Divide remaining packet buffer space amongst the number
+ * of packet buffers requested using supplied strategy.
+ */
+ switch (strategy) {
+ case (PBA_STRATEGY_WEIGHTED):
+ /* pba_80_48 strategy weights the first half of the packet
+ * buffer with 5/8 of the packet buffer space.
+ */
+ rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8));
+ pbsize -= rxpktsize * (num_pb / 2);
+ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+ for (; i < (num_pb / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ /* Fall through to configure remaining packet buffers */
+ case (PBA_STRATEGY_EQUAL):
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Setup Tx packet buffer and threshold equally for all TCs
+ * TXPBTHRESH register is set in K so divide by 1024 and subtract
+ * 10 since the largest packet we support is just over 9K.
+ */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_pb; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < IXGBE_MAX_PB; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+ }
+}
+
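Worked numbers for PBA_STRATEGY_WEIGHTED above, assuming the 82599 values rx_pb_size = 512 KB, num_pb = 8, and headroom = 0 (illustrative only; values in KB):

    /* Hypothetical walk-through of the weighted branch; not patch code. */
    #include <assert.h>

    int main(void)
    {
        unsigned pbsize = 512, num_pb = 8, i;
        unsigned rxpktsize = (pbsize * 5 * 2) / (num_pb * 8);
        assert(rxpktsize == 80);            /* TCs 0-3 get 80 KB each */
        pbsize -= rxpktsize * (num_pb / 2); /* 192 KB remain */
        i = num_pb / 2;
        rxpktsize = pbsize / (num_pb - i);
        assert(rxpktsize == 48);            /* TCs 4-7 get 48 KB each */
        return 0;
    }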
+/**
+ * ixgbe_calculate_checksum - Calculate checksum for buffer
+ * @buffer: pointer to the buffer holding the data
+ * @length: length of the buffer in bytes
+ *
+ * Calculates and returns the checksum of the given buffer.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+ u32 i;
+ u8 sum = 0;
+
+ if (!buffer)
+ return 0;
+
+ for (i = 0; i < length; i++)
+ sum += buffer[i];
+
+ return (u8) (0 - sum);
+}
+
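The value returned is the two's-complement of the byte sum, so a buffer concatenated with its checksum sums to zero modulo 256. A minimal standalone check of that property:

    /* Standalone check of the checksum property; not patch code. */
    #include <assert.h>
    #include <stdint.h>

    static uint8_t calc_checksum(const uint8_t *buf, uint32_t len)
    {
        uint8_t sum = 0;
        while (len--)
            sum += *buf++;
        return (uint8_t)(0 - sum);
    }

    int main(void)
    {
        uint8_t buf[] = { 0x12, 0x34, 0x56 };       /* byte sum = 0x9c */
        assert(calc_checksum(buf, 3) == 0x64);      /* 0x100 - 0x9c */
        assert((uint8_t)(0x9c + calc_checksum(buf, 3)) == 0);
        return 0;
    }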
+/**
+ * ixgbe_host_interface_command - Issue command to manageability block
+ * @hw: pointer to the HW structure
+ * @buffer: contains the command to write and where the return status will
+ * be placed
+ * @length: length of the buffer, must be a multiple of 4 bytes
+ *
+ * Communicates with the manageability block. Returns 0 on success,
+ * IXGBE_ERR_HOST_INTERFACE_COMMAND otherwise.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+ u32 length)
+{
+ u32 hicr, i;
+ u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u8 buf_len, dword_len;
+
+ s32 ret_val = 0;
+
+ if (length == 0 || length & 0x3 ||
+ length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ hw_dbg(hw, "Buffer length failure.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Check that the host interface is enabled. */
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if ((hicr & IXGBE_HICR_EN) == 0) {
+ hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = length >> 2;
+
+ /*
+ * The device driver writes the relevant command block
+ * into the ram area.
+ */
+ for (i = 0; i < dword_len; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+ i, *((u32 *)buffer + i));
+
+ /* Setting this bit tells the ARC that a new command is pending. */
+ IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+ for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+ hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+ if (!(hicr & IXGBE_HICR_C))
+ break;
+ usleep_range(1000, 2000);
+ }
+
+ /* Check command successful completion. */
+ if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+ (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ hw_dbg(hw, "Command has failed with no status valid.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs */
+ dword_len = hdr_size >> 2;
+
+ /* first pull in the header so we know the buffer length */
+ for (i = 0; i < dword_len; i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+ /* If there is anything in the data position, pull it in */
+ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+ if (buf_len == 0)
+ goto out;
+
+ if (length < (buf_len + hdr_size)) {
+ hw_dbg(hw, "Buffer not large enough for reply message.\n");
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto out;
+ }
+
+ /* Calculate length in DWORDs, rounding up for odd lengths */
+ dword_len = (buf_len + 3) >> 2;
+
+ /* Pull in the rest of the buffer (i is where we left off) */
+ for (; i < dword_len + (hdr_size >> 2); i++)
+ *((u32 *)buffer + i) =
+ IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @min: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ *
+ * Sends the driver version number to the firmware through the
+ * manageability block. Returns 0 on success, IXGBE_ERR_SWFW_SYNC if
+ * the semaphore cannot be acquired, or IXGBE_ERR_HOST_INTERFACE_COMMAND
+ * if the command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+ u8 build, u8 sub)
+{
+ struct ixgbe_hic_drv_info fw_cmd;
+ int i;
+ s32 ret_val = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
+ ret_val = IXGBE_ERR_SWFW_SYNC;
+ goto out;
+ }
+
+ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+ fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.ver_maj = maj;
+ fw_cmd.ver_min = min;
+ fw_cmd.ver_build = build;
+ fw_cmd.ver_sub = sub;
+ fw_cmd.pad = 0;
+ fw_cmd.pad2 = 0;
+ fw_cmd.hdr.checksum = 0;
+ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+
+ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+ ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+ sizeof(fw_cmd));
+ if (ret_val != 0)
+ continue;
+
+ if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+ FW_CEM_RESP_STATUS_SUCCESS)
+ ret_val = 0;
+ else
+ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+ break;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+ return ret_val;
+}
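A hypothetical call site through the mac ops table. The NULL check matters because 82598 installs .set_fw_drv_ver = NULL in its ops table above; the version numbers here are placeholders, not the real driver version:

    /* Hypothetical caller; version numbers are placeholders. */
    if (hw->mac.ops.set_fw_drv_ver)
        hw->mac.ops.set_fw_drv_ver(hw, 3, 4, 8, 0);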
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 46be83cfb500..f24fd64a4c46 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -99,6 +99,11 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 sub);
+
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
+ u32 headroom, int strategy);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 686a17aadef3..9d88c31487bc 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -258,15 +258,13 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->rx_pba_cfg,
- pfc_en, refill, max, bwgid,
- ptype);
+ ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
+ bwgid, ptype);
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->rx_pba_cfg,
- pfc_en, refill, max, bwgid,
- ptype, prio_tc);
+ ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
+ bwgid, ptype, prio_tc);
break;
default:
break;
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 944838fc7b59..e85826ae0320 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -123,11 +123,6 @@ struct tc_configuration {
u8 tc; /* Traffic class (TC) */
};
-enum dcb_rx_pba_cfg {
- pba_equal, /* PBA[0-7] each use 64KB FIFO */
- pba_80_48 /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */
-};
-
struct dcb_num_tcs {
u8 pg_tcs;
u8 pfc_tcs;
@@ -140,8 +135,6 @@ struct ixgbe_dcb_config {
u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */
bool pfc_mode_enable;
- enum dcb_rx_pba_cfg rx_pba_cfg;
-
u32 dcb_cfg_version; /* Not used...OS-specific? */
u32 link_speed; /* For bandwidth allocation validation purpose */
};
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 771d01a60d06..2288c3cac010 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -32,45 +32,6 @@
#include "ixgbe_dcb_82598.h"
/**
- * ixgbe_dcb_config_packet_buffers_82598 - Configure packet buffers
- * @hw: pointer to hardware structure
- * @dcb_config: pointer to ixgbe_dcb_config structure
- *
- * Configure packet buffers for DCB mode.
- */
-static s32 ixgbe_dcb_config_packet_buffers_82598(struct ixgbe_hw *hw, u8 rx_pba)
-{
- s32 ret_val = 0;
- u32 value = IXGBE_RXPBSIZE_64KB;
- u8 i = 0;
-
- /* Setup Rx packet buffer sizes */
- switch (rx_pba) {
- case pba_80_48:
- /* Setup the first four at 80KB */
- value = IXGBE_RXPBSIZE_80KB;
- for (; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
- /* Setup the last four at 48KB...don't re-init i */
- value = IXGBE_RXPBSIZE_48KB;
- /* Fall Through */
- case pba_equal:
- default:
- for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
-
- /* Setup Tx packet buffer sizes */
- for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
- IXGBE_TXPBSIZE_40KB);
- }
- break;
- }
-
- return ret_val;
-}
-
-/**
* ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter
* @hw: pointer to hardware structure
* @dcb_config: pointer to ixgbe_dcb_config structure
@@ -321,11 +282,9 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type)
{
- ixgbe_dcb_config_packet_buffers_82598(hw, rx_pba);
ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type);
ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max,
bwg_id, prio_type);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ixgbe/ixgbe_dcb_82598.h
index 1e9750c2b46b..2f318935561a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.h
@@ -91,8 +91,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw,
u8 *bwg_id,
u8 *prio_type);
-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type);
#endif /* _DCB_82598_CONFIG_H */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index d50cf78c234d..ade98200288c 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -31,63 +31,6 @@
#include "ixgbe_dcb_82599.h"
/**
- * ixgbe_dcb_config_packet_buffers_82599 - Configure DCB packet buffers
- * @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
- *
- * Configure packet buffers for DCB mode.
- */
-static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
-{
- int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
- u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
- u32 rxpktsize;
- u32 txpktsize;
- u32 txpbthresh;
- u8 i = 0;
-
- /*
- * This really means configure the first half of the TCs
- * (Traffic Classes) to use 5/8 of the Rx packet buffer
- * space. To determine the size of the buffer for each TC,
- * we are multiplying the average size by 5/4 and applying
- * it to half of the traffic classes.
- */
- if (rx_pba == pba_80_48) {
- rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
- rx_pb_size -= rxpktsize * (num_tcs / 2);
- for (; i < (num_tcs / 2); i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- }
-
- /* Divide the remaining Rx packet buffer evenly among the TCs */
- rxpktsize = rx_pb_size / (num_tcs - i);
- for (; i < num_tcs; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
-
- /*
- * Setup Tx packet buffer and threshold equally for all TCs
- * TXPBTHRESH register is set in K so divide by 1024 and subtract
- * 10 since the largest packet we support is just over 9K.
- */
- txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
- txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
- for (i = 0; i < num_tcs; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
- }
-
- /* Clear unused TCs, if any, to zero buffer size*/
- for (; i < MAX_TRAFFIC_CLASS; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
- }
-
- return 0;
-}
-
-/**
* ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
* @hw: pointer to hardware structure
* @refill: refill credits index by traffic class
@@ -376,65 +319,8 @@ static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw)
}
/**
- * ixgbe_dcb_config_82599 - Configure general DCB parameters
- * @hw: pointer to hardware structure
- *
- * Configure general DCB parameters.
- */
-static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
-{
- u32 reg;
- u32 q;
-
- /* Disable the Tx desc arbiter so that MTQC can be changed */
- reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- reg |= IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
- /* Enable DCB for Rx with 8 TCs */
- reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
- switch (reg & IXGBE_MRQC_MRQE_MASK) {
- case 0:
- case IXGBE_MRQC_RT4TCEN:
- /* RSS disabled cases */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
- break;
- case IXGBE_MRQC_RSSEN:
- case IXGBE_MRQC_RTRSS4TCEN:
- /* RSS enabled cases */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RTRSS8TCEN;
- break;
- default:
- /* Unsupported value, assume stale data, overwrite no RSS */
- reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | IXGBE_MRQC_RT8TCEN;
- }
- IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);
-
- /* Enable DCB for Tx with 8 TCs */
- reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
-
- /* Disable drop for all queues */
- for (q = 0; q < 128; q++)
- IXGBE_WRITE_REG(hw, IXGBE_QDE, q << IXGBE_QDE_IDX_SHIFT);
-
- /* Enable the Tx desc arbiter */
- reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
- reg &= ~IXGBE_RTTDCS_ARBDIS;
- IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);
-
- /* Enable Security TX Buffer IFG for DCB */
- reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
- reg |= IXGBE_SECTX_DCB;
- IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
-
- return 0;
-}
-
-/**
* ixgbe_dcb_hw_config_82599 - Configure and enable DCB
* @hw: pointer to hardware structure
- * @rx_pba: method to distribute packet buffer
* @refill: refill credits index by traffic class
* @max: max credits index by traffic class
* @bwg_id: bandwidth grouping indexed by traffic class
@@ -443,12 +329,9 @@ static s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw)
*
* Configure dcb settings and enable dcb mode.
*/
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc)
{
- ixgbe_dcb_config_packet_buffers_82599(hw, rx_pba);
- ixgbe_dcb_config_82599(hw);
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id,
prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 2de71a503153..08d1749862a3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -86,17 +86,6 @@
#define IXGBE_RTTPCS_ARBD_SHIFT 22
#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */
-#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
-#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
-#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
-
-#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
-#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
-
/* SECTXMINIFG DCB */
#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
@@ -127,8 +116,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
u8 *prio_type,
u8 *prio_tc);
-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw,
- u8 rx_pba, u8 pfc_en, u16 *refill,
+s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill,
u16 *max, u8 *bwg_id, u8 *prio_type,
u8 *prio_tc);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index 5e7ed225851a..0ace6ce1d0b4 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -114,20 +114,19 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
u8 err = 0;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ /* verify there is something to do, if not then exit */
+ if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return err;
+
if (state > 0) {
/* Turn on DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- goto out;
-
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
e_err(drv, "Enable failed, needs MSI-X\n");
err = 1;
goto out;
}
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
+ adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82598EB:
@@ -137,46 +136,30 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
break;
default:
break;
}
- adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
- if (!netdev_get_num_tc(netdev))
- ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
-
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
+ ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS);
} else {
/* Turn off DCB */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
-
- adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
- adapter->temp_dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
+ adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+ adapter->temp_dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- break;
- default:
- break;
- }
-
- ixgbe_setup_tc(netdev, 0);
-
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
+ break;
+ default:
+ break;
}
+ ixgbe_setup_tc(netdev, 0);
}
+
out:
return err;
}
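
The guard added at the top of ixgbe_dcbnl_set_state() normalizes both sides to 0/1 before comparing, so any nonzero state counts as an enable request. The same logic, unrolled (an illustrative model, not driver code):

    bool want_on = !!state;   /* any nonzero request -> 1 */
    bool is_off  = !(adapter->flags & IXGBE_FLAG_DCB_ENABLED);

    /* want_on != is_off means DCB is already in the requested
     * state, so there is nothing to do and we return early */
    if (want_on != is_off)
        return err;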
@@ -347,24 +330,20 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ int ret;
+#ifdef IXGBE_FCOE
struct dcb_app app = {
.selector = DCB_APP_IDTYPE_ETHTYPE,
.protocol = ETH_P_FCOE,
};
u8 up = dcb_getapp(netdev, &app);
- int ret;
+#endif
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
MAX_TRAFFIC_CLASS);
if (ret)
return DCB_NO_HW_CHG;
- /* In IEEE mode app data must be parsed into DCBX format for
- * hardware routines.
- */
- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
- up = (1 << up);
-
#ifdef IXGBE_FCOE
if (up && (up != (1 << adapter->fcoe.up)))
adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
@@ -378,7 +357,7 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- ixgbe_fcoe_setapp(adapter, up);
+ adapter->fcoe.up = ffs(up) - 1;
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
@@ -691,24 +670,75 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return err;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+}
+#endif
+
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err = -EINVAL;
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
- return -EINVAL;
+ return err;
- dcb_setapp(dev, app);
+ err = dcb_ieee_setapp(dev, app);
#ifdef IXGBE_FCOE
- if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
- adapter->fcoe.tc == app->priority)
- ixgbe_dcbnl_set_all(dev);
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app->priority;
+ ixgbe_dcbnl_devreset(dev);
+ }
#endif
return 0;
}
+static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
+ struct dcb_app *app)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ err = dcb_ieee_delapp(dev, app);
+
+#ifdef IXGBE_FCOE
+ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_FCOE) {
+ u8 app_mask = dcb_ieee_getapp_mask(dev, app);
+
+ if (app_mask & (1 << adapter->fcoe.up))
+ return err;
+
+ adapter->fcoe.up = app_mask ?
+ ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
+ ixgbe_dcbnl_devreset(dev);
+ }
+#endif
+ return err;
+}
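
On delete, the next FCoE user priority is recovered from whatever app entries remain: ffs() returns the 1-based index of the lowest set bit, so ffs(app_mask) - 1 is the lowest priority still mapped. A worked example with an assumed mask value:

    /* app_mask = 0x28: priorities 3 and 5 still carry FCoE app entries
     *   ffs(0x28) == 4           ->  adapter->fcoe.up = 3
     * app_mask == 0 (none left)  ->  fall back to IXGBE_FCOE_DEFTC
     */
    adapter->fcoe.up = app_mask ? ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;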
+
static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -760,6 +790,7 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
.ieee_getpfc = ixgbe_dcbnl_ieee_getpfc,
.ieee_setpfc = ixgbe_dcbnl_ieee_setpfc,
.ieee_setapp = ixgbe_dcbnl_ieee_setapp,
+ .ieee_delapp = ixgbe_dcbnl_ieee_delapp,
.getstate = ixgbe_dcbnl_get_state,
.setstate = ixgbe_dcbnl_set_state,
.getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index cb1555bc8548..dc649553a0a6 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -27,6 +27,7 @@
/* ethtool support for ixgbe */
+#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -441,62 +442,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
return 0;
}
-static u32 ixgbe_get_rx_csum(struct net_device *netdev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
-}
-
-static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- if (data)
- adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
- else
- adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
-
- return 0;
-}
-
-static u32 ixgbe_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u32 feature_list;
-
- feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- feature_list |= NETIF_F_SCTP_CSUM;
- break;
- default:
- break;
- }
- if (data)
- netdev->features |= feature_list;
- else
- netdev->features &= ~feature_list;
-
- return 0;
-}
-
-static int ixgbe_set_tso(struct net_device *netdev, u32 data)
-{
- if (data) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- return 0;
-}
-
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2055,7 +2000,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2074,7 +2019,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
}
/* if in mixed tx/rx queues per vector mode, report only rx settings */
- if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count)
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0;
/* only valid if in constant ITR mode */
@@ -2139,12 +2084,12 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
bool need_reset = false;
/* don't accept tx specific changes if we've got mixed RxTx vectors */
- if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
+ if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
&& ec->tx_coalesce_usecs)
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
@@ -2213,18 +2158,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
int num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i];
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->eitr = adapter->tx_eitr_param;
else
/* rx only or mixed */
q_vector->eitr = adapter->rx_eitr_param;
+ q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector);
}
/* Legacy Interrupt Mode */
} else {
q_vector = adapter->q_vector[0];
q_vector->eitr = adapter->rx_eitr_param;
+ q_vector->tx.work_limit = adapter->tx_work_limit;
ixgbe_write_eitr(q_vector);
}
@@ -2233,241 +2180,376 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
* also locks in RSC enable/disable which requires reset
*/
- if (need_reset) {
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
- }
+ if (need_reset)
+ ixgbe_do_reset(netdev);
return 0;
}
-static int ixgbe_set_flags(struct net_device *netdev, u32 data)
+static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- bool need_reset = false;
- int rc;
+ union ixgbe_atr_input *mask = &adapter->fdir_mask;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule = NULL;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
-#ifdef CONFIG_IXGBE_DCB
- if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- !(data & ETH_FLAG_RXVLAN))
+ if (!rule || fsp->location != rule->sw_idx)
return -EINVAL;
-#endif
- need_reset = (data & ETH_FLAG_RXVLAN) !=
- (netdev->features & NETIF_F_HW_VLAN_RX);
+ /* fill out the flow spec entry */
- if ((data & ETH_FLAG_RXHASH) &&
- !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
- return -EOPNOTSUPP;
-
- rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
- ETH_FLAG_RXHASH);
- if (rc)
- return rc;
-
- /* if state changes we need to update adapter->flags and reset */
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
- (!!(data & ETH_FLAG_LRO) !=
- !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
- if ((data & ETH_FLAG_LRO) &&
- (!adapter->rx_itr_setting ||
- (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
- e_info(probe, "rx-usecs set too low, "
- "not enabling RSC.\n");
- } else {
- adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- need_reset = true;
- break;
- case ixgbe_mac_X540: {
- int i;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- struct ixgbe_ring *ring =
- adapter->rx_ring[i];
- if (adapter->flags2 &
- IXGBE_FLAG2_RSC_ENABLED) {
- ixgbe_configure_rscctl(adapter,
- ring);
- } else {
- ixgbe_clear_rscctl(adapter,
- ring);
- }
- }
- }
- break;
- default:
- break;
- }
- }
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case IXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case IXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
}
- /*
- * Check if Flow Director n-tuple support was enabled or disabled. If
- * the state changed, we need to reset.
- */
- if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
- (!(data & ETH_FLAG_NTUPLE))) {
- /* turn off Flow Director perfect, set hash and reset */
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
- need_reset = true;
- } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
- (data & ETH_FLAG_NTUPLE)) {
- /* turn off Flow Director hash, enable perfect and reset */
- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
- need_reset = true;
- } else {
- /* no state change */
- }
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
+ fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == IXGBE_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
- if (need_reset) {
- if (netif_running(netdev))
- ixgbe_reinit_locked(adapter);
- else
- ixgbe_reset(adapter);
+ return 0;
+}
+
+static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *rule;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << adapter->fdir_pballoc) - 2;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
}
return 0;
}
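
Both query paths report capacity as (1024 << adapter->fdir_pballoc) - 2: the filter table scales with the Flow Director packet-buffer allocation, less two reserved entries. Worked values (the pballoc settings are assumptions for illustration):

    /* capacity = (1024 << fdir_pballoc) - 2
     *   fdir_pballoc = 1  ->  2048 - 2 = 2046 usable filters
     *   fdir_pballoc = 2  ->  4096 - 2 = 4094
     *   fdir_pballoc = 3  ->  8192 - 2 = 8190
     */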
-static int ixgbe_set_rx_ntuple(struct net_device *dev,
- struct ethtool_rx_ntuple *cmd)
+static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ void *rule_locs)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
- union ixgbe_atr_input input_struct;
- struct ixgbe_atr_input_masks input_masks;
- int target_queue;
- int err;
+ int ret = -EOPNOTSUPP;
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- return -EOPNOTSUPP;
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd,
+ (u32 *)rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ixgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2, *parent;
+ struct ixgbe_fdir_filter *rule;
+ int err = -EINVAL;
+
+ parent = NULL;
+ rule = NULL;
+
+ hlist_for_each_entry_safe(rule, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place remove it */
+ if (rule && (rule->sw_idx == sw_idx)) {
+ if (!input || (rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = ixgbe_fdir_erase_perfect_filter_82599(hw,
+ &rule->filter,
+ sw_idx);
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ adapter->fdir_filter_count--;
+ }
/*
- * Don't allow programming if the action is a queue greater than
- * the number of online Tx queues.
+ * If no input, this was a delete; err should be 0 if a rule was
+ * successfully found and removed from the list, else -EINVAL
*/
- if ((fs->action >= adapter->num_tx_queues) ||
- (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
- return -EINVAL;
+ if (!input)
+ return err;
- memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
- memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
- /* record flow type */
- switch (fs->flow_type) {
- case IPV4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
- break;
+ /* add filter to the list */
+ if (parent)
+ hlist_add_after(parent, &input->fdir_node);
+ else
+ hlist_add_head(&input->fdir_node,
+ &adapter->fdir_filter_list);
+
+ /* update counts */
+ adapter->fdir_filter_count++;
+
+ return 0;
+}
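
The helper keeps the filter list sorted by software index and doubles as the delete path: a NULL input removes whatever occupies sw_idx. The insertion idiom reduced to its generic shape (struct rule, head, and field names are hypothetical):

    struct hlist_node *parent = NULL, *node, *node2;
    struct rule *r;

    hlist_for_each_entry_safe(r, node, node2, head, list) {
        if (r->idx >= new_rule->idx)
            break;              /* insertion point found */
        parent = node;
    }
    if (parent)                 /* insert after last smaller entry */
        hlist_add_after(parent, &new_rule->list);
    else                        /* empty list, or smallest index */
        hlist_add_head(&new_rule->list, head);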
+
+static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
break;
case UDP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
break;
case SCTP_V4_FLOW:
- input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
break;
- default:
- return -1;
- }
-
- /* copy vlan tag minus the CFI bit */
- if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
- input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
- if (!fs->vlan_tag_mask) {
- input_masks.vlan_id_mask = htons(0xEFFF);
- } else {
- switch (~fs->vlan_tag_mask & 0xEFFF) {
- /* all of these are valid vlan-mask values */
- case 0xEFFF:
- case 0xE000:
- case 0x0FFF:
- case 0x0000:
- input_masks.vlan_id_mask =
- htons(~fs->vlan_tag_mask);
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
break;
- /* exit with error if vlan-mask is invalid */
- default:
- e_err(drv, "Partial VLAN ID or "
- "priority mask in vlan-mask is not "
- "supported by hardware\n");
- return -1;
}
+ default:
+ return 0;
}
+ break;
+ default:
+ return 0;
}
- /* make sure we only use the first 2 bytes of user data */
- if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
- input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
- if (!(fs->data_mask & 0xFFFF)) {
- input_masks.flex_mask = 0xFFFF;
- } else if (~fs->data_mask & 0xFFFF) {
- e_err(drv, "Partial user-def-mask is not "
- "supported by hardware\n");
- return -1;
- }
- }
+ return 1;
+}
+
+static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input mask;
+ int err;
+
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ return -EOPNOTSUPP;
/*
- * Copy input into formatted structures
- *
- * These assignments are based on the following logic
- * If neither input or mask are set assume value is masked out.
- * If input is set, but mask is not mask should default to accept all.
- * If input is not set, but mask is set then mask likely results in 0.
- * If input is set and mask is set then assign both.
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Rx queues.
*/
- if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
- input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
- if (!fs->m_u.tcp_ip4_spec.ip4src)
- input_masks.src_ip_mask[0] = 0xFFFFFFFF;
- else
- input_masks.src_ip_mask[0] =
- ~fs->m_u.tcp_ip4_spec.ip4src;
- }
- if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
- input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
- if (!fs->m_u.tcp_ip4_spec.ip4dst)
- input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
- else
- input_masks.dst_ip_mask[0] =
- ~fs->m_u.tcp_ip4_spec.ip4dst;
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= adapter->num_rx_queues))
+ return -EINVAL;
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
+ e_err(drv, "Location out of range\n");
+ return -EINVAL;
}
- if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
- input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
- if (!fs->m_u.tcp_ip4_spec.psrc)
- input_masks.src_port_mask = 0xFFFF;
- else
- input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union ixgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (!ixgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ e_err(drv, "Unrecognized flow type\n");
+ goto err_out;
}
- if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
- input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
- if (!fs->m_u.tcp_ip4_spec.pdst)
- input_masks.dst_port_mask = 0xFFFF;
- else
- input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+
+ mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
+ mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
}
/* determine if we need to drop or route the packet */
- if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
- target_queue = MAX_RX_QUEUES - 1;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = IXGBE_FDIR_DROP_QUEUE;
else
- target_queue = fs->action;
+ input->action = fsp->ring_cookie;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (hlist_empty(&adapter->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ if (err) {
+ e_err(drv, "Error writing mask\n");
+ goto err_out_w_lock;
+ }
+ } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ e_err(drv, "Only one mask supported per port\n");
+ goto err_out_w_lock;
+ }
+
+ /* apply mask and compute/store hash */
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+
+ /* program filters to filter memory */
+ err = ixgbe_fdir_write_perfect_filter_82599(hw,
+ &input->filter, input->sw_idx,
+ (input->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[input->action]->reg_idx);
+ if (err)
+ goto err_out_w_lock;
+
+ ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return err;
+err_out_w_lock:
+ spin_unlock(&adapter->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return -EINVAL;
+}
+
+static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err;
spin_lock(&adapter->fdir_perfect_lock);
- err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
- &input_struct,
- &input_masks, 0,
- target_queue);
+ err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
spin_unlock(&adapter->fdir_perfect_lock);
- return err ? -1 : 0;
+ return err;
+}
+
+static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
}
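
From user space the new hooks are reached through the usual SIOCETHTOOL path (ethtool -N on the command line). A hedged sketch of installing one perfect filter -- TCP/IPv4, destination port 80, steered to Rx queue 2 -- assuming the device is already in perfect-filter mode, with error handling omitted:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <arpa/inet.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    /* fd is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
    int install_rule(int fd, const char *ifname)
    {
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;
        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80); /* dst port 80 */
        nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;    /* compare all 16 bits */
        nfc.fs.ring_cookie = 2;                   /* deliver to Rx queue 2 */
        nfc.fs.location = 0;                      /* sw_idx for the rule */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = (void *)&nfc;
        return ioctl(fd, SIOCETHTOOL, &ifr);
    }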
static const struct ethtool_ops ixgbe_ethtool_ops = {
@@ -2486,16 +2568,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_ringparam = ixgbe_set_ringparam,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
- .get_rx_csum = ixgbe_get_rx_csum,
- .set_rx_csum = ixgbe_set_rx_csum,
- .get_tx_csum = ixgbe_get_tx_csum,
- .set_tx_csum = ixgbe_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
.get_msglevel = ixgbe_get_msglevel,
.set_msglevel = ixgbe_set_msglevel,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ixgbe_set_tso,
.self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
.set_phys_id = ixgbe_set_phys_id,
@@ -2503,9 +2577,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
.set_coalesce = ixgbe_set_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ixgbe_set_flags,
- .set_rx_ntuple = ixgbe_set_rx_ntuple,
+ .get_rxnfc = ixgbe_get_rxnfc,
+ .set_rxnfc = ixgbe_set_rxnfc,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05920726e824..824edae77865 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -26,9 +26,6 @@
*******************************************************************************/
#include "ixgbe.h"
-#ifdef CONFIG_IXGBE_DCB
-#include "ixgbe_dcb_82599.h"
-#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
@@ -40,25 +37,6 @@
#include <scsi/libfcoe.h>
/**
- * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
- * @rx_desc: advanced rx descriptor
- *
- * Returns : true if it is FCoE pkt
- */
-static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
-{
- u16 p;
-
- p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
- if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
- p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
- p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
- return p == IXGBE_ETQF_FILTER_FCOE;
- }
- return false;
-}
-
-/**
* ixgbe_fcoe_clear_ddp - clear the given ddp context
* @ddp: ptr to the ixgbe_fcoe_ddp
*
@@ -128,14 +106,17 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
if (ddp->sgl)
pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
DMA_FROM_DEVICE);
- pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ if (ddp->pool) {
+ pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
+ ddp->pool = NULL;
+ }
+
ixgbe_fcoe_clear_ddp(ddp);
out_ddp_put:
return len;
}
-
/**
* ixgbe_fcoe_ddp_setup - called to set up ddp context
* @netdev: the corresponding net_device
@@ -163,6 +144,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
unsigned int thislen = 0;
u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
dma_addr_t addr = 0;
+ struct pci_pool *pool;
if (!netdev || !sgl)
return 0;
@@ -199,12 +181,14 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 0;
}
- /* alloc the udl from our ddp pool */
- ddp->udl = pci_pool_alloc(fcoe->pool, GFP_ATOMIC, &ddp->udp);
+ /* alloc the udl from per cpu ddp pool */
+ pool = *per_cpu_ptr(fcoe->pool, get_cpu());
+ ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
if (!ddp->udl) {
e_err(drv, "failed allocated ddp context\n");
goto out_noddp_unmap;
}
+ ddp->pool = pool;
ddp->sgl = sgl;
ddp->sgc = sgc;
@@ -268,6 +252,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
j++;
lastsize = 1;
}
+ put_cpu();
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
@@ -311,11 +296,12 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
return 1;
out_noddp_free:
- pci_pool_free(fcoe->pool, ddp->udl, ddp->udp);
+ pci_pool_free(pool, ddp->udl, ddp->udp);
ixgbe_fcoe_clear_ddp(ddp);
out_noddp_unmap:
pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
+ put_cpu();
return 0;
}
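
DDP contexts are now carved from per-CPU DMA pools: get_cpu() disables preemption so the task cannot migrate while the pool pointer is in use, and put_cpu() appears on both the success and the unmap-failure exits. The access pattern in isolation (illustrative):

    struct pci_pool *pool;
    int cpu = get_cpu();            /* disables preemption, returns CPU id */

    pool = *per_cpu_ptr(fcoe->pool, cpu);
    /* ... pci_pool_alloc() from 'pool' while pinned to this CPU ... */
    put_cpu();                      /* re-enables preemption */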
@@ -374,23 +360,20 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
*/
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 staterr)
{
u16 xid;
u32 fctl;
- u32 sterr, fceofe, fcerr, fcstat;
+ u32 fceofe, fcerr, fcstat;
int rc = -EINVAL;
struct ixgbe_fcoe *fcoe;
struct ixgbe_fcoe_ddp *ddp;
struct fc_frame_header *fh;
struct fcoe_crc_eof *crc;
- if (!ixgbe_rx_is_fcoe(rx_desc))
- goto ddp_out;
-
- sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
- fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
+ fcerr = (staterr & IXGBE_RXDADV_ERR_FCERR);
+ fceofe = (staterr & IXGBE_RXDADV_ERR_FCEOFE);
if (fcerr == IXGBE_FCERR_BADCRC)
skb_checksum_none_assert(skb);
else
@@ -419,7 +402,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcerr | fceofe)
goto ddp_out;
- fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
+ fcstat = (staterr & IXGBE_RXDADV_STAT_FCSTAT);
if (fcstat) {
/* update length of DDPed data */
ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
@@ -465,24 +448,18 @@ ddp_out:
*
* Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
*/
-int ixgbe_fso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
u32 tx_flags, u8 *hdr_len)
{
- u8 sof, eof;
+ struct fc_frame_header *fh;
u32 vlan_macip_lens;
- u32 fcoe_sof_eof;
- u32 type_tucmd;
+ u32 fcoe_sof_eof = 0;
u32 mss_l4len_idx;
- int mss = 0;
- unsigned int i;
- struct ixgbe_tx_buffer *tx_buffer_info;
- struct ixgbe_adv_tx_context_desc *context_desc;
- struct fc_frame_header *fh;
+ u8 sof, eof;
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
- e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
- skb_shinfo(skb)->gso_type);
+ dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
+ skb_shinfo(skb)->gso_type);
return -EINVAL;
}
@@ -492,23 +469,22 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
sizeof(struct fcoe_hdr));
/* sets up SOF and ORIS */
- fcoe_sof_eof = 0;
sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
switch (sof) {
case FC_SOF_I2:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_I3:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
+ IXGBE_ADVTXD_FCOEF_ORIS;
break;
case FC_SOF_N2:
break;
case FC_SOF_N3:
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
+ fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
break;
default:
- e_warn(drv, "unknown sof = 0x%x\n", sof);
+ dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
return -EINVAL;
}
@@ -521,12 +497,11 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
break;
case FC_EOF_T:
/* lso needs ORIE */
- if (skb_is_gso(skb)) {
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
- fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
- } else {
+ if (skb_is_gso(skb))
+ fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
+ IXGBE_ADVTXD_FCOEF_ORIE;
+ else
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
- }
break;
case FC_EOF_NI:
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
@@ -535,7 +510,7 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
break;
default:
- e_warn(drv, "unknown eof = 0x%x\n", eof);
+ dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
return -EINVAL;
}
@@ -544,47 +519,72 @@ int ixgbe_fso(struct ixgbe_adapter *adapter,
if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
- /* hdr_len includes fc_hdr if FCoE lso is enabled */
+ /* include trailer in headlen as it is replicated per frame */
*hdr_len = sizeof(struct fcoe_crc_eof);
+
+ /* hdr_len includes fc_hdr if FCoE LSO is enabled */
if (skb_is_gso(skb))
*hdr_len += (skb_transport_offset(skb) +
sizeof(struct fc_frame_header));
- /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = (skb_transport_offset(skb) +
- sizeof(struct fc_frame_header));
- vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
- << IXGBE_ADVTXD_MACLEN_SHIFT);
- vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
-
- /* type_tycmd and mss: set TUCMD.FCoE to enable offload */
- type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
- IXGBE_ADVTXT_TUCMD_FCOE;
- if (skb_is_gso(skb))
- mss = skb_shinfo(skb)->gso_size;
+
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
- mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
- (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_transport_offset(skb) +
+ sizeof(struct fc_frame_header);
+ vlan_macip_lens |= (skb_transport_offset(skb) - 4)
+ << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
/* write context desc */
- i = tx_ring->next_to_use;
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
-
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
+ IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
return skb_is_gso(skb);
}
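
The open-coded descriptor write that used to end this function is replaced by a shared helper. A sketch of what that helper does, reconstructed from the block removed above (the real ixgbe_tx_ctxtdesc lives in ixgbe_main.c and may differ in detail):

    static void tx_ctxtdesc_sketch(struct ixgbe_ring *tx_ring,
                                   u32 vlan_macip_lens, u32 seqnum_seed,
                                   u32 type_tucmd, u32 mss_l4len_idx)
    {
        struct ixgbe_adv_tx_context_desc *ctxt;
        u16 i = tx_ring->next_to_use;

        ctxt = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
        tx_ring->next_to_use = (i + 1 == tx_ring->count) ? 0 : i + 1;

        /* DEXT + CTXT mark this entry as a context descriptor */
        type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

        ctxt->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
        ctxt->seqnum_seed     = cpu_to_le32(seqnum_seed);
        ctxt->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
        ctxt->mss_l4len_idx   = cpu_to_le32(mss_l4len_idx);
    }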
+static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
+{
+ unsigned int cpu;
+ struct pci_pool **pool;
+
+ for_each_possible_cpu(cpu) {
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ if (*pool)
+ pci_pool_destroy(*pool);
+ }
+ free_percpu(fcoe->pool);
+ fcoe->pool = NULL;
+}
+
+static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+ unsigned int cpu;
+ struct pci_pool **pool;
+ char pool_name[32];
+
+ fcoe->pool = alloc_percpu(struct pci_pool *);
+ if (!fcoe->pool)
+ return;
+
+ /* allocate pci pool for each cpu */
+ for_each_possible_cpu(cpu) {
+ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
+ pool = per_cpu_ptr(fcoe->pool, cpu);
+ *pool = pci_pool_create(pool_name,
+ adapter->pdev, IXGBE_FCPTR_MAX,
+ IXGBE_FCPTR_ALIGN, PAGE_SIZE);
+ if (!*pool) {
+ e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
+ return;
+ }
+ }
+}
+
/**
* ixgbe_configure_fcoe - configures registers for fcoe at start
* @adapter: ptr to ixgbe adapter
@@ -599,27 +599,21 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-#ifdef CONFIG_IXGBE_DCB
- u8 tc;
- u32 up2tc;
-#endif
- /* create the pool for ddp if not created yet */
if (!fcoe->pool) {
- /* allocate ddp pool */
- fcoe->pool = pci_pool_create("ixgbe_fcoe_ddp",
- adapter->pdev, IXGBE_FCPTR_MAX,
- IXGBE_FCPTR_ALIGN, PAGE_SIZE);
- if (!fcoe->pool)
- e_err(drv, "failed to allocated FCoE DDP pool\n");
-
spin_lock_init(&fcoe->lock);
+ ixgbe_fcoe_ddp_pools_alloc(adapter);
+ if (!fcoe->pool) {
+ e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
+ return;
+ }
+
/* Extra buffer to be shared by all DDPs for HW work around */
fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
if (fcoe->extra_ddp_buffer == NULL) {
e_err(drv, "failed to allocated extra DDP buffer\n");
- goto out_extra_ddp_buffer_alloc;
+ goto out_ddp_pools;
}
fcoe->extra_ddp_buffer_dma =
@@ -630,7 +624,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
if (dma_mapping_error(&adapter->pdev->dev,
fcoe->extra_ddp_buffer_dma)) {
e_err(drv, "failed to map extra DDP buffer\n");
- goto out_extra_ddp_buffer_dma;
+ goto out_extra_ddp_buffer;
}
}
@@ -670,25 +664,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_FCRXCTRL_FCOELLI |
IXGBE_FCRXCTRL_FCCRCBO |
(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
-#ifdef CONFIG_IXGBE_DCB
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
- tc &= (MAX_TRAFFIC_CLASS - 1);
- if (fcoe->tc == tc) {
- fcoe->up = i;
- break;
- }
- }
-#endif
-
return;
-out_extra_ddp_buffer_dma:
+out_extra_ddp_buffer:
kfree(fcoe->extra_ddp_buffer);
-out_extra_ddp_buffer_alloc:
- pci_pool_destroy(fcoe->pool);
- fcoe->pool = NULL;
+out_ddp_pools:
+ ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
@@ -704,18 +685,17 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
int i;
struct ixgbe_fcoe *fcoe = &adapter->fcoe;
- /* release ddp resource */
- if (fcoe->pool) {
- for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
- ixgbe_fcoe_ddp_put(adapter->netdev, i);
- dma_unmap_single(&adapter->pdev->dev,
- fcoe->extra_ddp_buffer_dma,
- IXGBE_FCBUFF_MIN,
- DMA_FROM_DEVICE);
- kfree(fcoe->extra_ddp_buffer);
- pci_pool_destroy(fcoe->pool);
- fcoe->pool = NULL;
- }
+ if (!fcoe->pool)
+ return;
+
+ for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
+ ixgbe_fcoe_ddp_put(adapter->netdev, i);
+ dma_unmap_single(&adapter->pdev->dev,
+ fcoe->extra_ddp_buffer_dma,
+ IXGBE_FCBUFF_MIN,
+ DMA_FROM_DEVICE);
+ kfree(fcoe->extra_ddp_buffer);
+ ixgbe_fcoe_ddp_pools_free(fcoe);
}
/**
@@ -811,41 +791,6 @@ out_disable:
return rc;
}
-#ifdef CONFIG_IXGBE_DCB
-/**
- * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
- * @adapter : ixgbe adapter
- * @up : 802.1p user priority bitmap
- *
- * Finds out the traffic class from the input user priority
- * bitmap for FCoE.
- *
- * Returns : 0 on success otherwise returns 1 on error
- */
-u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
-{
- int i;
- u32 up2tc;
-
- /* valid user priority bitmap must not be 0 */
- if (up) {
- /* from user priority to the corresponding traffic class */
- up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
- for (i = 0; i < MAX_USER_PRIORITY; i++) {
- if (up & (1 << i)) {
- up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
- up2tc &= (MAX_TRAFFIC_CLASS - 1);
- adapter->fcoe.tc = (u8)up2tc;
- adapter->fcoe.up = i;
- return 0;
- }
- }
- }
-
- return 1;
-}
-#endif /* CONFIG_IXGBE_DCB */
-
/**
* ixgbe_fcoe_get_wwn - get world wide name for the node or the port
* @netdev : ixgbe adapter
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 5a650a4ace66..99de145e290d 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -62,20 +62,20 @@ struct ixgbe_fcoe_ddp {
struct scatterlist *sgl;
dma_addr_t udp;
u64 *udl;
+ struct pci_pool *pool;
};
struct ixgbe_fcoe {
-#ifdef CONFIG_IXGBE_DCB
- u8 tc;
- u8 up;
-#endif
- unsigned long mode;
+ struct pci_pool **pool;
atomic_t refcnt;
spinlock_t lock;
- struct pci_pool *pool;
struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
unsigned char *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
+ unsigned long mode;
+#ifdef CONFIG_IXGBE_DCB
+ u8 up;
+#endif
};
#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 08e8e25c159d..1be617545dc9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -32,8 +32,10 @@
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/in.h>
+#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
+#include <linux/sctp.h>
#include <linux/pkt_sched.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
@@ -53,11 +55,10 @@ char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
#define MAJ 3
-#define MIN 3
+#define MIN 4
#define BUILD 8
-#define KFIX 2
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
- __stringify(BUILD) "-k" __stringify(KFIX)
+ __stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2011 Intel Corporation.";
@@ -664,62 +665,6 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
/* tx_buffer_info must be completely set up in the transmit path */
}
-/**
- * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
- * @adapter: driver private struct
- * @index: reg idx of queue to query (0-127)
- *
- * Helper function to determine the traffic index for a particular
- * register index.
- *
- * Returns : a tc index for use in range 0-7, or 0-3
- */
-static u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
-{
- int tc = -1;
- int dcb_i = netdev_get_num_tc(adapter->netdev);
-
- /* if DCB is not enabled the queues have no TC */
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return tc;
-
- /* check valid range */
- if (reg_idx >= adapter->hw.mac.max_tx_queues)
- return tc;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82598EB:
- tc = reg_idx >> 2;
- break;
- default:
- if (dcb_i != 4 && dcb_i != 8)
- break;
-
- /* if VMDq is enabled the lowest order bits determine TC */
- if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
- IXGBE_FLAG_VMDQ_ENABLED)) {
- tc = reg_idx & (dcb_i - 1);
- break;
- }
-
- /*
- * Convert the reg_idx into the correct TC. This bitmask
- * targets the last full 32 ring traffic class and assigns
- * it a value of 1. From there the rest of the rings are
- * based on shifting the mask further up to include the
- * reg_idx / 16 and then reg_idx / 8. It assumes dcB_i
- * will only ever be 8 or 4 and that reg_idx will never
- * be greater then 128. The code without the power of 2
- * optimizations would be:
- * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
- */
- tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
- tc >>= 9 - (reg_idx >> 5);
- }
-
- return tc;
-}
-
static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -765,7 +710,7 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
/* disarm tx queues that have received xoff frames */
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
- u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+ u8 tc = tx_ring->dcb_tc;
if (xoff[tc])
clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
@@ -827,15 +772,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
return ret;
}
-#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
-
-/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
- (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
- MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-
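
The removed TXD_USE_COUNT is a hand-rolled round-up division: the shift yields the quotient and the remainder test adds one. With IXGBE_MAX_TXD_PWR = 14, i.e. 16384 bytes per data descriptor, it is exactly DIV_ROUND_UP. Worked values:

    /* ((S) >> 14) + (((S) & 16383) ? 1 : 0)  ==  DIV_ROUND_UP(S, 16384)
     *   S = 4096  (PAGE_SIZE on x86)  ->  0 + 1 = 1 descriptor
     *   S = 16384                     ->  1 + 0 = 1
     *   S = 16385                     ->  1 + 1 = 2
     */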
/**
* ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
* @adapter: driver private struct
@@ -869,7 +805,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
- (count < tx_ring->work_limit)) {
+ (count < q_vector->tx.work_limit)) {
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
@@ -898,11 +834,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
tx_ring->next_to_clean = i;
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
tx_ring->stats.bytes += total_bytes;
+ tx_ring->stats.packets += total_packets;
+ u64_stats_update_begin(&tx_ring->syncp);
+ q_vector->tx.total_bytes += total_bytes;
+ q_vector->tx.total_packets += total_packets;
u64_stats_update_end(&tx_ring->syncp);
if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
@@ -938,7 +874,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
- (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/
@@ -950,7 +886,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
}
}
- return count < tx_ring->work_limit;
+ return count < q_vector->tx.work_limit;
}
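
Per-ring byte/packet totals stay under the ring's u64_stats_sync so 64-bit counters read consistently on 32-bit hosts, while the per-vector totals feed the ITR estimator. The matching reader side, sketched (illustrative):

    unsigned int start;
    u64 packets, bytes;

    do {
        start   = u64_stats_fetch_begin(&tx_ring->syncp);
        packets = tx_ring->stats.packets;
        bytes   = tx_ring->stats.bytes;
    } while (u64_stats_fetch_retry(&tx_ring->syncp, start));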
#ifdef CONFIG_IXGBE_DCA
@@ -1023,17 +959,17 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
if (q_vector->cpu == cpu)
goto out_no_update;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
@@ -1103,6 +1039,24 @@ static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
}
/**
+ * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
+ * @adapter: address of board private structure
+ * @rx_desc: advanced rx descriptor
+ *
+ * Returns : true if it is FCoE pkt
+ */
+static inline bool ixgbe_rx_is_fcoe(struct ixgbe_adapter *adapter,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+
+ return (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
+ ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
+ (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
+ IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT)));
+}
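
The relocated FCoE test compares in the descriptor's own byte order: the constant filter id is shifted and wrapped in cpu_to_le16 once, so the hot path never byte-swaps pkt_info. The idiom in isolation (illustrative):

    /* convert the constant, not the per-packet field */
    __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;

    if ((pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_ETQF_MASK)) ==
        cpu_to_le16(IXGBE_ETQF_FILTER_FCOE <<
                    IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))
        ;   /* frame matched the FCoE ethertype filter */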
+
+/**
* ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure
* @skb: packet to send up
@@ -1134,14 +1088,14 @@ static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector,
* @adapter: address of board private structure
* @rx_desc: advanced rx descriptor
* @skb: skb currently being received and modified
+ * @status_err: status error value of last descriptor in packet
**/
static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u32 status_err)
{
- u32 status_err = le32_to_cpu(rx_desc->wb.upper.status_error);
-
- skb_checksum_none_assert(skb);
+ skb->ip_summed = CHECKSUM_NONE;
/* Rx csum disabled */
if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
@@ -1485,14 +1439,12 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
/* ERR_MASK will only have valid bits if EOP set */
- if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- /* trim packet back to size 0 and recycle it */
- __pskb_trim(skb, 0);
- rx_buffer_info->skb = skb;
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_any(skb);
goto next_desc;
}
- ixgbe_rx_checksum(adapter, rx_desc, skb);
+ ixgbe_rx_checksum(adapter, rx_desc, skb, staterr);
if (adapter->netdev->features & NETIF_F_RXHASH)
ixgbe_rx_hash(rx_desc, skb);
@@ -1503,8 +1455,9 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
#ifdef IXGBE_FCOE
/* if ddp, not passing to ULD unless for FCP_RSP or error */
- if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
- ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
+ if (ixgbe_rx_is_fcoe(adapter, rx_desc)) {
+ ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb,
+ staterr);
if (!ddp_bytes)
goto next_desc;
}
@@ -1530,7 +1483,7 @@ next_desc:
}
rx_ring->next_to_clean = i;
- cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+ cleaned_count = ixgbe_desc_unused(rx_ring);
if (cleaned_count)
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1550,12 +1503,12 @@ next_desc:
}
#endif /* IXGBE_FCOE */
- rx_ring->total_packets += total_rx_packets;
- rx_ring->total_bytes += total_rx_bytes;
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
rx_ring->stats.bytes += total_rx_bytes;
u64_stats_update_end(&rx_ring->syncp);
+ q_vector->rx.total_packets += total_rx_packets;
+ q_vector->rx.total_bytes += total_rx_bytes;
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1581,38 +1534,37 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx];
/* XXX for_each_set_bit(...) */
- r_idx = find_first_bit(q_vector->rxr_idx,
+ r_idx = find_first_bit(q_vector->rx.idx,
adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rx.count; i++) {
u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->rxr_idx,
+ r_idx = find_next_bit(q_vector->rx.idx,
adapter->num_rx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->txr_idx,
+ r_idx = find_first_bit(q_vector->tx.idx,
adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ for (i = 0; i < q_vector->tx.count; i++) {
u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
- r_idx = find_next_bit(q_vector->txr_idx,
+ r_idx = find_next_bit(q_vector->tx.idx,
adapter->num_tx_queues,
r_idx + 1);
}
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */
q_vector->eitr = adapter->tx_eitr_param;
- else if (q_vector->rxr_count)
+ else if (q_vector->rx.count)
/* rx or mixed */
q_vector->eitr = adapter->rx_eitr_param;
ixgbe_write_eitr(q_vector);
- /* If Flow Director is enabled, set interrupt affinity */
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* If ATR is enabled, set interrupt affinity */
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/*
* Allocate the affinity_hint cpumask, assign the mask
* for this vector, and set our affinity_hint for
@@ -1662,11 +1614,8 @@ enum latency_range {
/**
* ixgbe_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
- * @eitr: eitr setting (ints per sec) to give last timeslice
- * @itr_setting: current throttle rate in ints/second
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: structure containing interrupt and ring information
+ * @ring_container: structure containing ring performance data
*
* Stores a new ITR value based on packets and byte
* counts during the last interrupt. The advantage of per interrupt
@@ -1678,17 +1627,18 @@ enum latency_range {
* this functionality is controlled by the InterruptThrottleRate module
* parameter (see ixgbe_param.c)
**/
-static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
- u32 eitr, u8 itr_setting,
- int packets, int bytes)
+static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector,
+ struct ixgbe_ring_container *ring_container)
{
- unsigned int retval = itr_setting;
- u32 timepassed_us;
u64 bytes_perint;
+ struct ixgbe_adapter *adapter = q_vector->adapter;
+ int bytes = ring_container->total_bytes;
+ int packets = ring_container->total_packets;
+ u32 timepassed_us;
+ u8 itr_setting = ring_container->itr;
if (packets == 0)
- goto update_itr_done;
-
+ return;
/* simple throttlerate management
* 0-20MB/s lowest (100000 ints/s)
@@ -1696,28 +1646,32 @@ static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
* 100-1249MB/s bulk (8000 ints/s)
*/
/* what was last interrupt timeslice? */
- timepassed_us = 1000000/eitr;
+ timepassed_us = 1000000/q_vector->eitr;
bytes_perint = bytes / timepassed_us; /* bytes/usec */
switch (itr_setting) {
case lowest_latency:
if (bytes_perint > adapter->eitr_low)
- retval = low_latency;
+ itr_setting = low_latency;
break;
case low_latency:
if (bytes_perint > adapter->eitr_high)
- retval = bulk_latency;
+ itr_setting = bulk_latency;
else if (bytes_perint <= adapter->eitr_low)
- retval = lowest_latency;
+ itr_setting = lowest_latency;
break;
case bulk_latency:
if (bytes_perint <= adapter->eitr_high)
- retval = low_latency;
+ itr_setting = low_latency;
break;
}
-update_itr_done:
- return retval;
+ /* clear work counters since we have the values we need */
+ ring_container->total_bytes = 0;
+ ring_container->total_packets = 0;
+
+ /* write updated itr to ring container */
+ ring_container->itr = itr_setting;
}
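
A worked pass through the estimator, with assumed numbers:

    /* q_vector->eitr = 20000 ints/s
     *   timepassed_us = 1000000 / 20000 = 50 us per interrupt
     *   bytes = 3000 in the last slice -> bytes_perint = 3000 / 50 = 60
     *   60 bytes/us ~= 60 MB/s -> between eitr_low and eitr_high,
     *   so itr_setting settles at low_latency (20000 ints/s)
     */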
/**
@@ -1763,44 +1717,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
}
-static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
+static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector)
{
- struct ixgbe_adapter *adapter = q_vector->adapter;
- int i, r_idx;
- u32 new_itr;
- u8 current_itr, ret_itr;
-
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
- struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
- ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
- q_vector->tx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
- r_idx + 1);
- }
+ u32 new_itr = q_vector->eitr;
+ u8 current_itr;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
- struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
- ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
- /* if the result for this queue would decrease interrupt
- * rate for this vector then use that result */
- q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
- q_vector->rx_itr - 1 : ret_itr);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
- r_idx + 1);
- }
+ ixgbe_update_itr(q_vector, &q_vector->tx);
+ ixgbe_update_itr(q_vector, &q_vector->rx);
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+ current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
switch (current_itr) {
/* counts and packets in update_itr are dependent on these numbers */
@@ -1811,16 +1736,17 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
new_itr = 20000; /* aka hwitr = ~200 */
break;
case bulk_latency:
- default:
new_itr = 8000;
break;
+ default:
+ break;
}
if (new_itr != q_vector->eitr) {
/* do an exponential smoothing */
new_itr = ((q_vector->eitr * 9) + new_itr)/10;
- /* save the algorithm value here, not the smoothed one */
+ /* save the algorithm value here */
q_vector->eitr = new_itr;
ixgbe_write_eitr(q_vector);
@@ -1937,8 +1863,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr;
@@ -2061,15 +1986,13 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
struct ixgbe_ring *tx_ring;
int i, r_idx;
- if (!q_vector->txr_count)
+ if (!q_vector->tx.count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
tx_ring = adapter->tx_ring[r_idx];
- tx_ring->total_bytes = 0;
- tx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
@@ -2097,16 +2020,14 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
rx_ring = adapter->rx_ring[r_idx];
- rx_ring->total_bytes = 0;
- rx_ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
- if (!q_vector->rxr_count)
+ if (!q_vector->rx.count)
return IRQ_HANDLED;
/* EIAM disabled interrupts (on this vector) for us */
@@ -2123,24 +2044,20 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
int r_idx;
int i;
- if (!q_vector->txr_count && !q_vector->rxr_count)
+ if (!q_vector->tx.count && !q_vector->rx.count)
return IRQ_HANDLED;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx];
- ring->total_bytes = 0;
- ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx];
- ring->total_bytes = 0;
- ring->total_packets = 0;
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
@@ -2172,7 +2089,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
rx_ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
@@ -2181,7 +2098,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2213,33 +2130,33 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- for (i = 0; i < q_vector->txr_count; i++) {
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->tx.count; i++) {
ring = adapter->tx_ring[r_idx];
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
- r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx = find_next_bit(q_vector->tx.idx, adapter->num_tx_queues,
r_idx + 1);
}
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
- budget /= (q_vector->rxr_count ?: 1);
+ budget /= (q_vector->rx.count ?: 1);
budget = max(budget, 1);
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rx.count; i++) {
ring = adapter->rx_ring[r_idx];
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
- r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx = find_next_bit(q_vector->rx.idx, adapter->num_rx_queues,
r_idx + 1);
}
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ r_idx = find_first_bit(q_vector->rx.idx, adapter->num_rx_queues);
ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2271,7 +2188,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ r_idx = find_first_bit(q_vector->tx.idx, adapter->num_tx_queues);
tx_ring = adapter->tx_ring[r_idx];
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
@@ -2281,7 +2198,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->tx_itr_setting & 1)
- ixgbe_set_itr_msix(q_vector);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter,
((u64)1 << q_vector->v_idx));
@@ -2296,8 +2213,8 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
- set_bit(r_idx, q_vector->rxr_idx);
- q_vector->rxr_count++;
+ set_bit(r_idx, q_vector->rx.idx);
+ q_vector->rx.count++;
rx_ring->q_vector = q_vector;
}
@@ -2307,9 +2224,10 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
- set_bit(t_idx, q_vector->txr_idx);
- q_vector->txr_count++;
+ set_bit(t_idx, q_vector->tx.idx);
+ q_vector->tx.count++;
tx_ring->q_vector = q_vector;
+ q_vector->tx.work_limit = a->tx_work_limit;
}
/**
@@ -2398,10 +2316,10 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
if (err)
return err;
-#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+#define SET_HANDLER(_v) (((_v)->rx.count && (_v)->tx.count) \
? &ixgbe_msix_clean_many : \
- (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
- (_v)->txr_count ? &ixgbe_msix_clean_tx : \
+ (_v)->rx.count ? &ixgbe_msix_clean_rx : \
+ (_v)->tx.count ? &ixgbe_msix_clean_tx : \
NULL)
for (vector = 0; vector < q_vectors; vector++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
@@ -2433,7 +2351,7 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
err = request_irq(adapter->msix_entries[vector].vector,
- ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
+ ixgbe_msix_lsc, 0, adapter->lsc_int_name, adapter);
if (err) {
e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
goto free_queue_irqs;
@@ -2452,51 +2370,6 @@ free_queue_irqs:
return err;
}
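The SET_HANDLER() macro above is a nested ternary; written out as a plain function (a readability sketch using the same q_vector fields), the selection logic is:

    static irq_handler_t vector_handler(struct ixgbe_q_vector *v)
    {
            if (v->rx.count && v->tx.count)
                    return ixgbe_msix_clean_many;
            if (v->rx.count)
                    return ixgbe_msix_clean_rx;
            if (v->tx.count)
                    return ixgbe_msix_clean_tx;
            return NULL;    /* vector has no rings; no IRQ requested */
    }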
-static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
- struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
- u32 new_itr = q_vector->eitr;
- u8 current_itr;
-
- q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->tx_itr,
- tx_ring->total_packets,
- tx_ring->total_bytes);
- q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr,
- q_vector->rx_itr,
- rx_ring->total_packets,
- rx_ring->total_bytes);
-
- current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
-
- switch (current_itr) {
- /* counts and packets in update_itr are dependent on these numbers */
- case lowest_latency:
- new_itr = 100000;
- break;
- case low_latency:
- new_itr = 20000; /* aka hwitr = ~200 */
- break;
- case bulk_latency:
- new_itr = 8000;
- break;
- default:
- break;
- }
-
- if (new_itr != q_vector->eitr) {
- /* do an exponential smoothing */
- new_itr = ((q_vector->eitr * 9) + new_itr)/10;
-
- /* save the algorithm value here */
- q_vector->eitr = new_itr;
-
- ixgbe_write_eitr(q_vector);
- }
-}
-
/**
* ixgbe_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
@@ -2523,8 +2396,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
default:
break;
}
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
mask |= IXGBE_EIMS_FLOW_DIR;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
@@ -2546,8 +2418,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
**/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
- struct net_device *netdev = data;
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_adapter *adapter = data;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u32 eicr;
@@ -2596,10 +2467,6 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0]->total_packets = 0;
- adapter->tx_ring[0]->total_bytes = 0;
- adapter->rx_ring[0]->total_packets = 0;
- adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -2621,10 +2488,10 @@ static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
for (i = 0; i < q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
- bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
- bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
- q_vector->rxr_count = 0;
- q_vector->txr_count = 0;
+ bitmap_zero(q_vector->rx.idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->tx.idx, MAX_TX_QUEUES);
+ q_vector->rx.count = 0;
+ q_vector->tx.count = 0;
}
}
@@ -2644,10 +2511,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
err = ixgbe_request_msix_irqs(adapter);
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
err = request_irq(adapter->pdev->irq, ixgbe_intr, 0,
- netdev->name, netdev);
+ netdev->name, adapter);
} else {
err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED,
- netdev->name, netdev);
+ netdev->name, adapter);
}
if (err)
@@ -2658,21 +2525,19 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
{
- struct net_device *netdev = adapter->netdev;
-
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int i, q_vectors;
q_vectors = adapter->num_msix_vectors;
i = q_vectors - 1;
- free_irq(adapter->msix_entries[i].vector, netdev);
+ free_irq(adapter->msix_entries[i].vector, adapter);
i--;
for (; i >= 0; i--) {
/* free only the irqs that were actually requested */
- if (!adapter->q_vector[i]->rxr_count &&
- !adapter->q_vector[i]->txr_count)
+ if (!adapter->q_vector[i]->rx.count &&
+ !adapter->q_vector[i]->tx.count)
continue;
free_irq(adapter->msix_entries[i].vector,
@@ -2681,7 +2546,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
ixgbe_reset_q_vectors(adapter);
} else {
- free_irq(adapter->pdev->irq, netdev);
+ free_irq(adapter->pdev->irq, adapter);
}
}
@@ -2814,7 +2679,8 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 rttdcs;
- u32 mask;
+ u32 reg;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
if (hw->mac.type == ixgbe_mac_82598EB)
return;
@@ -2825,22 +2691,27 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
/* set transmit pool layout */
- mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
- switch (adapter->flags & mask) {
-
+ switch (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
case (IXGBE_FLAG_SRIOV_ENABLED):
IXGBE_WRITE_REG(hw, IXGBE_MTQC,
(IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
break;
+ default:
+ if (!tcs)
+ reg = IXGBE_MTQC_64Q_1PB;
+ else if (tcs <= 4)
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
+ else
+ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
- case (IXGBE_FLAG_DCB_ENABLED):
- /* We enable 8 traffic classes, DCB only */
- IXGBE_WRITE_REG(hw, IXGBE_MTQC,
- (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
- break;
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);
- default:
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ /* Enable Security TX Buffer IFG for multiple pb */
+ if (tcs) {
+ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+ reg |= IXGBE_SECTX_DCB;
+ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);
+ }
break;
}
@@ -2931,7 +2802,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
u32 mrqc = 0, reta = 0;
u32 rxcsum;
int i, j;
- int mask;
+ u8 tcs = netdev_get_num_tc(adapter->netdev);
+ int maxq = adapter->ring_feature[RING_F_RSS].indices;
+
+ if (tcs)
+ maxq = min(maxq, adapter->num_tx_queues / tcs);
/* Fill out hash function seeds */
for (i = 0; i < 10; i++)
@@ -2939,7 +2814,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
/* Fill out redirection table */
for (i = 0, j = 0; i < 128; i++, j++) {
- if (j == adapter->ring_feature[RING_F_RSS].indices)
+ if (j == maxq)
j = 0;
/* reta = 4-byte sliding window of
* 0x00..(indices-1)(indices-1)00..etc. */
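The sliding window packs four 8-bit queue indices per 32-bit RETA register; a sketch of the full fill loop behind that comment, assuming the driver's usual (j * 0x11) byte packing and a register write every fourth entry:

    static void fill_reta(struct ixgbe_hw *hw, int maxq)
    {
            u32 reta = 0;
            int i, j;

            for (i = 0, j = 0; i < 128; i++, j++) {
                    if (j == maxq)
                            j = 0;
                    reta = (reta << 8) | (j * 0x11);
                    if ((i & 3) == 3)   /* 4 entries accumulated */
                            IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
            }
    }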
@@ -2953,33 +2828,28 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
rxcsum |= IXGBE_RXCSUM_PCSD;
IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
- if (adapter->hw.mac.type == ixgbe_mac_82598EB)
- mask = adapter->flags & IXGBE_FLAG_RSS_ENABLED;
- else
- mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
-#ifdef CONFIG_IXGBE_DCB
- | IXGBE_FLAG_DCB_ENABLED
-#endif
- | IXGBE_FLAG_SRIOV_ENABLED
- );
-
- switch (mask) {
-#ifdef CONFIG_IXGBE_DCB
- case (IXGBE_FLAG_DCB_ENABLED | IXGBE_FLAG_RSS_ENABLED):
- mrqc = IXGBE_MRQC_RTRSS8TCEN;
- break;
- case (IXGBE_FLAG_DCB_ENABLED):
- mrqc = IXGBE_MRQC_RT8TCEN;
- break;
-#endif /* CONFIG_IXGBE_DCB */
- case (IXGBE_FLAG_RSS_ENABLED):
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB &&
+ (adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
mrqc = IXGBE_MRQC_RSSEN;
- break;
- case (IXGBE_FLAG_SRIOV_ENABLED):
- mrqc = IXGBE_MRQC_VMDQEN;
- break;
- default:
- break;
+ } else {
+ int mask = adapter->flags & (IXGBE_FLAG_RSS_ENABLED
+ | IXGBE_FLAG_SRIOV_ENABLED);
+
+ switch (mask) {
+ case (IXGBE_FLAG_RSS_ENABLED):
+ if (!tcs)
+ mrqc = IXGBE_MRQC_RSSEN;
+ else if (tcs <= 4)
+ mrqc = IXGBE_MRQC_RTRSS4TCEN;
+ else
+ mrqc = IXGBE_MRQC_RTRSS8TCEN;
+ break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
+ default:
+ break;
+ }
}
/* Perform hash on these packet types */
@@ -2992,28 +2862,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_clear_rscctl - disable RSC for the indicated ring
- * @adapter: address of board private structure
- * @ring: structure containing ring specific data
- **/
-void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 rscctrl;
- u8 reg_idx = ring->reg_idx;
-
- rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
- rscctrl &= ~IXGBE_RSCCTL_RSCEN;
- IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
-}
-
-/**
* ixgbe_configure_rscctl - enable RSC for the indicated ring
* @adapter: address of board private structure
* @index: index of ring to set
**/
-void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
struct ixgbe_ring *ring)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -3183,7 +3036,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
+ ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
}
static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -3681,10 +3534,10 @@ static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
q_vector = adapter->q_vector[q_idx];
napi = &q_vector->napi;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- if (!q_vector->rxr_count || !q_vector->txr_count) {
- if (q_vector->txr_count == 1)
+ if (!q_vector->rx.count || !q_vector->tx.count) {
+ if (q_vector->tx.count == 1)
napi->poll = &ixgbe_clean_txonly;
- else if (q_vector->rxr_count == 1)
+ else if (q_vector->rx.count == 1)
napi->poll = &ixgbe_clean_rxonly;
}
}
@@ -3739,7 +3592,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
/* reconfigure the hardware */
- if (adapter->dcbx_cap & (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE)) {
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) {
#ifdef CONFIG_FCOE
if (adapter->netdev->features & NETIF_F_FCOE_MTU)
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
@@ -3779,12 +3632,51 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
}
#endif
+
+static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
+{
+ int hdrm = 0;
+ int num_tc = netdev_get_num_tc(adapter->netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
+ adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
+ hdrm = 64 << adapter->fdir_pballoc;
+
+ hw->mac.ops.set_rxpba(&adapter->hw, num_tc, hdrm, PBA_STRATEGY_EQUAL);
+}
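With the pballoc enum values defined later in this diff (64K = 1, 128K = 2, 256K = 3), the headroom works out as below; set_rxpba() then splits the remaining Rx packet buffer equally across the enabled TCs (PBA_STRATEGY_EQUAL). A sketch of the arithmetic:

    /* hdrm reserved ahead of the per-TC split (units as consumed by
     * the mac op; fdir_pballoc values per the ixgbe_type.h enum) */
    int hdrm_64k  = 64 << 1;    /* IXGBE_FDIR_PBALLOC_64K  -> 128 */
    int hdrm_256k = 64 << 3;    /* IXGBE_FDIR_PBALLOC_256K -> 512 */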
+
+static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ if (!hlist_empty(&adapter->fdir_filter_list))
+ ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ ixgbe_fdir_write_perfect_filter_82599(hw,
+ &filter->filter,
+ filter->sw_idx,
+ (filter->action == IXGBE_FDIR_DROP_QUEUE) ?
+ IXGBE_FDIR_DROP_QUEUE :
+ adapter->rx_ring[filter->action]->reg_idx);
+ }
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
static void ixgbe_configure(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
int i;
+ ixgbe_configure_pb(adapter);
#ifdef CONFIG_IXGBE_DCB
ixgbe_configure_dcb(adapter);
#endif
@@ -3803,7 +3695,9 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
- ixgbe_init_fdir_perfect_82599(hw, adapter->fdir_pballoc);
+ ixgbe_init_fdir_perfect_82599(&adapter->hw,
+ adapter->fdir_pballoc);
+ ixgbe_fdir_filter_restore(adapter);
}
ixgbe_configure_virtualization(adapter);
@@ -4180,6 +4074,23 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
ixgbe_clean_tx_ring(adapter->tx_ring[i]);
}
+static void ixgbe_fdir_filter_exit(struct ixgbe_adapter *adapter)
+{
+ struct hlist_node *node, *node2;
+ struct ixgbe_fdir_filter *filter;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node, node2,
+ &adapter->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ adapter->fdir_filter_count = 0;
+
+ spin_unlock(&adapter->fdir_perfect_lock);
+}
+
void ixgbe_down(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -4306,7 +4217,7 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
- ixgbe_set_itr(adapter);
+ ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, IXGBE_EIMS_RTX_QUEUE);
}
@@ -4369,15 +4280,13 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
f_fdir->mask = 0;
/* Flow Director must have RSS enabled */
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
- ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)))) {
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
adapter->num_tx_queues = f_fdir->indices;
adapter->num_rx_queues = f_fdir->indices;
ret = true;
} else {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
}
return ret;
}
@@ -4400,69 +4309,72 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-#ifdef CONFIG_IXGBE_DCB
- int tc;
- struct net_device *dev = adapter->netdev;
+ f->indices = min((int)num_online_cpus(), f->indices);
- tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
- f->indices = dev->tc_to_txq[tc].count;
- f->mask = dev->tc_to_txq[tc].offset;
-#endif
- } else {
- f->indices = min((int)num_online_cpus(), f->indices);
-
- adapter->num_rx_queues = 1;
- adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- e_info(probe, "FCoE enabled with RSS\n");
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
- ixgbe_set_fdir_queues(adapter);
- else
- ixgbe_set_rss_queues(adapter);
- }
- /* adding FCoE rx rings to the end */
- f->mask = adapter->num_rx_queues;
- adapter->num_rx_queues += f->indices;
- adapter->num_tx_queues += f->indices;
+ if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+ e_info(probe, "FCoE enabled with RSS\n");
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
+ ixgbe_set_fdir_queues(adapter);
+ else
+ ixgbe_set_rss_queues(adapter);
}
+ /* adding FCoE rx rings to the end */
+ f->mask = adapter->num_rx_queues;
+ adapter->num_rx_queues += f->indices;
+ adapter->num_tx_queues += f->indices;
+
return true;
}
#endif /* IXGBE_FCOE */
+/* Artificial max queue cap per traffic class in DCB mode */
+#define DCB_QUEUE_CAP 8
+
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
- bool ret = false;
- struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_DCB];
- int i, q;
+ int per_tc_q, q, i, offset = 0;
+ struct net_device *dev = adapter->netdev;
+ int tcs = netdev_get_num_tc(dev);
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
- return ret;
+ if (!tcs)
+ return false;
- f->indices = 0;
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- q = min((int)num_online_cpus(), MAX_TRAFFIC_CLASS);
- f->indices += q;
+ /* Map queue offset and counts onto allocated tx queues */
+ per_tc_q = min(dev->num_tx_queues / tcs, (unsigned int)DCB_QUEUE_CAP);
+ q = min((int)num_online_cpus(), per_tc_q);
+
+ for (i = 0; i < tcs; i++) {
+ netdev_set_prio_tc_map(dev, i, i);
+ netdev_set_tc_queue(dev, i, q, offset);
+ offset += q;
}
- f->mask = 0x7 << 3;
- adapter->num_rx_queues = f->indices;
- adapter->num_tx_queues = f->indices;
- ret = true;
+ adapter->num_tx_queues = q * tcs;
+ adapter->num_rx_queues = q * tcs;
#ifdef IXGBE_FCOE
- /* FCoE enabled queues require special configuration done through
- * configure_fcoe() and others. Here we map FCoE indices onto the
- * DCB queue pairs allowing FCoE to own configuration later.
+ /* FCoE enabled queues require special configuration indexed
+ * by feature specific indices and mask. Here we map FCoE
+ * indices onto the DCB queue pairs allowing FCoE to own
+ * configuration later.
*/
- ixgbe_set_fcoe_queues(adapter);
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+ int tc;
+ struct ixgbe_ring_feature *f =
+ &adapter->ring_feature[RING_F_FCOE];
+
+ tc = netdev_get_prio_tc_map(dev, adapter->fcoe.up);
+ f->indices = dev->tc_to_txq[tc].count;
+ f->mask = dev->tc_to_txq[tc].offset;
+ }
#endif
- return ret;
+ return true;
}
#endif
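A worked example of the mapping above: with tcs = 8, dev->num_tx_queues = 64 and 8 online CPUs, per_tc_q = min(64 / 8, 8) = 8 and q = 8, so each TC owns a contiguous block of eight queues.

    /* Resulting netdev_set_tc_queue() layout for the example above:
     * TC0 -> queues 0..7, TC1 -> 8..15, ..., TC7 -> 56..63 */
    int tcs = 8, txqs = 64, cpus = 8;
    int per_tc_q = min(txqs / tcs, 8 /* DCB_QUEUE_CAP */);
    int q = min(cpus, per_tc_q);        /* 8 queues per TC */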
@@ -4616,8 +4528,8 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
- *tx = tc << 3;
- *rx = tc << 2;
+ *tx = tc << 2;
+ *rx = tc << 3;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -4657,55 +4569,6 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
}
}
-#define IXGBE_MAX_Q_PER_TC (IXGBE_MAX_DCB_INDICES / MAX_TRAFFIC_CLASS)
-
-/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
- * classes.
- *
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
- */
-int ixgbe_setup_tc(struct net_device *dev, u8 tc)
-{
- int i;
- unsigned int q, offset = 0;
-
- if (!tc) {
- netdev_reset_tc(dev);
- } else {
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- /* Hardware supports up to 8 traffic classes */
- if (tc > MAX_TRAFFIC_CLASS || netdev_set_num_tc(dev, tc))
- return -EINVAL;
-
- /* Partition Tx queues evenly amongst traffic classes */
- for (i = 0; i < tc; i++) {
- q = min((int)num_online_cpus(), IXGBE_MAX_Q_PER_TC);
- netdev_set_prio_tc_map(dev, i, i);
- netdev_set_tc_queue(dev, i, q, offset);
- offset += q;
- }
-
- /* This enables multiple traffic class support in the hardware
- * which defaults to strict priority transmission by default.
- * If traffic classes are already enabled perhaps through DCB
- * code path then existing configuration will be used.
- */
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
- dev->dcbnl_ops && dev->dcbnl_ops->setdcbx) {
- struct ieee_ets ets = {
- .prio_tc = {0, 1, 2, 3, 4, 5, 6, 7},
- };
- u8 mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-
- dev->dcbnl_ops->setdcbx(dev, mode);
- dev->dcbnl_ops->ieee_setets(dev, &ets);
- }
- }
- return 0;
-}
-
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
@@ -4719,7 +4582,7 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);
- if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ if (!num_tcs)
return false;
for (i = 0, k = 0; i < num_tcs; i++) {
@@ -4751,9 +4614,8 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
int i;
bool ret = false;
- if (adapter->flags & IXGBE_FLAG_RSS_ENABLED &&
- ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -4782,8 +4644,7 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
- if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
- (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
@@ -4963,14 +4824,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
- if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
- IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
e_err(probe,
- "Flow Director is not supported while multiple "
+ "ATR is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
@@ -5024,7 +4883,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- if (q_vector->txr_count && !q_vector->rxr_count)
+ if (q_vector->tx.count && !q_vector->rx.count)
q_vector->eitr = adapter->tx_eitr_param;
else
q_vector->eitr = adapter->rx_eitr_param;
@@ -5201,7 +5060,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
- adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
@@ -5215,21 +5073,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
- /* n-tuple support exists, always init our spinlock */
- spin_lock_init(&adapter->fdir_perfect_lock);
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->fdir_pballoc = 0;
+ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
- adapter->fcoe.tc = IXGBE_FCOE_DEFTC;
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
@@ -5238,6 +5093,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
break;
}
+ /* n-tuple support exists, always init our spinlock */
+ spin_lock_init(&adapter->fdir_perfect_lock);
+
#ifdef CONFIG_IXGBE_DCB
/* Configure DCB traffic classes */
for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
@@ -5250,7 +5108,6 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
}
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.rx_pba_cfg = pba_equal;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
@@ -5285,6 +5142,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+ /* set default work limits */
+ adapter->tx_work_limit = adapter->tx_ring_count;
+
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
e_dev_err("EEPROM initialization failed\n");
@@ -5331,7 +5191,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->work_limit = tx_ring->count;
return 0;
err:
@@ -5620,6 +5479,8 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_down(adapter);
ixgbe_free_irq(adapter);
+ ixgbe_fdir_filter_exit(adapter);
+
ixgbe_free_all_tx_resources(adapter);
ixgbe_free_all_rx_resources(adapter);
@@ -6038,7 +5899,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* get one bit for every active tx/rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
+ if (qv->rx.count || qv->tx.count)
eics |= ((u64)1 << i);
}
}
@@ -6143,9 +6004,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
(flow_tx ? "TX" : "None"))));
netif_carrier_on(netdev);
-#ifdef HAVE_IPLINK_VF_CONFIG
ixgbe_check_vf_rate_limit(adapter);
-#endif /* HAVE_IPLINK_VF_CONFIG */
}
/**
@@ -6404,179 +6263,145 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
}
-static int ixgbe_tso(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, u8 *hdr_len, __be16 protocol)
+void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
+ u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- int err;
- struct ixgbe_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl;
- u32 mss_l4len_idx, l4len;
+ u16 i = tx_ring->next_to_use;
- if (skb_is_gso(skb)) {
- if (skb_header_cloned(skb)) {
- err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
- if (err)
- return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
- i = tx_ring->next_to_use;
+ i++;
+ tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= ((skb_network_offset(skb)) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- vlan_macip_lens |=
- (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len +=
- (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ /* set bits to identify this as an advanced context descriptor */
+ type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
- if (protocol == htons(ETH_P_IP))
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
- type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
-
- /* MSS L4LEN IDX */
- mss_l4len_idx =
- (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
- /* use index 1 for TSO */
- mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+}
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
+static int ixgbe_tso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+ u32 tx_flags, __be16 protocol, u8 *hdr_len)
+{
+ int err;
+ u32 vlan_macip_lens, type_tucmd;
+ u32 mss_l4len_idx, l4len;
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
+ if (!skb_is_gso(skb))
+ return 0;
- return true;
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
}
- return false;
-}
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- __be16 protocol)
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+
+ if (protocol == __constant_htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
+
+ l4len = tcp_hdrlen(skb);
+ *hdr_len = skb_transport_offset(skb) + l4len;
+
+ /* mss_l4len_idx: use 1 as index for TSO */
+ mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
+ mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+
+ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
+ vlan_macip_lens = skb_network_header_len(skb);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+ mss_l4len_idx);
+
+ return 1;
+}
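The vlan_macip_lens dword built above carries three fields in one descriptor word; a minimal sketch of the packing, assuming the usual shift values (MACLEN at bit 9, VLAN tag in the top 16 bits):

    static u32 pack_vlan_macip_lens(u16 ip_hlen, u8 mac_hlen, u16 vlan_tci)
    {
            u32 v = ip_hlen;                 /* skb_network_header_len() */
            v |= (u32)mac_hlen << 9;         /* IXGBE_ADVTXD_MACLEN_SHIFT */
            v |= (u32)vlan_tci << 16;        /* IXGBE_TX_FLAGS_VLAN_MASK */
            return v;
    }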
+
+static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ __be16 protocol)
{
- u32 rtn = 0;
+ u32 vlan_macip_lens = 0;
+ u32 mss_l4len_idx = 0;
+ u32 type_tucmd = 0;
- switch (protocol) {
- case cpu_to_be16(ETH_P_IP):
- rtn |= IXGBE_ADVTXD_TUCMD_IPV4;
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_TCP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ if (skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (!(tx_flags & IXGBE_TX_FLAGS_VLAN))
+ return false;
+ } else {
+ u8 l4_hdr = 0;
+ switch (protocol) {
+ case __constant_htons(ETH_P_IP):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+ l4_hdr = ip_hdr(skb)->protocol;
break;
- case IPPROTO_SCTP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ case __constant_htons(ETH_P_IPV6):
+ vlan_macip_lens |= skb_network_header_len(skb);
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but proto=%x!\n",
+ skb->protocol);
+ }
break;
}
- break;
- case cpu_to_be16(ETH_P_IPV6):
- /* XXX what about other V6 headers?? */
- switch (ipv6_hdr(skb)->nexthdr) {
+
+ switch (l4_hdr) {
case IPPROTO_TCP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ mss_l4len_idx = tcp_hdrlen(skb) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
break;
case IPPROTO_SCTP:
- rtn |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+ mss_l4len_idx = sizeof(struct sctphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ case IPPROTO_UDP:
+ mss_l4len_idx = sizeof(struct udphdr) <<
+ IXGBE_ADVTXD_L4LEN_SHIFT;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ dev_warn(tx_ring->dev,
+ "partial checksum but l4 proto=%x!\n",
+ l4_hdr);
+ }
break;
}
- break;
- default:
- if (unlikely(net_ratelimit()))
- e_warn(probe, "partial checksum but proto=%x!\n",
- protocol);
- break;
}
- return rtn;
-}
-
-static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags,
- __be16 protocol)
-{
- struct ixgbe_adv_tx_context_desc *context_desc;
- unsigned int i;
- struct ixgbe_tx_buffer *tx_buffer_info;
- u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
-
- if (skb->ip_summed == CHECKSUM_PARTIAL ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
- i = tx_ring->next_to_use;
- tx_buffer_info = &tx_ring->tx_buffer_info[i];
- context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
-
- if (tx_flags & IXGBE_TX_FLAGS_VLAN)
- vlan_macip_lens |=
- (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
- vlan_macip_lens |= (skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- vlan_macip_lens |= (skb_transport_header(skb) -
- skb_network_header(skb));
-
- context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
- context_desc->seqnum_seed = 0;
-
- type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
- IXGBE_ADVTXD_DTYP_CTXT);
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
+ type_tucmd, mss_l4len_idx);
- context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
- /* use index zero for tx checksum offload */
- context_desc->mss_l4len_idx = 0;
-
- tx_buffer_info->time_stamp = jiffies;
- tx_buffer_info->next_to_watch = i;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
- tx_ring->next_to_use = i;
-
- return true;
- }
-
- return false;
+ return (skb->ip_summed == CHECKSUM_PARTIAL);
}
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
@@ -6588,11 +6413,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int offset = 0, size, count = 0;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
unsigned int bytecount = skb->len;
u16 gso_segs = 1;
+ u16 i;
i = tx_ring->next_to_use;
@@ -6858,7 +6684,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
input, common, ring->queue_index);
}
-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had:
@@ -6868,7 +6694,7 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
/* We need to check again in a case another CPU has just
* made room available. */
- if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ if (likely(ixgbe_desc_unused(tx_ring) < size))
return -EBUSY;
/* A reprieve! - use start_queue because it doesn't call schedule */
@@ -6877,9 +6703,9 @@ static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
return 0;
}
-static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
+static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
{
- if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ if (likely(ixgbe_desc_unused(tx_ring) >= size))
return 0;
return __ixgbe_maybe_stop_tx(tx_ring, size);
}
@@ -6887,11 +6713,10 @@ static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- int txq = smp_processor_id();
+ int txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) :
+ smp_processor_id();
#ifdef IXGBE_FCOE
- __be16 protocol;
-
- protocol = vlan_get_protocol(skb);
+ __be16 protocol = vlan_get_protocol(skb);
if (((protocol == htons(ETH_P_FCOE)) ||
(protocol == htons(ETH_P_FIP))) &&
@@ -6915,13 +6740,33 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring)
{
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
int tso;
- int count = 0;
- unsigned int f;
+ u32 tx_flags = 0;
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ unsigned short f;
+#endif
+ u16 first;
+ u16 count = TXD_USE_COUNT(skb_headlen(skb));
__be16 protocol;
+ u8 hdr_len = 0;
+
+ /*
+ * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+ * + 1 desc for skb_head_len/IXGBE_MAX_DATA_PER_TXD,
+ * + 2 desc gap to keep tail from touching head,
+ * + 1 desc for context descriptor,
+ * otherwise try next time
+ */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+ count += skb_shinfo(skb)->nr_frags;
+#endif
+ if (ixgbe_maybe_stop_tx(tx_ring, count + 3)) {
+ tx_ring->tx_stats.tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
protocol = vlan_get_protocol(skb);
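A worked example of the reservation above: a TSO skb with a 254-byte linear head and sixteen 4 KB page frags needs 1 + 16 data descriptors when PAGE_SIZE <= IXGBE_MAX_DATA_PER_TXD (16 KB), and the queue is stopped unless count + 3 slots are free, covering the 2-descriptor gap plus one context descriptor.

    /* PAGE_SIZE 4096, so the #else branch applies */
    u16 count = TXD_USE_COUNT(254);     /* linear head: 1 */
    count += 16;                        /* one per page frag */
    /* proceed only if 17 + 3 = 20 descriptors are unused */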
@@ -6946,51 +6791,29 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
(protocol == htons(ETH_P_FCOE)))
tx_flags |= IXGBE_TX_FLAGS_FCOE;
-#endif
-
- /* four things can cause us to need a context descriptor */
- if (skb_is_gso(skb) ||
- (skb->ip_summed == CHECKSUM_PARTIAL) ||
- (tx_flags & IXGBE_TX_FLAGS_VLAN) ||
- (tx_flags & IXGBE_TX_FLAGS_FCOE))
- count++;
-
- count += TXD_USE_COUNT(skb_headlen(skb));
- for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
- count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-
- if (ixgbe_maybe_stop_tx(tx_ring, count)) {
- tx_ring->tx_stats.tx_busy++;
- return NETDEV_TX_BUSY;
- }
+#endif
+ /* record the location of the first descriptor for this packet */
first = tx_ring->next_to_use;
+
if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
#ifdef IXGBE_FCOE
/* setup tx offload for FCoE */
- tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
- if (tso)
+ tso = ixgbe_fso(tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
tx_flags |= IXGBE_TX_FLAGS_FSO;
#endif /* IXGBE_FCOE */
} else {
if (protocol == htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
- tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
- protocol);
- if (tso < 0) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
- if (tso)
+ tso = ixgbe_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
+ if (tso < 0)
+ goto out_drop;
+ else if (tso)
tx_flags |= IXGBE_TX_FLAGS_TSO;
- else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
- protocol) &&
- (skb->ip_summed == CHECKSUM_PARTIAL))
+ else if (ixgbe_tx_csum(tx_ring, skb, tx_flags, protocol))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
@@ -7003,12 +6826,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
} else {
- dev_kfree_skb_any(skb);
tx_ring->tx_buffer_info[first].time_stamp = 0;
tx_ring->next_to_use = first;
+ goto out_drop;
}
return NETDEV_TX_OK;
+
+out_drop:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -7198,6 +7025,177 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
return stats;
}
+/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
+ * @adapter: pointer to ixgbe_adapter
+ * @tc: number of traffic classes currently enabled
+ *
+ * Configure a valid 802.1Qp to Rx packet buffer mapping, i.e. confirm
+ * 802.1Q priority maps to a packet buffer that exists.
+ */
+static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg, rsave;
+ int i;
+
+ /* The 82598 has a static priority to TC mapping that cannot
+ * be changed, so no validation is needed.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return;
+
+ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
+ rsave = reg;
+
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT);
+
+ /* If up2tc is out of bounds default to zero */
+ if (up2tc > tc)
+ reg &= ~(0x7 << IXGBE_RTRUP2TC_UP_SHIFT);
+ }
+
+ if (reg != rsave)
+ IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);
+
+ return;
+}
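RTRUP2TC holds eight 3-bit user-priority-to-TC fields; a sketch of extracting field i in isolation (masked to 3 bits, with IXGBE_RTRUP2TC_UP_SHIFT assumed to be 3):

    static u8 up2tc_of(u32 rtrup2tc, int i)
    {
            return (rtrup2tc >> (i * 3)) & 0x7;
    }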
+
+
+/* ixgbe_setup_tc - routine to configure net_device for multiple traffic
+ * classes.
+ *
+ * @dev: net device to configure
+ * @tc: number of traffic classes to enable
+ */
+int ixgbe_setup_tc(struct net_device *dev, u8 tc)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* If DCB is enabled, do not remove traffic classes; multiple
+ * traffic classes are required to implement DCB
+ */
+ if (!tc && (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ return 0;
+
+ /* Hardware supports up to 8 traffic classes */
+ if (tc > MAX_TRAFFIC_CLASS ||
+ (hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS))
+ return -EINVAL;
+
+ /* Hardware has to reinitialize queues and interrupts to
+ * match packet buffer alignment. Unfortunately, the
+ * hardware is not flexible enough to do this dynamically.
+ */
+ if (netif_running(dev))
+ ixgbe_close(dev);
+ ixgbe_clear_interrupt_scheme(adapter);
+
+ if (tc)
+ netdev_set_num_tc(dev, tc);
+ else
+ netdev_reset_tc(dev);
+
+ ixgbe_init_interrupt_scheme(adapter);
+ ixgbe_validate_rtr(adapter, tc);
+ if (netif_running(dev))
+ ixgbe_open(dev);
+
+ return 0;
+}
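With ndo_setup_tc now exported unconditionally (see the netdev_ops change below), the hook can be driven from outside the DCB code; a sketch of how a caller would request eight traffic classes:

    if (dev->netdev_ops->ndo_setup_tc)
            err = dev->netdev_ops->ndo_setup_tc(dev, 8);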
+
+void ixgbe_do_reset(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ ixgbe_reinit_locked(adapter);
+ else
+ ixgbe_reset(adapter);
+}
+
+static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+
+#ifdef CONFIG_DCB
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
+ data &= ~NETIF_F_HW_VLAN_RX;
+#endif
+
+ /* RXHASH cannot be enabled when RSS is not supported, so drop it */
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ data &= ~NETIF_F_RXHASH;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ data &= ~NETIF_F_LRO;
+
+ /* Turn off LRO if not RSC capable, or if the ITR settings are invalid */
+ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
+ data &= ~NETIF_F_LRO;
+ } else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (adapter->rx_itr_setting != 1 &&
+ adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
+ data &= ~NETIF_F_LRO;
+ e_info(probe, "rx-usecs set too low, not enabling RSC\n");
+ }
+
+ return data;
+}
+
+static int ixgbe_set_features(struct net_device *netdev, u32 data)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+
+ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
+ if (!(data & NETIF_F_RXCSUM))
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ /* Make sure RSC matches LRO, reset if change */
+ if (!!(data & NETIF_F_LRO) !=
+ !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+ adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB:
+ need_reset = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
+ /* turn off ATR, enable perfect filters and reset */
+ if (data & NETIF_F_NTUPLE) {
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ }
+ } else if (!(data & NETIF_F_NTUPLE)) {
+ /* turn off Flow Director, set ATR and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
+ !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ }
+
+ if (need_reset)
+ ixgbe_do_reset(netdev);
+
+ return 0;
+}
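ndo_fix_features sanitizes a requested feature word before ndo_set_features applies it; a sketch of the ordering the core follows when, say, n-tuple filtering is toggled on:

    u32 wanted = netdev->features | NETIF_F_NTUPLE;
    wanted = ixgbe_fix_features(netdev, wanted);  /* drop unsupported bits */
    ixgbe_set_features(netdev, wanted);           /* apply, reset if needed */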
static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_open = ixgbe_open,
@@ -7218,9 +7216,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
-#ifdef CONFIG_IXGBE_DCB
.ndo_setup_tc = ixgbe_setup_tc,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
@@ -7232,6 +7228,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
#endif /* IXGBE_FCOE */
+ .ndo_set_features = ixgbe_set_features,
+ .ndo_fix_features = ixgbe_fix_features,
};
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -7379,14 +7377,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
pci_save_state(pdev);
+#ifdef CONFIG_IXGBE_DCB
+ indices *= MAX_TRAFFIC_CLASS;
+#endif
+
if (ii->mac == ixgbe_mac_82598EB)
indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
else
indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
-#if defined(CONFIG_DCB)
- indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
-#elif defined(IXGBE_FCOE)
+#ifdef IXGBE_FCOE
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
@@ -7497,20 +7497,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ NETIF_F_HW_VLAN_FILTER |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- netdev->features |= NETIF_F_GRO;
- netdev->features |= NETIF_F_RXHASH;
+ netdev->hw_features = netdev->features;
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
netdev->features |= NETIF_F_SCTP_CSUM;
+ netdev->hw_features |= NETIF_F_SCTP_CSUM |
+ NETIF_F_NTUPLE;
break;
default:
break;
@@ -7549,6 +7553,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_HIGHDMA;
}
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+ netdev->hw_features |= NETIF_F_LRO;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
@@ -7585,25 +7591,24 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (err)
goto err_sw_init;
- if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+ netdev->hw_features &= ~NETIF_F_RXHASH;
netdev->features &= ~NETIF_F_RXHASH;
+ }
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
/* Only this subdevice supports WOL */
if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
case IXGBE_DEV_ID_82599_KX4:
- adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
- IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+ adapter->wol = IXGBE_WUFC_MAG;
break;
default:
adapter->wol = 0;
@@ -7678,6 +7683,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_vf_configuration(pdev, (i | 0x10000000));
}
+ /* Inform firmware of driver version */
+ if (hw->mac.ops.set_fw_drv_ver)
+ hw->mac.ops.set_fw_drv_ver(hw, MAJ, MIN, BUILD,
+ FW_CEM_UNUSED_VER);
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index ac99b0458fe2..d99d01e21326 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -605,6 +605,22 @@ static void ixgbe_set_vf_rate_limit(struct ixgbe_hw *hw, int vf, int tx_rate,
}
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, 2*vf); /* vf Y uses queue 2*Y */
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. Typically MMW_SIZE=0x014 if 9728-byte jumbo is supported
+ * and 0x004 otherwise.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x4);
+ break;
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, 0x14);
+ break;
+ default:
+ break;
+ }
+
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
}
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index fa43f2507f43..e0d970ebab7a 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -534,7 +534,7 @@
#define IXGBE_RTTBCNRC_RF_INT_SHIFT 14
#define IXGBE_RTTBCNRC_RF_INT_MASK \
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-
+#define IXGBE_RTTBCNRM 0x04980
/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
@@ -707,6 +707,13 @@
#define IXGBE_HFDR 0x15FE8
#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */
+#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C 0x02
+#define IXGBE_HICR_SV 0x04 /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET 0x80
+
/* PCI-E registers */
#define IXGBE_GCR 0x11000
#define IXGBE_GTV 0x11004
@@ -1118,6 +1125,27 @@
#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */
+/* Packet Buffer Initialization */
+#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/
+
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
+#define IXGBE_MAX_PB 8
+
+/* Packet buffer allocation strategies */
+enum {
+ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL
+ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED
+};
+
/* Transmit Flow Control status */
#define IXGBE_TFCS_TXOFF 0x00000001
#define IXGBE_TFCS_TXOFF0 0x00000100
@@ -1860,6 +1888,7 @@
#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */
#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */
#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */
/* Receive Descriptor bit definitions */
#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
@@ -2027,9 +2056,10 @@
#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
enum ixgbe_fdir_pballoc_type {
- IXGBE_FDIR_PBALLOC_64K = 0,
- IXGBE_FDIR_PBALLOC_128K,
- IXGBE_FDIR_PBALLOC_256K,
+ IXGBE_FDIR_PBALLOC_NONE = 0,
+ IXGBE_FDIR_PBALLOC_64K = 1,
+ IXGBE_FDIR_PBALLOC_128K = 2,
+ IXGBE_FDIR_PBALLOC_256K = 3,
};
#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16
@@ -2083,7 +2113,7 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001
#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002
#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003
-#define IXGBE_FDIRCMD_CMD_QUERY_REM_HASH 0x00000007
+#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004
#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008
#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010
#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020
@@ -2102,6 +2132,44 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIR_INIT_DONE_POLL 10
#define IXGBE_FDIRCMD_CMD_POLL 10
+#define IXGBE_FDIR_DROP_QUEUE 127
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit (ms) */
+
+/* CEM Support */
+#define FW_CEM_HDR_LEN 0x4
+#define FW_CEM_CMD_DRIVER_INFO 0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5
+#define FW_CEM_CMD_RESERVED 0x0
+#define FW_CEM_UNUSED_VER 0x0
+#define FW_CEM_MAX_RETRIES 3
+#define FW_CEM_RESP_STATUS_SUCCESS 0x1
+
+/* Host Interface Command Structures */
+struct ixgbe_hic_hdr {
+ u8 cmd;
+ u8 buf_len;
+ union {
+ u8 cmd_resv;
+ u8 ret_status;
+ } cmd_or_resp;
+ u8 checksum;
+};
+
+struct ixgbe_hic_drv_info {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_num;
+ u8 ver_sub;
+ u8 ver_build;
+ u8 ver_min;
+ u8 ver_maj;
+ u8 pad; /* end spacing to ensure length is mult. of dword */
+ u16 pad2; /* end spacing to ensure length is mult. of dword */
+};
+
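
The checksum in ixgbe_hic_hdr is a simple two's-complement byte sum: all bytes of the command, the checksum field included, add up to zero mod 256. A minimal sketch of populating a driver-info command; both helper names are hypothetical:

/* Sketch: build a FW_CEM_CMD_DRIVER_INFO command. Illustrative only. */
static u8 example_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u32 i;

	for (i = 0; i < length; i++)
		sum += buffer[i];
	return (u8)(0 - sum);	/* make the total byte sum zero */
}

static void example_fill_drv_info(struct ixgbe_hic_drv_info *fw_cmd,
				  u8 maj, u8 min, u8 build, u8 sub)
{
	fw_cmd->hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
	fw_cmd->hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
	fw_cmd->hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	fw_cmd->port_num = 0;	/* the PCI function in the real driver */
	fw_cmd->ver_maj = maj;
	fw_cmd->ver_min = min;
	fw_cmd->ver_build = build;
	fw_cmd->ver_sub = sub;
	fw_cmd->pad = 0;
	fw_cmd->pad2 = 0;
	fw_cmd->hdr.checksum = 0;
	fw_cmd->hdr.checksum = example_checksum((u8 *)fw_cmd,
						fw_cmd->hdr.buf_len +
						FW_CEM_HDR_LEN);
}
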
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2286,7 +2354,7 @@ union ixgbe_atr_input {
* src_port - 2 bytes
* dst_port - 2 bytes
* flex_bytes - 2 bytes
- * rsvd0 - 2 bytes - space reserved must be 0.
+ * bkt_hash - 2 bytes
*/
struct {
u8 vm_pool;
@@ -2297,7 +2365,7 @@ union ixgbe_atr_input {
__be16 src_port;
__be16 dst_port;
__be16 flex_bytes;
- __be16 rsvd0;
+ __be16 bkt_hash;
} formatted;
__be32 dword_stream[11];
};
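
The bkt_hash field lets a computed bucket hash ride along in what was the reserved slot of the 11-dword input stream. A hypothetical illustration of the two views of the union; the real filter code derives the hash from the flow fields:

/*
 * Sketch: dword_stream and formatted alias the same 44 bytes, so the
 * flow fields can be hashed as a flat stream while bkt_hash is set
 * through the structured view. Illustrative only.
 */
static void example_store_bkt_hash(union ixgbe_atr_input *input, u16 hash)
{
	memset(input->dword_stream, 0, sizeof(input->dword_stream));
	input->formatted.bkt_hash = cpu_to_be16(hash);
}
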
@@ -2318,16 +2386,6 @@ union ixgbe_atr_hash_dword {
__be32 dword;
};
-struct ixgbe_atr_input_masks {
- __be16 rsvd0;
- __be16 vlan_id_mask;
- __be32 dst_ip_mask[4];
- __be32 src_ip_mask[4];
- __be16 src_port_mask;
- __be16 dst_port_mask;
- __be16 flex_mask;
-};
-
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -2615,6 +2673,9 @@ struct ixgbe_mac_operations {
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
bool *);
+ /* Packet Buffer Manipulation */
+ void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
+
/* LED */
s32 (*led_on)(struct ixgbe_hw *, u32);
s32 (*led_off)(struct ixgbe_hw *, u32);
@@ -2638,6 +2699,9 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *, s32);
+
+ /* Manageability interface */
+ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
};
struct ixgbe_phy_operations {
@@ -2807,6 +2871,7 @@ struct ixgbe_info {
#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
#define IXGBE_ERR_PBA_SECTION -31
#define IXGBE_ERR_INVALID_ARGUMENT -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index 4ed687be2fe3..bec30ed91adc 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -876,6 +876,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.read_analog_reg8 = NULL,
.write_analog_reg8 = NULL,
.setup_link = &ixgbe_setup_mac_link_X540,
+ .set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.led_on = &ixgbe_led_on_generic,
@@ -893,6 +894,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,