Diffstat (limited to 'drivers/net/ethernet/intel/i40e/i40e_txrx.c')
 drivers/net/ethernet/intel/i40e/i40e_txrx.c | 131
 1 file changed, 79 insertions(+), 52 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc66caf..e49f31dbd5d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
*
******************************************************************************/
+#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dcc |= ((u32)fdir_data->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
- int i;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ip->saddr = fd_data->src_ip[0];
udp->source = fd_data->src_port;
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
}
return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
}
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
- if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
- } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+	 * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
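A note on the VXLAN branch above: the driver has to validate the outer UDPv4 checksum itself because the hardware does not do it. As a rough illustration of what the kernel helpers (csum_partial()/csum_tcpudp_magic()) compute on the driver's behalf, here is a self-contained user-space sketch of verifying a UDP checksum over the IPv4 pseudo-header; it is the generic textbook check, not the driver's exact code path:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* One's-complement accumulation over a byte buffer (network byte order). */
static uint32_t ocsum(uint32_t sum, const uint8_t *p, size_t len)
{
	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;	/* pad trailing odd byte */
	return sum;
}

/* Verify a UDP/IPv4 checksum: the one's-complement sum of the pseudo-header
 * plus the whole UDP datagram (checksum field included) must fold to 0xffff.
 * Addresses are in network byte order, udp points at the UDP header and
 * udp_len is the UDP length (header + payload). Returns 1 when valid.
 */
static int udp4_csum_ok(const uint8_t saddr[4], const uint8_t daddr[4],
			const uint8_t *udp, uint16_t udp_len)
{
	uint8_t pseudo[12] = { 0 };
	uint32_t sum = 0;

	/* pseudo-header: src addr, dst addr, zero, protocol, UDP length */
	memcpy(pseudo, saddr, 4);
	memcpy(pseudo + 4, daddr, 4);
	pseudo[9] = 17;			/* IPPROTO_UDP */
	pseudo[10] = udp_len >> 8;
	pseudo[11] = udp_len & 0xff;

	sum = ocsum(sum, pseudo, sizeof(pseudo));
	sum = ocsum(sum, udp, udp_len);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum == 0xffff;
}

A UDP/IPv4 checksum of zero means the sender did not compute one; real stacks skip verification in that case, which the sketch leaves out for brevity.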
/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |=
+ ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
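A note on the ATR hunk above: the added lines program a counter index into the filter descriptor using the usual shift-then-mask pattern, so an out-of-range index cannot spill into neighbouring bit fields of the descriptor word. A tiny stand-alone sketch of that pattern (the shift and mask values below are invented for illustration, not the hardware's):

#include <stdio.h>
#include <stdint.h>

/* Illustrative field layout, not the real descriptor definition. */
#define CNTINDEX_SHIFT	20
#define CNTINDEX_MASK	(0x1ffU << CNTINDEX_SHIFT)

int main(void)
{
	uint32_t dtype_cmd = 0;
	uint32_t cnt_idx = 0x2a;	/* counter index to program */

	/* Shift the field into position, then mask so only the bits
	 * belonging to this field are ever modified.
	 */
	dtype_cmd |= (cnt_idx << CNTINDEX_SHIFT) & CNTINDEX_MASK;

	printf("dtype_cmd = 0x%08x\n", dtype_cmd);
	return 0;
}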
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
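A note on the last hunk: dropping the PAGE_SIZE conditional means every fragment is now counted with TXD_USE_COUNT(), i.e. rounded up to whole descriptors of at most I40E_MAX_DATA_PER_TXD bytes each, and the head length is counted the same way before the `count + 4 + 1` ring-space check. A small stand-alone sketch of that arithmetic (the per-descriptor cap below is an assumed placeholder, not the driver's real constant):

#include <stdio.h>

/* Illustrative only: the real limit lives in the driver headers. */
#define MAX_DATA_PER_TXD	(8 * 1024)	/* assumed per-descriptor cap */
#define TXD_USE_COUNT(s)	(((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int headlen = 1514;
	unsigned int frag_sizes[] = { 32768, 4096 };
	unsigned int count = 0, f;

	/* one or more data descriptors per fragment, rounded up */
	for (f = 0; f < sizeof(frag_sizes) / sizeof(frag_sizes[0]); f++)
		count += TXD_USE_COUNT(frag_sizes[f]);
	count += TXD_USE_COUNT(headlen);

	/* the + 4 + 1 margin mirrors the i40e_maybe_stop_tx() call above */
	printf("data descriptors: %u, ring space checked for: %u\n",
	       count, count + 4 + 1);
	return 0;
}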