Diffstat:
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 153
 1 file changed, 80 insertions(+), 73 deletions(-)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 37a21fb99922..fc32c5019b0f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -215,9 +215,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
bi = i40e_rx_bi(rx_ring, 0);
ntu = 0;
}
-
- count--;
- } while (count);
+ } while (--count);
no_buffers:
if (rx_ring->next_to_use != ntu) {
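This first hunk in i40e_alloc_rx_buffers_zc() is purely cosmetic: the decrement is folded into the loop condition. A minimal stand-alone sketch (plain C, not driver code; iterations_a/iterations_b are made-up names) confirming the two forms iterate identically for any count >= 1:

    #include <assert.h>

    static unsigned int iterations_a(unsigned int count)
    {
            unsigned int n = 0;

            do {
                    n++;

                    count--;
            } while (count);

            return n;
    }

    static unsigned int iterations_b(unsigned int count)
    {
            unsigned int n = 0;

            do {
                    n++;
            } while (--count);

            return n;
    }

    int main(void)
    {
            for (unsigned int c = 1; c <= 64; c++)
                    assert(iterations_a(c) == iterations_b(c));
            return 0;
    }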
@@ -250,27 +248,68 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
xdp->data_end - xdp->data_hard_start,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
- return NULL;
+ goto out;
skb_reserve(skb, xdp->data - xdp->data_hard_start);
memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
+out:
xsk_buff_free(xdp);
return skb;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+static void i40e_handle_xdp_result_zc(struct i40e_ring *rx_ring,
+ struct xdp_buff *xdp_buff,
+ union i40e_rx_desc *rx_desc,
+ unsigned int *rx_packets,
+ unsigned int *rx_bytes,
+ unsigned int size,
+ unsigned int xdp_res)
{
- u32 ntc = rx_ring->next_to_clean + 1;
+ struct sk_buff *skb;
+
+ *rx_packets = 1;
+ *rx_bytes = size;
+
+ if (likely(xdp_res == I40E_XDP_REDIR) || xdp_res == I40E_XDP_TX)
+ return;
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
+ if (xdp_res == I40E_XDP_CONSUMED) {
+ xsk_buff_free(xdp_buff);
+ return;
+ }
+
+ if (xdp_res == I40E_XDP_PASS) {
+ /* NB! We are not checking for errors using
+ * i40e_test_staterr() with
+ * BIT(I40E_RXD_QW1_ERROR_SHIFT), because SBP is
+ * *not* set in PRT_SBPVSI (it is not set by default).
+ */
+ skb = i40e_construct_skb_zc(rx_ring, xdp_buff);
+ if (!skb) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ if (eth_skb_pad(skb)) {
+ *rx_packets = 0;
+ *rx_bytes = 0;
+ return;
+ }
+
+ *rx_bytes = skb->len;
+ i40e_process_skb_fields(rx_ring, rx_desc, skb);
+ napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ return;
+ }
+
+ /* Should never get here, as all valid cases have been handled already.
+ */
+ WARN_ON_ONCE(1);
}
/**
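The new i40e_handle_xdp_result_zc() reports each packet's contribution through out-parameters and assumes success up front, zeroing the stats only on the failure paths. A minimal sketch of that calling pattern, with simplified stand-ins (handle_one(), consume() and struct totals are hypothetical, not driver code):

    struct totals {
            unsigned int packets;
            unsigned int bytes;
    };

    /* Stand-in for the helper: report this packet's contribution via
     * out-parameters rather than touching the ring totals directly. */
    static void handle_one(int ok, unsigned int size,
                           unsigned int *rx_packets, unsigned int *rx_bytes)
    {
            /* Optimistic defaults, overridden on failure, mirroring how
             * the skb-allocation failure path above zeroes both counters. */
            *rx_packets = 1;
            *rx_bytes = size;

            if (!ok) {
                    *rx_packets = 0;
                    *rx_bytes = 0;
            }
    }

    static struct totals consume(const unsigned int *sizes, const int *oks,
                                 int n)
    {
            struct totals t = { 0, 0 };
            unsigned int pkts, bytes;
            int i;

            for (i = 0; i < n; i++) {
                    handle_one(oks[i], sizes[i], &pkts, &bytes);
                    /* The caller's loop stays flat: one accumulation
                     * point regardless of which verdict path was taken. */
                    t.packets += pkts;
                    t.bytes += bytes;
            }
            return t;
    }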
@@ -284,17 +323,20 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
+ u16 next_to_clean = rx_ring->next_to_clean;
+ u16 count_mask = rx_ring->count - 1;
unsigned int xdp_res, xdp_xmit = 0;
bool failure = false;
- struct sk_buff *skb;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi;
+ unsigned int rx_packets;
+ unsigned int rx_bytes;
+ struct xdp_buff *bi;
unsigned int size;
u64 qword;
- rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = I40E_RX_DESC(rx_ring, next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
/* This memory barrier is needed to keep us from reading
@@ -307,11 +349,9 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_clean_programming_status(rx_ring,
rx_desc->raw.qword[0],
qword);
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- xsk_buff_free(*bi);
- *bi = NULL;
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ xsk_buff_free(bi);
+ next_to_clean = (next_to_clean + 1) & count_mask;
continue;
}
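This hunk and the previous one lean on the same trick: rx_ring->count is a power of two, so count - 1 is an all-ones mask and (idx + 1) & count_mask wraps the index without a branch, which is what lets i40e_inc_ntc() be dropped. A stand-alone sketch of the idiom (RING_SIZE is a made-up stand-in for the ring length):

    #include <assert.h>

    #define RING_SIZE 512u                  /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)       /* 511 = 0x1ff, all-ones mask */

    static unsigned short ring_next(unsigned short idx)
    {
            /* Equivalent to (idx + 1) % RING_SIZE, but a single AND
             * instead of a compare-and-reset or a division. */
            return (idx + 1) & RING_MASK;
    }

    int main(void)
    {
            assert(ring_next(0) == 1);
            assert(ring_next(RING_SIZE - 2) == RING_SIZE - 1);
            assert(ring_next(RING_SIZE - 1) == 0); /* wraps to the start */
            return 0;
    }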
@@ -320,53 +360,22 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
if (!size)
break;
- bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- (*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
-
- xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
- if (xdp_res) {
- if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
- xdp_xmit |= xdp_res;
- else
- xsk_buff_free(*bi);
-
- *bi = NULL;
- total_rx_bytes += size;
- total_rx_packets++;
-
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
- continue;
- }
-
- /* XDP_PASS path */
-
- /* NB! We are not checking for errors using
- * i40e_test_staterr with
- * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is due to that
- * SBP is *not* set in PRT_SBPVSI (default not set).
- */
- skb = i40e_construct_skb_zc(rx_ring, *bi);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- break;
- }
-
- *bi = NULL;
- cleaned_count++;
- i40e_inc_ntc(rx_ring);
-
- if (eth_skb_pad(skb))
- continue;
-
- total_rx_bytes += skb->len;
- total_rx_packets++;
-
- i40e_process_skb_fields(rx_ring, rx_desc, skb);
- napi_gro_receive(&rx_ring->q_vector->napi, skb);
+ bi = *i40e_rx_bi(rx_ring, next_to_clean);
+ bi->data_end = bi->data + size;
+ xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+
+ xdp_res = i40e_run_xdp_zc(rx_ring, bi);
+ i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets,
+ &rx_bytes, size, xdp_res);
+ total_rx_packets += rx_packets;
+ total_rx_bytes += rx_bytes;
+ xdp_xmit |= xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR);
+ next_to_clean = (next_to_clean + 1) & count_mask;
}
+ rx_ring->next_to_clean = next_to_clean;
+ cleaned_count = (next_to_clean - rx_ring->next_to_use - 1) & count_mask;
+
if (cleaned_count >= I40E_RX_BUFFER_WRITE)
failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
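cleaned_count is now computed once after the loop rather than incremented per descriptor. (next_to_clean - next_to_use - 1) & count_mask is the usual free-slot count for a power-of-two ring that keeps one slot empty to tell "full" apart from "empty". A worked sketch with made-up indices (free_slots() is hypothetical, not a driver helper):

    #include <assert.h>

    #define RING_SIZE 512u
    #define RING_MASK (RING_SIZE - 1)

    /* Slots the producer may refill: the distance from next_to_use (ntu)
     * to next_to_clean (ntc), minus the one slot that stays empty.
     * Two's-complement arithmetic plus the mask handles wraparound. */
    static unsigned int free_slots(unsigned short ntc, unsigned short ntu)
    {
            return (ntc - ntu - 1) & RING_MASK;
    }

    int main(void)
    {
            assert(free_slots(10, 10) == RING_SIZE - 1); /* empty ring */
            assert(free_slots(11, 10) == 0);             /* full ring */
            assert(free_slots(3, 500) == 14);  /* (3 - 500 - 1) mod 512 */
            return 0;
    }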
@@ -374,7 +383,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ if (failure || next_to_clean == rx_ring->next_to_use)
xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
@@ -604,16 +613,14 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
- u16 i;
-
- for (i = 0; i < rx_ring->count; i++) {
- struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);
+ u16 count_mask = rx_ring->count - 1;
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
- if (!rx_bi)
- continue;
+ for ( ; ntc != ntu; ntc = (ntc + 1) & count_mask) {
+ struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, ntc);
xsk_buff_free(rx_bi);
- rx_bi = NULL;
}
}
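With the hot path no longer writing NULL back into consumed slots, only the [next_to_clean, next_to_use) range can hold live buffers, so teardown walks exactly that range instead of scanning every entry and NULL-checking each one. A minimal sketch of the range walk (struct buf and buf_free() are simplified stand-ins for struct xdp_buff and xsk_buff_free()):

    #define RING_SIZE 512u
    #define RING_MASK (RING_SIZE - 1)

    struct buf;                             /* stand-in for struct xdp_buff */
    extern void buf_free(struct buf *b);    /* stand-in for xsk_buff_free() */

    /* Free only the in-flight range [ntc, ntu): everything outside it has
     * either been handed off already or was never filled, so there is no
     * need for per-entry NULL checks. */
    static void clean_ring(struct buf **ring, unsigned short ntc,
                           unsigned short ntu)
    {
            for ( ; ntc != ntu; ntc = (ntc + 1) & RING_MASK)
                    buf_free(ring[ntc]);
    }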