path: root/drivers/net/ethernet/intel/ice/ice_xsk.c
author    Maciej Fijalkowski <maciej.fijalkowski@intel.com>  2022-04-13 17:30:06 +0200
committer Daniel Borkmann <daniel@iogearbox.net>             2022-04-15 21:10:37 +0200
commit    50ae066480738e0d3ce72a90c8c0f4d9de6092e0 (patch)
tree      645af467aa5753b1356423d06e3f20f707a86481 /drivers/net/ethernet/intel/ice/ice_xsk.c
parent    ixgbe, xsk: Decorate IXGBE_XDP_REDIR with likely() (diff)
ice, xsk: Terminate Rx side of NAPI when XSK Rx queue gets full
When the XSK pool uses the need_wakeup feature, correlate -ENOBUFS returned
from xdp_do_redirect() with the XSK Rx queue being full. In such a case,
terminate the Rx processing being done on the current HW Rx ring and let
user space consume descriptors from the XSK Rx queue, so that there is room
the driver can use later on.

Introduce a new internal return code, ICE_XDP_EXIT, to indicate the case
described above. Note that it affects neither the Tx processing bound to
the same NAPI context nor the other Rx rings.

Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220413153015.453864-6-maciej.fijalkowski@intel.com
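For context, a sketch of where the new return code sits relative to the
existing ICE_XDP_* codes. The define itself lands in the driver's Tx/Rx
header in this series, not in the ice_xsk.c diff shown below, and the
exact bit value here is an assumption:

    /* Sketch, not part of the diff below: ICE_XDP_EXIT alongside the
     * existing internal return codes; the bit value is assumed. */
    #define ICE_XDP_PASS		0
    #define ICE_XDP_CONSUMED	BIT(0)
    #define ICE_XDP_TX		BIT(1)
    #define ICE_XDP_REDIR		BIT(2)
    #define ICE_XDP_EXIT		BIT(3)	/* stop Rx side of NAPI; XSK Rx queue full */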
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_xsk.c')
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 29
1 file changed, 19 insertions, 10 deletions
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index e9ff05de0084..99bfa21c3938 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -540,9 +540,13 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (likely(act == XDP_REDIRECT)) {
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- if (err)
- goto out_failure;
- return ICE_XDP_REDIR;
+ if (!err)
+ return ICE_XDP_REDIR;
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ result = ICE_XDP_EXIT;
+ else
+ result = ICE_XDP_CONSUMED;
+ goto out_failure;
}
switch (act) {
@@ -553,15 +557,16 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
+ case XDP_DROP:
+ result = ICE_XDP_CONSUMED;
+ break;
default:
bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act);
fallthrough;
case XDP_ABORTED:
+ result = ICE_XDP_CONSUMED;
out_failure:
trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
- fallthrough;
- case XDP_DROP:
- result = ICE_XDP_CONSUMED;
break;
}
@@ -629,12 +634,16 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
- if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)))
+ if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
- else if (xdp_res == ICE_XDP_CONSUMED)
+ } else if (xdp_res == ICE_XDP_EXIT) {
+ failure = true;
+ break;
+ } else if (xdp_res == ICE_XDP_CONSUMED) {
xsk_buff_free(xdp);
- else
+ } else if (xdp_res == ICE_XDP_PASS) {
goto construct_skb;
+ }
total_rx_bytes += size;
total_rx_packets++;
@@ -669,7 +678,7 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- failure = !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, ICE_DESC_UNUSED(rx_ring));
ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
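
To illustrate the user-space side the commit message relies on (consuming
descriptors from the XSK Rx queue so the driver regains room), here is a
minimal sketch using the libxdp/libbpf AF_XDP ring helpers. The socket and
umem setup, the batch size, and names such as xsk_fd, rx and fq are
assumptions for illustration, not part of this patch:

    /* A sketch under the assumptions above: drain the XSK Rx ring and
     * hand frames back via the fill ring, kicking the kernel through
     * recvfrom() when the fill ring carries the need_wakeup flag.
     * Built against libxdp (<xdp/xsk.h>); older libbpf used <bpf/xsk.h>.
     */
    #include <sys/socket.h>
    #include <xdp/xsk.h>

    #define BATCH 64 /* assumed batch size */

    static void drain_rx(int xsk_fd, struct xsk_ring_cons *rx,
                         struct xsk_ring_prod *fq)
    {
            __u32 idx_rx = 0, idx_fq = 0, i, rcvd;

            /* Each descriptor consumed here frees a slot in the XSK Rx
             * queue -- the room ice_clean_rx_irq_zc() waits for after
             * breaking out early on ICE_XDP_EXIT. */
            rcvd = xsk_ring_cons__peek(rx, BATCH, &idx_rx);
            if (!rcvd) {
                    if (xsk_ring_prod__needs_wakeup(fq))
                            recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
                    return;
            }

            /* Reserve fill-ring slots so the umem frames go back to the
             * kernel once processed. */
            while (xsk_ring_prod__reserve(fq, rcvd, &idx_fq) != rcvd) {
                    if (xsk_ring_prod__needs_wakeup(fq))
                            recvfrom(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, NULL);
            }

            for (i = 0; i < rcvd; i++) {
                    __u64 addr = xsk_ring_cons__rx_desc(rx, idx_rx++)->addr;

                    /* ... process the frame at umem offset 'addr' ... */
                    *xsk_ring_prod__fill_addr(fq, idx_fq++) = addr;
            }

            xsk_ring_prod__release(fq, rcvd);
            xsk_ring_cons__release(rx, rcvd);
    }

With need_wakeup enabled the driver only resumes Rx when asked, so an
application that stops calling into a loop like this will simply leave the
HW ring parked after ICE_XDP_EXIT rather than have the driver spin on a
full XSK Rx queue.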