author:    David S. Miller <davem@davemloft.net>  2019-09-06 16:49:17 +0200
committer: David S. Miller <davem@davemloft.net>  2019-09-06 16:49:17 +0200
commit:    1e46c09ec10049a9e366153b32e41cc557383fdb
tree:      b2f86d40fd1ddf32522b5dd1792158daeab72e21 /drivers
parent:    lan743x: remove redundant assignment to variable rx_process_result
parent:    Merge branch 'bpf-af-xdp-barrier-fixes'
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add the ability to use unaligned chunks in the AF_XDP umem. By relaxing
   where the chunks can be placed, it allows using an arbitrary buffer size
   and placing a chunk wherever there is a free address in the umem. This
   helps more seamless DPDK AF_XDP driver integration. Support for i40e,
   ixgbe and mlx5e, from Kevin and Maxim.

2) Addition of a wakeup flag for AF_XDP tx and fill rings so the application
   can wake up the kernel for rx/tx processing, which avoids busy-spinning
   of the latter. Useful when the app and the driver are located on the same
   core. Support for i40e, ixgbe and mlx5e, from Magnus and Maxim.

3) bpftool fixes for printf()-like functions so the compiler can actually
   enforce checks, bpftool build system improvements for custom output
   directories, and addition of the 'bpftool map freeze' command, from
   Quentin.

4) Support attaching/detaching XDP programs from the 'bpftool net' command,
   from Daniel.

5) Automatic xskmap cleanup when an AF_XDP socket is released, and several
   barrier/{read,write}_once fixes in AF_XDP code, from Björn.

6) Relicense of bpf_helpers.h/bpf_endian.h for future libbpf inclusion, as
   well as libbpf versioning improvements, from Andrii.

7) Several new BPF kselftests for verifier precision tracking, from Alexei.

8) Several BPF kselftest fixes wrt endianness to run on s390x, from Ilya.

9) And more BPF kselftest improvements all over the place, from Stanislav.

10) Add a simple BPF map op cache to the nfp driver to batch dumps, from
    Jakub.

11) AF_XDP socket umem mapping improvements for 32-bit archs, from Ivan.

12) Add BPF-to-BPF call and BTF line info support to the s390x JIT, from
    Yauheni.

13) Small optimization in the arm64 JIT to spare one insn for BPF_MOD, from
    Jerin.

14) Fix an error check in the bpf_tcp_gen_syncookie() helper, from Petar.

15) Various minor fixes and cleanups, from Nathan, Masahiro, Masanari,
    Peter, Wei, Yue.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
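For context on items 1) and 2), below is a rough userspace sketch (not part of this
pull request) of how an application would opt into the new behaviour. It assumes the
uapi flags and libbpf xsk.h helpers introduced alongside this series
(XDP_UMEM_UNALIGNED_CHUNK_FLAG, XDP_USE_NEED_WAKEUP, xsk_ring_prod__needs_wakeup());
the function names setup_socket/kick_tx_if_needed/kick_rx_if_needed and the 3072-byte
frame size are purely illustrative, and error handling is omitted.

#include <stddef.h>
#include <poll.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>   /* XDP_USE_NEED_WAKEUP, XDP_UMEM_UNALIGNED_CHUNK_FLAG */
#include <bpf/xsk.h>        /* libbpf AF_XDP helpers (xsk_*) */

/* Illustrative setup: register the umem with unaligned chunks (item 1) and
 * bind the socket with the need_wakeup flag (item 2).
 */
static struct xsk_socket *setup_socket(void *umem_area, size_t size,
				       const char *ifname, struct xsk_umem **umem,
				       struct xsk_ring_prod *fill,
				       struct xsk_ring_cons *comp,
				       struct xsk_ring_prod *tx,
				       struct xsk_ring_cons *rx)
{
	struct xsk_umem_config ucfg = {
		.fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.frame_size = 3072,                     /* no longer needs to be a power of two */
		.frame_headroom = 0,
		.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG, /* opt into unaligned chunks */
	};
	struct xsk_socket_config scfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.bind_flags = XDP_USE_NEED_WAKEUP,      /* driver flags when a kick is needed */
	};
	struct xsk_socket *xsk;

	if (xsk_umem__create(umem, umem_area, size, fill, comp, &ucfg))
		return NULL;
	if (xsk_socket__create(&xsk, ifname, 0 /* queue */, *umem, rx, tx, &scfg))
		return NULL;
	return xsk;
}

/* Tx path: only pay for the syscall when the driver asked for a wakeup. */
static void kick_tx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *tx)
{
	if (xsk_ring_prod__needs_wakeup(tx))
		sendto(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
}

/* Rx path: if the fill ring ran dry, the driver sets need_wakeup and the
 * application kicks it via poll() after refilling.
 */
static void kick_rx_if_needed(struct xsk_socket *xsk, struct xsk_ring_prod *fill)
{
	if (xsk_ring_prod__needs_wakeup(fill)) {
		struct pollfd pfd = { .fd = xsk_socket__fd(xsk), .events = POLLIN };

		poll(&pfd, 1, 0);
	}
}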
Diffstat (limited to 'drivers')
 drivers/net/ethernet/intel/i40e/i40e_main.c             |   5
 drivers/net/ethernet/intel/i40e/i40e_xsk.c              |  52
 drivers/net/ethernet/intel/i40e/i40e_xsk.h              |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c           |   5
 drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h    |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c            |  49
 drivers/net/ethernet/mellanox/mlx5/core/en/params.c     |  23
 drivers/net/ethernet/mellanox/mlx5/core/en/params.h     |   2
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c        |   8
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c     |   5
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h     |  14
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c  |  15
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c     |   2
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h     |  14
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c       |   2
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c         |   7
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c       |  27
 drivers/net/ethernet/netronome/nfp/bpf/cmsg.c           | 187
 drivers/net/ethernet/netronome/nfp/bpf/fw.h             |   1
 drivers/net/ethernet/netronome/nfp/bpf/main.c           |  33
 drivers/net/ethernet/netronome/nfp/bpf/main.h           |  24
 drivers/net/ethernet/netronome/nfp/bpf/offload.c        |   3
 drivers/net/ethernet/netronome/nfp/nfp_net.h            |   2
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c     |   9
 24 files changed, 414 insertions(+), 79 deletions(-)
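The i40e, ixgbe and mlx5e hunks below all replace open-coded "handle + headroom"
arithmetic with xsk_umem_adjust_offset(). That is a consequence of the unaligned-chunk
mode from item 1): when chunks may start at arbitrary addresses, the data offset can no
longer be folded into the address itself and is instead carried in the upper bits of
the 64-bit handle. The snippet below is only a rough model of that encoding:
adjust_offset() and handle_to_addr() are illustrative stand-ins for the kernel's
xsk_umem_adjust_offset() and xsk_umem_add_offset_to_addr(), and the constant values are
assumed to mirror include/uapi/linux/if_xdp.h as of this series.

#include <stdint.h>

/* Assumed to mirror include/uapi/linux/if_xdp.h from this series. */
#define XDP_UMEM_UNALIGNED_CHUNK_FLAG	(1 << 0)
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT	48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

/* Aligned mode: the offset is simply added to the address. Unaligned mode:
 * the base address stays in the lower 48 bits and the offset is stashed in
 * the upper 16 bits, so the owning chunk can still be located from the
 * lower bits of the handle.
 */
static uint64_t adjust_offset(uint32_t umem_flags, uint64_t address, uint64_t offset)
{
	if (umem_flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
	return address + offset;
}

/* Consumer side: recover the effective umem address from an unaligned handle. */
static uint64_t handle_to_addr(uint64_t handle)
{
	return (handle & XSK_UNALIGNED_BUF_ADDR_MASK) +
	       (handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}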
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index fdf43d87e983..3c8a2f55c43a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -12530,7 +12530,8 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
if (vsi->xdp_rings[i]->xsk_umem)
- (void)i40e_xsk_async_xmit(vsi->netdev, i);
+ (void)i40e_xsk_wakeup(vsi->netdev, i,
+ XDP_WAKEUP_RX);
return 0;
}
@@ -12852,7 +12853,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_bridge_setlink = i40e_ndo_bridge_setlink,
.ndo_bpf = i40e_xdp,
.ndo_xdp_xmit = i40e_xdp_xmit,
- .ndo_xsk_async_xmit = i40e_xsk_async_xmit,
+ .ndo_xsk_wakeup = i40e_xsk_wakeup,
.ndo_dfwd_add_station = i40e_fwd_add,
.ndo_dfwd_del_station = i40e_fwd_del,
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 32bad014d76c..0373bc6c7e61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -116,7 +116,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
return err;
/* Kick start the NAPI context so that receiving will start */
- err = i40e_xsk_async_xmit(vsi->netdev, qid);
+ err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
if (err)
return err;
}
@@ -190,7 +190,9 @@ int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
**/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = I40E_XDP_PASS;
+ u64 offset = umem->headroom;
struct i40e_ring *xdp_ring;
struct bpf_prog *xdp_prog;
u32 act;
@@ -201,7 +203,10 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
*/
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- xdp->handle += xdp->data - xdp->data_hard_start;
+ offset += xdp->data - xdp->data_hard_start;
+
+ xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
+
switch (act) {
case XDP_PASS:
break;
@@ -262,7 +267,7 @@ static bool i40e_alloc_buffer_zc(struct i40e_ring *rx_ring,
bi->addr = xdp_umem_get_data(umem, handle);
bi->addr += hr;
- bi->handle = handle + umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr(umem);
return true;
@@ -299,7 +304,7 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
bi->addr = xdp_umem_get_data(umem, handle);
bi->addr += hr;
- bi->handle = handle + umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr_rq(umem);
return true;
@@ -420,8 +425,6 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *old_bi)
{
struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
- unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
- u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
u16 nta = rx_ring->next_to_alloc;
/* update, and store next to alloc */
@@ -429,14 +432,9 @@ static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- new_bi->dma = old_bi->dma & mask;
- new_bi->dma += hr;
-
- new_bi->addr = (void *)((unsigned long)old_bi->addr & mask);
- new_bi->addr += hr;
-
- new_bi->handle = old_bi->handle & mask;
- new_bi->handle += rx_ring->xsk_umem->headroom;
+ new_bi->dma = old_bi->dma;
+ new_bi->addr = old_bi->addr;
+ new_bi->handle = old_bi->handle;
old_bi->addr = NULL;
}
@@ -471,7 +469,8 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
bi->addr += hr;
- bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
+ rx_ring->xsk_umem->headroom);
}
/**
@@ -626,6 +625,15 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
+
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+ return (int)total_rx_packets;
+ }
return failure ? budget : (int)total_rx_packets;
}
@@ -681,6 +689,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
i40e_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -759,19 +769,27 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
out_xmit:
+ if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
+ if (tx_ring->next_to_clean == tx_ring->next_to_use)
+ xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ else
+ xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
+ }
+
xmit_done = i40e_xmit_zc(tx_ring, budget);
return work_done && xmit_done;
}
/**
- * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit
+ * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
* @dev: the netdevice
* @queue_id: queue id to wake up
+ * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
*
* Returns <0 for errors, 0 otherwise.
**/
-int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id)
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index 8cc0a2e7d9a2..9ed59c14eb55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -18,6 +18,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
struct i40e_ring *tx_ring, int napi_budget);
-int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
#endif /* _I40E_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 17b7ae9f46ec..9bcae44e9883 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10260,7 +10260,8 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
if (adapter->xdp_ring[i]->xsk_umem)
- (void)ixgbe_xsk_async_xmit(adapter->netdev, i);
+ (void)ixgbe_xsk_wakeup(adapter->netdev, i,
+ XDP_WAKEUP_RX);
return 0;
}
@@ -10379,7 +10380,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_features_check = ixgbe_features_check,
.ndo_bpf = ixgbe_xdp,
.ndo_xdp_xmit = ixgbe_xdp_xmit,
- .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit,
+ .ndo_xsk_wakeup = ixgbe_xsk_wakeup,
};
static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index d93a690aff74..6d01700b46bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -42,7 +42,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring);
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget);
-int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id);
+int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring);
#endif /* #define _IXGBE_TXRX_COMMON_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 6b609553329f..ad802a8909e0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,7 +100,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
ixgbe_txrx_ring_enable(adapter, qid);
/* Kick start the NAPI context so that receiving will start */
- err = ixgbe_xsk_async_xmit(adapter->netdev, qid);
+ err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
if (err)
return err;
}
@@ -143,7 +143,9 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
struct xdp_buff *xdp)
{
+ struct xdp_umem *umem = rx_ring->xsk_umem;
int err, result = IXGBE_XDP_PASS;
+ u64 offset = umem->headroom;
struct bpf_prog *xdp_prog;
struct xdp_frame *xdpf;
u32 act;
@@ -151,7 +153,10 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
act = bpf_prog_run_xdp(xdp_prog, xdp);
- xdp->handle += xdp->data - xdp->data_hard_start;
+ offset += xdp->data - xdp->data_hard_start;
+
+ xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset);
+
switch (act) {
case XDP_PASS:
break;
@@ -201,8 +206,6 @@ ixgbe_rx_buffer *ixgbe_get_rx_buffer_zc(struct ixgbe_ring *rx_ring,
static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
struct ixgbe_rx_buffer *obi)
{
- unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
- u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
u16 nta = rx_ring->next_to_alloc;
struct ixgbe_rx_buffer *nbi;
@@ -212,14 +215,9 @@ static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- nbi->dma = obi->dma & mask;
- nbi->dma += hr;
-
- nbi->addr = (void *)((unsigned long)obi->addr & mask);
- nbi->addr += hr;
-
- nbi->handle = obi->handle & mask;
- nbi->handle += rx_ring->xsk_umem->headroom;
+ nbi->dma = obi->dma;
+ nbi->addr = obi->addr;
+ nbi->handle = obi->handle;
obi->addr = NULL;
obi->skb = NULL;
@@ -250,7 +248,8 @@ void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle);
bi->addr += hr;
- bi->handle = (u64)handle + rx_ring->xsk_umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle,
+ rx_ring->xsk_umem->headroom);
}
static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
@@ -276,7 +275,7 @@ static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring,
bi->addr = xdp_umem_get_data(umem, handle);
bi->addr += hr;
- bi->handle = handle + umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr(umem);
return true;
@@ -303,7 +302,7 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
bi->addr = xdp_umem_get_data(umem, handle);
bi->addr += hr;
- bi->handle = handle + umem->headroom;
+ bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom);
xsk_umem_discard_addr_rq(umem);
return true;
@@ -547,6 +546,14 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
+ if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ else
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+
+ return (int)total_rx_packets;
+ }
return failure ? budget : (int)total_rx_packets;
}
@@ -615,6 +622,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
+ xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return !!budget && work_done;
@@ -688,11 +697,19 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
if (xsk_frames)
xsk_umem_complete_tx(umem, xsk_frames);
+ if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) {
+ if (tx_ring->next_to_clean == tx_ring->next_to_use)
+ xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ else
+ xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
+ }
+
xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
+
return budget > 0 && xmit_done;
}
-int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid)
+int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_ring *ring;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 79301d116667..eb2e1f2138e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -25,18 +25,33 @@ u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
return headroom;
}
-u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk)
+u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
{
u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk);
- u32 frag_sz = linear_rq_headroom + hw_mtu;
+
+ return linear_rq_headroom + hw_mtu;
+}
+
+u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk);
/* AF_XDP doesn't build SKBs in place. */
if (!xsk)
frag_sz = MLX5_SKB_FRAG_SZ(frag_sz);
- /* XDP in mlx5e doesn't support multiple packets per page. */
+ /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a
+ * special case. It can run with frames smaller than a page, as it
+ * doesn't allocate pages dynamically. However, here we pretend that
+ * fragments are page-sized: it allows to treat XSK frames like pages
+ * by redirecting alloc and free operations to XSK rings and by using
+ * the fact there are no multiple packets per "page" (which is a frame).
+ * The latter is important, because frames may come in a random order,
+ * and we will have trouble assemblying a real page of multiple frames.
+ */
if (mlx5e_rx_is_xdp(params, xsk))
frag_sz = max_t(u32, frag_sz, PAGE_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 3a615d663d84..989d8f429438 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -76,6 +76,8 @@ static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk);
u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 1ed5c33e022f..f049e0ac308a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -122,6 +122,7 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
void *va, u16 *rx_headroom, u32 *len, bool xsk)
{
struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
+ struct xdp_umem *umem = rq->umem;
struct xdp_buff xdp;
u32 act;
int err;
@@ -138,8 +139,11 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
xdp.rxq = &rq->xdp_rxq;
act = bpf_prog_run_xdp(prog, &xdp);
- if (xsk)
- xdp.handle += xdp.data - xdp.data_hard_start;
+ if (xsk) {
+ u64 off = xdp.data - xdp.data_hard_start;
+
+ xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off);
+ }
switch (act) {
case XDP_PASS:
*rx_headroom = xdp.data - xdp.data_hard_start;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 6a55573ec8f2..475b6bd5d29b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -24,7 +24,8 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
if (!xsk_umem_peek_addr_rq(umem, &handle))
return -ENOMEM;
- dma_info->xsk.handle = handle + rq->buff.umem_headroom;
+ dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle,
+ rq->buff.umem_headroom);
dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle);
/* No need to add headroom to the DMA address. In striding RQ case, we
@@ -104,7 +105,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* head_offset is not used in this function, because di->xsk.data and
* di->addr point directly to the necessary place. Furthermore, in the
- * current implementation, one page = one packet = one frame, so
+ * current implementation, UMR pages are mapped to XSK frames, so
* head_offset should always be 0.
*/
WARN_ON_ONCE(head_offset);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index 307b923a1361..cab0e93497ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -5,6 +5,7 @@
#define __MLX5_EN_XSK_RX_H__
#include "en.h"
+#include <net/xdp_sock.h>
/* RX data path */
@@ -24,4 +25,17 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
+static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
+{
+ if (!xsk_umem_uses_need_wakeup(rq->umem))
+ return alloc_err;
+
+ if (unlikely(alloc_err))
+ xsk_set_rx_need_wakeup(rq->umem);
+ else
+ xsk_clear_rx_need_wakeup(rq->umem);
+
+ return false;
+}
+
#endif /* __MLX5_EN_XSK_RX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index d360750b25b7..631af8dee517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -4,18 +4,23 @@
#include "setup.h"
#include "en/params.h"
+/* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may
+ * change unexpectedly, and mlx5e has a minimum valid stride size for striding
+ * RQ, keep this check in the driver.
+ */
+#define MLX5E_MIN_XSK_CHUNK_SIZE 2048
+
bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current
- * mlx5e XDP implementation doesn't support multiple packets per page.
- */
- if (xsk->chunk_size != PAGE_SIZE)
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
+ if (xsk->chunk_size > PAGE_SIZE ||
+ xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
return false;
/* Current MTU and XSK headroom don't allow packets to fit the frames. */
- if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size)
+ if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size)
return false;
/* frag_sz is different for regular and XSK RQs, so ensure that linear
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fd2c75b4b519..87827477d38c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -7,7 +7,7 @@
#include "en/params.h"
#include <net/xdp_sock.h>
-int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid)
+int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 7add18bf78d8..79b487d89757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -5,11 +5,23 @@
#define __MLX5_EN_XSK_TX_H__
#include "en.h"
+#include <net/xdp_sock.h>
/* TX data path */
-int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid);
+int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
+static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
+{
+ if (!xsk_umem_uses_need_wakeup(sq->umem))
+ return;
+
+ if (sq->pc != sq->cc)
+ xsk_clear_tx_need_wakeup(sq->umem);
+ else
+ xsk_set_tx_need_wakeup(sq->umem);
+}
+
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dadadf221087..1cacda1bc1b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4580,7 +4580,7 @@ const struct net_device_ops mlx5e_netdev_ops = {
.ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit,
- .ndo_xsk_async_xmit = mlx5e_xsk_async_xmit,
+ .ndo_xsk_wakeup = mlx5e_xsk_wakeup,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 2fd2760d0bb7..d6a547238de0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -695,8 +695,11 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
rq->mpwqe.actual_wq_head = head;
- /* If XSK Fill Ring doesn't have enough frames, busy poll by
- * rescheduling the NAPI poll.
+ /* If XSK Fill Ring doesn't have enough frames, report the error, so
+ * that one of the actions can be performed:
+ * 1. If need_wakeup is used, signal that the application has to kick
+ * the driver when it refills the Fill Ring.
+ * 2. Otherwise, busy poll by rescheduling the NAPI poll.
*/
if (unlikely(alloc_err == -ENOMEM && rq->umem))
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 49b06b256c92..257a7c9f7a14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -33,6 +33,7 @@
#include <linux/irq.h>
#include "en.h"
#include "en/xdp.h"
+#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
@@ -81,6 +82,29 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}
+static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq)
+{
+ bool busy_xsk = false, xsk_rx_alloc_err;
+
+ /* Handle the race between the application querying need_wakeup and the
+ * driver setting it:
+ * 1. Update need_wakeup both before and after the TX. If it goes to
+ * "yes", it can only happen with the first update.
+ * 2. If the application queried need_wakeup before we set it, the
+ * packets will be transmitted anyway, even w/o a wakeup.
+ * 3. Give a chance to clear need_wakeup after new packets were queued
+ * for TX.
+ */
+ mlx5e_xsk_update_tx_wakeup(xsksq);
+ busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
+ mlx5e_xsk_update_tx_wakeup(xsksq);
+
+ xsk_rx_alloc_err = xskrq->post_wqes(xskrq);
+ busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err);
+
+ return busy_xsk;
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -122,8 +146,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
if (xsk_open) {
mlx5e_poll_ico_cq(&c->xskicosq.cq);
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
- busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET);
- busy_xsk |= xskrq->post_wqes(xskrq);
+ busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
}
busy |= busy_xsk;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index bc9850e4ec5e..0e2db6ea79e9 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -6,6 +6,7 @@
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
+#include <linux/timekeeping.h>
#include "../ccm.h"
#include "../nfp_app.h"
@@ -175,29 +176,151 @@ nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}
+static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_UPDATE ||
+ op == NFP_CCM_TYPE_BPF_MAP_DELETE;
+}
+
+static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP ||
+ op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op)
+{
+ return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST ||
+ op == NFP_CCM_TYPE_BPF_MAP_GETNEXT;
+}
+
+static unsigned int
+nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+ const u8 *key, u8 *out_key, u8 *out_value,
+ u32 *cache_gen)
+{
+ struct bpf_map *map = &nfp_map->offmap->map;
+ struct nfp_app_bpf *bpf = nfp_map->bpf;
+ unsigned int i, count, n_entries;
+ struct cmsg_reply_map_op *reply;
+
+ n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1;
+
+ spin_lock(&nfp_map->cache_lock);
+ *cache_gen = nfp_map->cache_gen;
+ if (nfp_map->cache_blockers)
+ n_entries = 1;
+
+ if (nfp_bpf_ctrl_op_cache_invalidate(op))
+ goto exit_block;
+ if (!nfp_bpf_ctrl_op_cache_capable(op))
+ goto exit_unlock;
+
+ if (!nfp_map->cache)
+ goto exit_unlock;
+ if (nfp_map->cache_to < ktime_get_ns())
+ goto exit_invalidate;
+
+ reply = (void *)nfp_map->cache->data;
+ count = be32_to_cpu(reply->count);
+
+ for (i = 0; i < count; i++) {
+ void *cached_key;
+
+ cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i);
+ if (memcmp(cached_key, key, map->key_size))
+ continue;
+
+ if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP)
+ memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i),
+ map->value_size);
+ if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) {
+ if (i + 1 == count)
+ break;
+
+ memcpy(out_key,
+ nfp_bpf_ctrl_reply_key(bpf, reply, i + 1),
+ map->key_size);
+ }
+
+ n_entries = 0;
+ goto exit_unlock;
+ }
+ goto exit_unlock;
+
+exit_block:
+ nfp_map->cache_blockers++;
+exit_invalidate:
+ dev_consume_skb_any(nfp_map->cache);
+ nfp_map->cache = NULL;
+exit_unlock:
+ spin_unlock(&nfp_map->cache_lock);
+ return n_entries;
+}
+
+static void
+nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op,
+ struct sk_buff *skb, u32 cache_gen)
+{
+ bool blocker, filler;
+
+ blocker = nfp_bpf_ctrl_op_cache_invalidate(op);
+ filler = nfp_bpf_ctrl_op_cache_fill(op);
+ if (blocker || filler) {
+ u64 to = 0;
+
+ if (filler)
+ to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS;
+
+ spin_lock(&nfp_map->cache_lock);
+ if (blocker) {
+ nfp_map->cache_blockers--;
+ nfp_map->cache_gen++;
+ }
+ if (filler && !nfp_map->cache_blockers &&
+ nfp_map->cache_gen == cache_gen) {
+ nfp_map->cache_to = to;
+ swap(nfp_map->cache, skb);
+ }
+ spin_unlock(&nfp_map->cache_lock);
+ }
+
+ dev_consume_skb_any(skb);
+}
+
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
+ unsigned int n_entries, reply_entries, count;
struct nfp_app_bpf *bpf = nfp_map->bpf;
struct bpf_map *map = &offmap->map;
struct cmsg_reply_map_op *reply;
struct cmsg_req_map_op *req;
struct sk_buff *skb;
+ u32 cache_gen;
int err;
/* FW messages have no space for more than 32 bits of flags */
if (flags >> 32)
return -EOPNOTSUPP;
+ /* Handle op cache */
+ n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key,
+ out_value, &cache_gen);
+ if (!n_entries)
+ return 0;
+
skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ err = -ENOMEM;
+ goto err_cache_put;
+ }
req = (void *)skb->data;
req->tid = cpu_to_be32(nfp_map->tid);
- req->count = cpu_to_be32(1);
+ req->count = cpu_to_be32(n_entries);
req->flags = cpu_to_be32(flags);
/* Copy inputs */
@@ -207,16 +330,38 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
map->value_size);
- skb = nfp_ccm_communicate(&bpf->ccm, skb, op,
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto err_cache_put;
+ }
+
+ if (skb->len < sizeof(*reply)) {
+ cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n",
+ op, skb->len);
+ err = -EIO;
+ goto err_free;
+ }
reply = (void *)skb->data;
+ count = be32_to_cpu(reply->count);
err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
+ /* FW responds with message sized to hold the good entries,
+ * plus one extra entry if there was an error.
+ */
+ reply_entries = count + !!err;
+ if (n_entries > 1 && count)
+ err = 0;
if (err)
goto err_free;
+ if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) {
+ cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n",
+ op, skb->len, reply_entries);
+ err = -EIO;
+ goto err_free;
+ }
+
/* Copy outputs */
if (out_key)
memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
@@ -225,11 +370,13 @@ nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, enum nfp_ccm_type op,
memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
map->value_size);
- dev_consume_skb_any(skb);
+ nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen);
return 0;
err_free:
dev_kfree_skb_any(skb);
+err_cache_put:
+ nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen);
return err;
}
@@ -267,11 +414,29 @@ int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
key, NULL, 0, next_key, NULL);
}
+unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf)
+{
+ return max(nfp_bpf_cmsg_map_req_size(bpf, 1),
+ nfp_bpf_cmsg_map_reply_size(bpf, 1));
+}
+
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
- return max3((unsigned int)NFP_NET_DEFAULT_MTU,
- nfp_bpf_cmsg_map_req_size(bpf, 1),
- nfp_bpf_cmsg_map_reply_size(bpf, 1));
+ return max3(NFP_NET_DEFAULT_MTU,
+ nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT),
+ nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT));
+}
+
+unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf)
+{
+ unsigned int mtu, req_max, reply_max, entry_sz;
+
+ mtu = bpf->app->ctrl->dp.mtu;
+ entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz;
+ req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz;
+ reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz;
+
+ return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT);
}
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/fw.h b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
index 06c4286bd79e..a83a0ad5e27d 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/fw.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/fw.h
@@ -24,6 +24,7 @@ enum bpf_cap_tlv_type {
NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5,
NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6,
NFP_BPF_CAP_TYPE_ABI_VERSION = 7,
+ NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT = 8,
};
struct nfp_bpf_cap_tlv_func {
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 1c9fb11470df..8f732771d3fa 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -300,6 +300,14 @@ nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
}
static int
+nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
+ u32 length)
+{
+ bpf->cmsg_multi_ent = true;
+ return 0;
+}
+
+static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
u32 length)
{
@@ -375,6 +383,11 @@ static int nfp_bpf_parse_capabilities(struct nfp_app *app)
length))
goto err_release_free;
break;
+ case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
+ if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
+ length))
+ goto err_release_free;
+ break;
default:
nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
break;
@@ -415,6 +428,25 @@ static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}
+static int nfp_bpf_start(struct nfp_app *app)
+{
+ struct nfp_app_bpf *bpf = app->priv;
+
+ if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
+ nfp_err(bpf->app->cpp,
+ "ctrl channel MTU below min required %u < %u\n",
+ app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
+ return -EINVAL;
+ }
+
+ if (bpf->cmsg_multi_ent)
+ bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
+ else
+ bpf->cmsg_cache_cnt = 1;
+
+ return 0;
+}
+
static int nfp_bpf_init(struct nfp_app *app)
{
struct nfp_app_bpf *bpf;
@@ -488,6 +520,7 @@ const struct nfp_app_type app_bpf = {
.init = nfp_bpf_init,
.clean = nfp_bpf_clean,
+ .start = nfp_bpf_start,
.check_mtu = nfp_bpf_check_mtu,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index 57d6ff51e980..fac9c6f9e197 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -99,6 +99,7 @@ enum pkt_vec {
* @maps_neutral: hash table of offload-neutral maps (on pointer)
*
* @abi_version: global BPF ABI version
+ * @cmsg_cache_cnt: number of entries to read for caching
*
* @adjust_head: adjust head capability
* @adjust_head.flags: extra flags for adjust head
@@ -124,6 +125,7 @@ enum pkt_vec {
* @pseudo_random: FW initialized the pseudo-random machinery (CSRs)
* @queue_select: BPF can set the RX queue ID in packet vector
* @adjust_tail: BPF can simply trunc packet size for adjust tail
+ * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg
*/
struct nfp_app_bpf {
struct nfp_app *app;
@@ -134,6 +136,8 @@ struct nfp_app_bpf {
unsigned int cmsg_key_sz;
unsigned int cmsg_val_sz;
+ unsigned int cmsg_cache_cnt;
+
struct list_head map_list;
unsigned int maps_in_use;
unsigned int map_elems_in_use;
@@ -169,6 +173,7 @@ struct nfp_app_bpf {
bool pseudo_random;
bool queue_select;
bool adjust_tail;
+ bool cmsg_multi_ent;
};
enum nfp_bpf_map_use {
@@ -183,11 +188,21 @@ struct nfp_bpf_map_word {
unsigned char non_zero_update :1;
};
+#define NFP_BPF_MAP_CACHE_CNT 4U
+#define NFP_BPF_MAP_CACHE_TIME_NS (250 * 1000)
+
/**
* struct nfp_bpf_map - private per-map data attached to BPF maps for offload
* @offmap: pointer to the offloaded BPF map
* @bpf: back pointer to bpf app private structure
* @tid: table id identifying map on datapath
+ *
+ * @cache_lock: protects @cache_blockers, @cache_to, @cache
+ * @cache_blockers: number of ops in flight which block caching
+ * @cache_gen: counter incremented by every blocker on exit
+ * @cache_to: time when cache will no longer be valid (ns)
+ * @cache: skb with cached response
+ *
* @l: link on the nfp_app_bpf->map_list list
* @use_map: map of how the value is used (in 4B chunks)
*/
@@ -195,6 +210,13 @@ struct nfp_bpf_map {
struct bpf_offloaded_map *offmap;
struct nfp_app_bpf *bpf;
u32 tid;
+
+ spinlock_t cache_lock;
+ u32 cache_blockers;
+ u32 cache_gen;
+ u64 cache_to;
+ struct sk_buff *cache;
+
struct list_head l;
struct nfp_bpf_map_word use_map[];
};
@@ -564,7 +586,9 @@ nfp_bpf_goto_meta(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv);
+unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf);
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf);
+unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf);
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map);
void
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 39c9fec222b4..88fab6a82acf 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -385,6 +385,7 @@ nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
offmap->dev_priv = nfp_map;
nfp_map->offmap = offmap;
nfp_map->bpf = bpf;
+ spin_lock_init(&nfp_map->cache_lock);
res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
if (res < 0) {
@@ -407,6 +408,8 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
struct nfp_bpf_map *nfp_map = offmap->dev_priv;
nfp_bpf_ctrl_free_map(bpf, nfp_map);
+ dev_consume_skb_any(nfp_map->cache);
+ WARN_ON_ONCE(nfp_map->cache_blockers);
list_del_init(&nfp_map->l);
bpf->map_elems_in_use -= offmap->map.max_entries;
bpf->maps_in_use--;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 5d6c3738b494..250f510b1d21 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -66,7 +66,7 @@
#define NFP_NET_MAX_DMA_BITS 40
/* Default size for MTU and freelist buffer sizes */
-#define NFP_NET_DEFAULT_MTU 1500
+#define NFP_NET_DEFAULT_MTU 1500U
/* Maximum number of bytes prepended to a packet */
#define NFP_NET_MAX_PREPEND 64
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 6f97b554f7da..61aabffc8888 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -4116,14 +4116,7 @@ int nfp_net_init(struct nfp_net *nn)
/* Set default MTU and Freelist buffer size */
if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
- if (nn->app->ctrl_mtu <= nn->max_mtu) {
- nn->dp.mtu = nn->app->ctrl_mtu;
- } else {
- if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX)
- nn_warn(nn, "app requested MTU above max supported %u > %u\n",
- nn->app->ctrl_mtu, nn->max_mtu);
- nn->dp.mtu = nn->max_mtu;
- }
+ nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
} else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
nn->dp.mtu = nn->max_mtu;
} else {