author     David S. Miller <davem@davemloft.net>  2019-06-14 15:35:18 -0700
committer  David S. Miller <davem@davemloft.net>  2019-06-14 15:35:18 -0700
commit     eea9e3a40dbc15a4b03b447158c2555079875ba5 (patch)
tree       821f090c2965bc686322532b016f9330556d5acb
parent     Merge tag 'mac80211-next-for-davem-2019-06-14' of git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next (diff)
parent     net/mlx5e: use indirect calls wrapper for the rx packet handler (diff)
Merge branch 'net-mlx5-use-indirect-call-wrappers'
Paolo Abeni says:

====================
net/mlx5: use indirect call wrappers

The mlx5_core driver uses several indirect calls in the fast path; some of
them are invoked on each ingress packet, even for XDP-only traffic. This
series leverages the indirect call wrappers infrastructure to avoid the
expensive RETPOLINE overhead for two indirect calls in the fast path.

Each call is addressed in a separate patch; additionally, a couple of helpers
are introduced to cope with the higher number of possible direct-call
alternatives.

v2 -> v3:
 - do not add more INDIRECT_CALL_* macros
 - use only the direct calls always available regardless of the mlx5 build
   options in the last patch

v1 -> v2:
 - update the direct-call list and use a macro to define it, as per Saeed's
   suggestion. An intermediate additional macro is needed to allow arg list
   expansion
 - patch 2/3 is unchanged, as the generated code looks better this way than
   with the possible alternative (dropping BP hits)
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  25
1 file changed, 19 insertions(+), 6 deletions(-)
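For context, the INDIRECT_CALL_2() wrapper used in the hunks below avoids the retpoline by comparing the function pointer against the expected targets and branching to them directly on a match, taking the protected indirect call only as a fallback. A simplified sketch, modeled on the retpoline flavour of include/linux/indirect_call_wrapper.h (the in-tree definitions may differ in detail):

/* Compare the function pointer against the known candidates and call them
 * directly on a match; fall back to the (retpoline-protected) indirect call
 * only when no candidate matches.  Without retpolines, builds collapse
 * these to a plain f(__VA_ARGS__).
 */
#define INDIRECT_CALL_1(f, f1, ...)					\
	({								\
		likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__);	\
	})

#define INDIRECT_CALL_2(f, f2, f1, ...)					\
	({								\
		likely(f == f2) ? f2(__VA_ARGS__) :			\
				  INDIRECT_CALL_1(f, f1, __VA_ARGS__);	\
	})

With this, a call such as INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq, mlx5e_handle_rx_cqe, rq, cqe) costs two predictable compares plus a direct call in the common case, instead of a retpoline on every packet.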
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 13133e7f088e..935bf62eddc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -34,6 +34,7 @@
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
+#include <linux/indirect_call_wrapper.h>
 #include <net/ip6_checksum.h>
 #include <net/page_pool.h>
 #include <net/inet_ecn.h>
@@ -1092,7 +1093,10 @@ void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi       = get_frag(rq, ci);
 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+			      mlx5e_skb_from_cqe_linear,
+			      mlx5e_skb_from_cqe_nonlinear,
+			      rq, cqe, wi, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
@@ -1279,8 +1283,10 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 
 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 
-	skb = rq->mpwqe.skb_from_cqe_mpwrq(rq, wi, cqe_bcnt, head_offset,
-					   page_idx);
+	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
+			      mlx5e_skb_from_cqe_mpwrq_linear,
+			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
+			      rq, wi, cqe_bcnt, head_offset, page_idx);
 	if (!skb)
 		goto mpwrq_cqe_out;
 
@@ -1327,7 +1333,8 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
 
 		mlx5_cqwq_pop(cqwq);
 
-		rq->handle_rx_cqe(rq, cqe);
+		INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+				mlx5e_handle_rx_cqe, rq, cqe);
 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
 
 out:
@@ -1437,7 +1444,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi       = get_frag(rq, ci);
 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+			      mlx5e_skb_from_cqe_linear,
+			      mlx5e_skb_from_cqe_nonlinear,
+			      rq, cqe, wi, cqe_bcnt);
 	if (!skb)
 		goto wq_free_wqe;
 
@@ -1469,7 +1479,10 @@ void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	wi       = get_frag(rq, ci);
 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
 
-	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
+	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
+			      mlx5e_skb_from_cqe_linear,
+			      mlx5e_skb_from_cqe_nonlinear,
+			      rq, cqe, wi, cqe_bcnt);
 	if (unlikely(!skb)) {
 		/* a DROP, save the page-reuse checks */
 		mlx5e_free_rx_wqe(rq, wi, true);