path: root/net/core/filter.c
author		Jesper Dangaard Brouer <brouer@redhat.com>	2017-08-29 16:37:56 +0200
committer	David S. Miller <davem@davemloft.net>	2017-08-29 10:51:29 -0700
commit		f5836ca5e9867fa6ab88cadb9873af56d9ceb589 (patch)
tree		81b8317a3d986fb176b2df280d946bd250e4ad1a /net/core/filter.c
parent		xdp: make xdp tracepoints report bpf prog id instead of prog_tag (diff)
download	linux-dev-f5836ca5e9867fa6ab88cadb9873af56d9ceb589.tar.xz
		linux-dev-f5836ca5e9867fa6ab88cadb9873af56d9ceb589.zip
xdp: separate xdp_redirect tracepoint in error case
There is a need to split the xdp_redirect tracepoint into two tracepoints, separating the error case from the normal forward case.

Due to the extreme speeds XDP operates at, loading a tracepoint has a measurable impact. Single-core XDP REDIRECT (ethtool tuned rx-usecs 25) can do 13.7 Mpps forwarding, but loading a simple bpf_prog at the tracepoint (with a return 0) reduces performance to 10.2 Mpps (CPU E5-1650 v4 @ 3.60GHz, driver: ixgbe).

The overhead of loading a bpf-based tracepoint works out to about 25 nanosec ((1/10267937 - 1/13782002) * 10^9 = 24.83 ns).

Using perf record on the tracepoint event, with a non-matching --filter expression, the overhead is much larger: performance drops to 8.3 Mpps, a cost of about 48 nanosec ((1/8312497 - 1/13782002) * 10^9 = 47.74 ns).

Having separate tracepoints for the error cases, which should be far less frequent, allows running a continuous monitor for errors while not affecting the redirect forward performance (this has also been verified by measurements).

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
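As a quick check of the arithmetic above: the per-packet cost is simply the difference between the per-packet time budgets at the two forwarding rates. The small user-space helper below (not kernel code; the function name and the use of doubles are purely illustrative) reproduces the two figures from the commit message.

#include <stdio.h>

/* Per-packet budget is 1/pps seconds; the difference between the slow
 * and the fast rate, scaled to nanoseconds, is the added cost per packet.
 */
static double cost_ns(double pps_fast, double pps_slow)
{
        return (1.0 / pps_slow - 1.0 / pps_fast) * 1e9;
}

int main(void)
{
        /* bpf_prog loaded at the tracepoint: 13.78 Mpps -> 10.27 Mpps */
        printf("bpf tracepoint overhead: %.2f ns\n",
               cost_ns(13782002.0, 10267937.0));        /* ~24.83 ns */

        /* perf record with non-matching --filter: 13.78 Mpps -> 8.31 Mpps */
        printf("perf filter overhead:    %.2f ns\n",
               cost_ns(13782002.0, 8312497.0));          /* ~47.74 ns */

        return 0;
}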
Diffstat (limited to 'net/core/filter.c')
-rw-r--r--	net/core/filter.c	37
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 31eab77cc842..096e78de0b97 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2515,16 +2515,20 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
 	fwd = __dev_map_lookup_elem(map, index);
 	if (!fwd) {
 		err = -EINVAL;
-		goto out;
+		goto err;
 	}
 	if (ri->map_to_flush && ri->map_to_flush != map)
 		xdp_do_flush_map();
 	err = __bpf_tx_xdp(fwd, map, xdp, index);
-	if (likely(!err))
-		ri->map_to_flush = map;
-out:
-	trace_xdp_redirect_map(dev, xdp_prog, fwd, err, map, index);
+	if (unlikely(err))
+		goto err;
+
+	ri->map_to_flush = map;
+	trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
+	return 0;
+err:
+	trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
 	return err;
 }
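The hunk above adds a call to trace_xdp_redirect_map_err(), passing the error code as an extra argument. The actual tracepoint definitions live in include/trace/events/xdp.h and are not part of this diffstat (it is limited to net/core/filter.c). Purely as a hypothetical sketch of what such an error tracepoint declaration could look like, with the usual trace-header boilerplate (TRACE_SYSTEM, include guards, define_trace.h) omitted, and with the recorded fields and format string being assumptions rather than the real header:

/* Hypothetical fragment -- not the actual include/trace/events/xdp.h.
 * Argument order mirrors the call site in the hunk above.
 */
TRACE_EVENT(xdp_redirect_map_err,

	TP_PROTO(const struct net_device *dev,
		 const struct bpf_prog *xdp,
		 const void *fwd,
		 const struct bpf_map *map, u32 index, int err),

	TP_ARGS(dev, xdp, fwd, map, index, err),

	TP_STRUCT__entry(
		__field(u32, prog_id)	/* prog id, per the parent commit */
		__field(int, ifindex)
		__field(u32, map_index)
		__field(int, err)
	),

	TP_fast_assign(
		__entry->prog_id   = xdp->aux->id;
		__entry->ifindex   = dev->ifindex;
		__entry->map_index = index;
		__entry->err       = err;
	),

	TP_printk("prog_id=%u ifindex=%d map_index=%u err=%d",
		  __entry->prog_id, __entry->ifindex,
		  __entry->map_index, __entry->err)
);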
@@ -2543,12 +2547,17 @@ int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 	ri->ifindex = 0;
 	if (unlikely(!fwd)) {
 		err = -EINVAL;
-		goto out;
+		goto err;
 	}
 	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
-out:
-	_trace_xdp_redirect(dev, xdp_prog, index, err);
+	if (unlikely(err))
+		goto err;
+
+	_trace_xdp_redirect(dev, xdp_prog, index);
+	return 0;
+err:
+	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -2566,23 +2575,25 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 	ri->ifindex = 0;
 	if (unlikely(!fwd)) {
 		err = -EINVAL;
-		goto out;
+		goto err;
 	}
 	if (unlikely(!(fwd->flags & IFF_UP))) {
 		err = -ENETDOWN;
-		goto out;
+		goto err;
 	}
 	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
 	if (skb->len > len) {
 		err = -EMSGSIZE;
-		goto out;
+		goto err;
 	}
 	skb->dev = fwd;
-out:
-	_trace_xdp_redirect(dev, xdp_prog, index, err);
+	_trace_xdp_redirect(dev, xdp_prog, index);
+	return 0;
+err:
+	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
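With the error path on its own tracepoints after this patch, an error monitor can stay attached permanently without slowing down the redirect fast path, which is the use case the commit message describes. A minimal sketch of such a monitor as a libbpf-style BPF program in C (the tracepoint name xdp:xdp_redirect_err, the section naming, and the map layout are assumptions for illustration, not taken from this diff):

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Single-slot counter for redirect failures (illustrative layout). */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} redirect_err_cnt SEC(".maps");

SEC("tracepoint/xdp/xdp_redirect_err")
int count_redirect_err(void *ctx)
{
	__u32 key = 0;
	__u64 *val = bpf_map_lookup_elem(&redirect_err_cnt, &key);

	if (val)
		__sync_fetch_and_add(val, 1);

	/* return 0, like the test program mentioned in the commit message */
	return 0;
}

char _license[] SEC("license") = "GPL";

Because only the (rare) error tracepoints carry a program, the ~25 ns per-packet cost measured above is not paid on successful redirects.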