author    Toke Høiland-Jørgensen <toke@redhat.com>  2019-06-28 11:12:34 +0200
committer Daniel Borkmann <daniel@iogearbox.net>    2019-06-29 01:31:09 +0200
commit    4b55cf290dc6bd3a9e5da26d1ad60e77aa88c8cf (patch)
tree      3fd6585380801d04a61c2c364cbdee8d5f672167 /net/core
parent    devmap/cpumap: Use flush list instead of bitmap (diff)
devmap: Rename ifindex member in bpf_redirect_info
The bpf_redirect_info struct has an 'ifindex' member which was named back when the redirects could only target egress interfaces. Now that we can also redirect to sockets and CPUs, this is a bit misleading, so rename the member to tgt_index. Reorder the struct members so we can have 'tgt_index' and 'tgt_value' next to each other in a subsequent patch.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
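For context, the struct definition itself lives in include/linux/filter.h and is not part of this net/core diffstat. A rough sketch of the before/after shape, limited to the members actually referenced in the diff below (other members omitted); the exact layout at this point in the tree may differ:

/* Sketch only: approximate per-CPU redirect state around this patch;
 * the authoritative definition is in include/linux/filter.h.
 */

/* Before this patch */
struct bpf_redirect_info {
	u32 ifindex;		/* named when only egress interfaces were targets */
	u32 flags;
	struct bpf_map *map;
	/* ... */
};

/* After this patch */
struct bpf_redirect_info {
	u32 flags;
	u32 tgt_index;		/* renamed: target may be a device, socket or CPU */
	struct bpf_map *map;
	/* ... */
};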
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/filter.c | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 1e5fd37e9ab5..b4a062379bb9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2158,8 +2158,8 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
if (unlikely(flags & ~(BPF_F_INGRESS)))
return TC_ACT_SHOT;
- ri->ifindex = ifindex;
ri->flags = flags;
+ ri->tgt_index = ifindex;
return TC_ACT_REDIRECT;
}
@@ -2169,8 +2169,8 @@ int skb_do_redirect(struct sk_buff *skb)
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct net_device *dev;
- dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
- ri->ifindex = 0;
+ dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
+ ri->tgt_index = 0;
if (unlikely(!dev)) {
kfree_skb(skb);
return -EINVAL;
@@ -3488,11 +3488,11 @@ xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
{
struct net_device *fwd;
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
int err;
fwd = dev_get_by_index_rcu(dev_net(dev), index);
- ri->ifindex = 0;
+ ri->tgt_index = 0;
if (unlikely(!fwd)) {
err = -EINVAL;
goto err;
@@ -3604,11 +3604,11 @@ static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct bpf_map *map,
struct bpf_redirect_info *ri)
{
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
void *fwd = NULL;
int err;
- ri->ifindex = 0;
+ ri->tgt_index = 0;
WRITE_ONCE(ri->map, NULL);
fwd = __xdp_map_lookup_elem(map, index);
@@ -3651,11 +3651,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
struct bpf_map *map)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
void *fwd = NULL;
int err = 0;
- ri->ifindex = 0;
+ ri->tgt_index = 0;
WRITE_ONCE(ri->map, NULL);
fwd = __xdp_map_lookup_elem(map, index);
@@ -3695,14 +3695,14 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
struct bpf_map *map = READ_ONCE(ri->map);
- u32 index = ri->ifindex;
+ u32 index = ri->tgt_index;
struct net_device *fwd;
int err = 0;
if (map)
return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
map);
- ri->ifindex = 0;
+ ri->tgt_index = 0;
fwd = dev_get_by_index_rcu(dev_net(dev), index);
if (unlikely(!fwd)) {
err = -EINVAL;
@@ -3730,8 +3730,8 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
if (unlikely(flags))
return XDP_ABORTED;
- ri->ifindex = ifindex;
ri->flags = flags;
+ ri->tgt_index = ifindex;
WRITE_ONCE(ri->map, NULL);
return XDP_REDIRECT;
@@ -3753,8 +3753,8 @@ BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
if (unlikely(flags))
return XDP_ABORTED;
- ri->ifindex = ifindex;
ri->flags = flags;
+ ri->tgt_index = ifindex;
WRITE_ONCE(ri->map, map);
return XDP_REDIRECT;