author    Daniel Borkmann <daniel@iogearbox.net>      2015-08-01 00:46:29 +0200
committer David S. Miller <davem@davemloft.net>       2015-08-02 17:20:47 -0700
commit    ba7591d8b28bd16a2eface5d009ab0b60c7629a4 (patch)
tree      17f39a416a27698e437e523a02428cf9cbdf7720 /net/core
parent    bnx2x: Correct logic for pvid configuration. (diff)
ebpf: add skb->hash to offset map for usage in {cls, act}_bpf or filters
Add skb->hash to the __sk_buff offset map, so it can be accessed from
an eBPF program. We already do this for classic BPF filters, but not
yet for eBPF; it can be useful as a demuxer in combination with helpers
like bpf_clone_redirect(). Toy example:

  __section("cls-lb") int ingress_main(struct __sk_buff *skb)
  {
          unsigned int which = 3 + (skb->hash & 7);

          /* bpf_skb_store_bytes(skb, ...); */
          /* bpf_l{3,4}_csum_replace(skb, ...); */

          bpf_clone_redirect(skb, which, 0);
          return -1;
  }

I was considering whether to add skb_get_hash() instead, but concluded
that the raw skb->hash is fine in this case: the hash can be accessed
directly, without an extra eBPF helper function call, it is filled out
by many NICs on ingress, and should its entropy level prove
insufficient, people can still implement their own software fallback
hash mix anyway.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
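[Editor's note] The closing point about a software fallback hash mix can
be made concrete. Below is a minimal sketch in the same style as the toy
example above, assuming the same headers and section annotations; mix32()
and SALT are illustrative names invented here, not kernel or helper APIs,
and the avalanche constants are the well-known MurmurHash3 finalizer:

  /* Hypothetical fallback: if the NIC-provided skb->hash is not trusted
   * to carry enough entropy, mix it in software before demuxing. mix32()
   * and SALT are illustrative, not part of any kernel API.
   */
  #define SALT 0x9e3779b9 /* arbitrary per-deployment constant */

  static __always_inline unsigned int mix32(unsigned int h)
  {
          /* MurmurHash3 32-bit finalizer: full avalanche on the input. */
          h ^= h >> 16;
          h *= 0x85ebca6b;
          h ^= h >> 13;
          h *= 0xc2b2ae35;
          h ^= h >> 16;
          return h;
  }

  __section("cls-lb-mixed") int ingress_mixed(struct __sk_buff *skb)
  {
          unsigned int which = 3 + (mix32(skb->hash ^ SALT) & 7);

          bpf_clone_redirect(skb, which, 0);
          return -1;
  }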
Diffstat (limited to 'net/core')
 net/core/filter.c | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/net/core/filter.c b/net/core/filter.c
index 1b72264ff2ee..a50dbfa83ad9 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1711,6 +1711,13 @@ static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg,
                               offsetof(struct net_device, ifindex));
                break;
 
+       case offsetof(struct __sk_buff, hash):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+
+               *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+                                     offsetof(struct sk_buff, hash));
+               break;
+
        case offsetof(struct __sk_buff, mark):
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
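
[Editor's note] The case added above runs once at program load time, not
per packet: it rewrites each read of __sk_buff.hash into a single eBPF
load from the real sk_buff. Expanding the BPF_LDX_MEM() macro from
include/linux/filter.h by hand gives a sketch of the emitted instruction
(based on that macro's definition in this kernel era, not on the patch
text itself):

  /* Hand-expanded form of the instruction the new case emits: one
   * word-sized (BPF_W, 32-bit) memory load from src_reg plus the real
   * sk_buff hash offset into dst_reg. No helper call is involved.
   */
  *insn++ = (struct bpf_insn) {
          .code    = BPF_LDX | BPF_W | BPF_MEM,
          .dst_reg = dst_reg,
          .src_reg = src_reg,
          .off     = offsetof(struct sk_buff, hash),
          .imm     = 0,
  };

The BUILD_BUG_ON() guard keeps this rewrite honest: if struct sk_buff
ever changed hash to something other than a 4-byte field, the BPF_W load
would silently read the wrong width, so the build fails instead.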