author	Stanislav Fomichev <sdf@google.com>	2019-07-01 10:38:39 -0700
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-07-08 16:22:55 +0200
commit	600c70bad6594cb124c641ed05355ca134650ea4 (patch)
tree	64b117309f808caca760fc11a37284880654eda3 /include/linux/filter.h
parent	Merge branch 'bpf-libbpf-perf-rb-api' (diff)
bpf: allow wide (u64) aligned stores for some fields of bpf_sock_addr
Since commit cd17d7770578 ("bpf/tools: sync bpf.h") clang decided that it
can do a single u64 store into user_ip6[2] instead of two separate u32 ones:

 #  17: (18) r2 = 0x100000000000000
 #  ; ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
 #  19: (7b) *(u64 *)(r1 +16) = r2
 #  invalid bpf_context access off=16 size=8

From the compiler's point of view it does look like a correct thing to do,
so let's support it on the kernel side.

Credit to Andrii Nakryiko for a proper implementation of
bpf_ctx_wide_store_ok.

Cc: Andrii Nakryiko <andriin@fb.com>
Cc: Yonghong Song <yhs@fb.com>
Fixes: cd17d7770578 ("bpf/tools: sync bpf.h")
Reported-by: kernel test robot <rong.a.chen@intel.com>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
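For context, the kind of program that triggers this is sketched below. It is
modeled on the BPF selftests; the section name, program name, and the
DST_REWRITE_IP6_* constants are illustrative, not part of this patch. Two
adjacent u32 stores into user_ip6[] are what clang may coalesce into one
u64 store:

	/*
	 * Minimal sketch (names/constants modeled on the BPF selftests,
	 * illustrative only).
	 */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_endian.h>

	#define DST_REWRITE_IP6_2	0
	#define DST_REWRITE_IP6_3	1

	SEC("cgroup/connect6")
	int connect_v6_prog(struct bpf_sock_addr *ctx)
	{
		/* Two adjacent 4-byte writes into the u32 user_ip6[] array... */
		ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
		ctx->user_ip6[3] = bpf_htonl(DST_REWRITE_IP6_3);
		/*
		 * ...which clang may merge into a single 8-byte store,
		 *   *(u64 *)(r1 +16) = r2
		 * rejected by the verifier before this patch.
		 */
		return 1;
	}

	char _license[] SEC("license") = "GPL";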
Diffstat (limited to 'include/linux/filter.h')
-rw-r--r--	include/linux/filter.h	6
1 file changed, 6 insertions(+), 0 deletions(-)
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 1fe53e78c7e3..6d944369ca87 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -747,6 +747,12 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 	return size <= size_default && (size & (size - 1)) == 0;
 }
 
+#define bpf_ctx_wide_store_ok(off, size, type, field)			\
+	(size == sizeof(__u64) &&					\
+	 off >= offsetof(type, field) &&				\
+	 off + sizeof(__u64) <= offsetofend(type, field) &&		\
+	 off % sizeof(__u64) == 0)
+
 #define bpf_classic_proglen(fprog)	(fprog->len * sizeof(fprog->filter[0]))
 
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
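For reference, the new macro is meant to be consumed by the context-access
checks in the verifier callbacks. The sketch below is modeled on how
sock_addr_is_valid_access() in net/core/filter.c applies it to user_ip6; it
is simplified and illustrative, not the exact hunk from this series:

	/*
	 * Sketch of an ->is_valid_access()-style check using the macro
	 * (simplified from sock_addr_is_valid_access(), illustrative only).
	 */
	static bool user_ip6_store_ok(int off, int size)
	{
		switch (off) {
		case offsetof(struct bpf_sock_addr, user_ip6[0]) ...
		     offsetofend(struct bpf_sock_addr, user_ip6) - 1:
			/* Per-u32 stores were already allowed... */
			if (size == sizeof(__u32))
				return true;
			/*
			 * ...and an 8-byte store is now accepted too, as
			 * long as it is u64-aligned and fits entirely
			 * within the field.
			 */
			return bpf_ctx_wide_store_ok(off, size,
						     struct bpf_sock_addr,
						     user_ip6);
		default:
			return false;
		}
	}

Note the alignment check (off % sizeof(__u64) == 0): a wide store that
straddles two u32 elements at an unaligned offset is still rejected.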