author     Matt Mullins <mmullins@fb.com>        2019-04-26 11:49:47 -0700
committer  Alexei Starovoitov <ast@kernel.org>   2019-04-26 19:04:19 -0700
commit     9df1c28bb75217b244257152ab7d788bb2a386d0 (patch)
tree       7fe10ffd9b59716b52b992f5663ec8b5102406b6 /include/trace
parent     bpf, arm64: use more scalable stadd over ldxr / stxr loop in xadd (diff)
bpf: add writable context for raw tracepoints
This is an opt-in interface that allows a tracepoint to provide a safe
buffer that can be written from a BPF_PROG_TYPE_RAW_TRACEPOINT program.
The size of the buffer must be a compile-time constant, and is checked
before allowing a BPF program to attach to a tracepoint that uses this
feature.

The pointer to this buffer will be the first argument of tracepoints
that opt in; the pointer is valid and can be bpf_probe_read() by both
BPF_PROG_TYPE_RAW_TRACEPOINT and BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE
programs that attach to such a tracepoint, but the buffer to which it
points may only be written by the latter.

Signed-off-by: Matt Mullins <mmullins@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
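
For illustration, a minimal sketch of how a tracepoint header might opt in to
this interface (the foo_send_request event, struct foo_request, and the
FOO_DEFINE_EVENT wrapper below are hypothetical, not part of this commit).
Because DEFINE_EVENT_WRITABLE() is only defined while <trace/bpf_probe.h>
expands the events, a header would typically fall back to plain DEFINE_EVENT()
for the other tracepoint passes:

/* include/trace/events/foo.h -- hypothetical sketch, not from this commit.
 * DEFINE_EVENT_WRITABLE() only exists during the bpf_probe.h expansion
 * pass, so the other tracepoint passes use plain DEFINE_EVENT().
 */
#ifdef DEFINE_EVENT_WRITABLE
#define FOO_DEFINE_EVENT(template, call, proto, args, size)		\
	DEFINE_EVENT_WRITABLE(template, call, PARAMS(proto),		\
			      PARAMS(args), size)
#else
#define FOO_DEFINE_EVENT(template, call, proto, args, size)		\
	DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args))
#endif

/* The writable buffer must be the first tracepoint argument, and the
 * declared size must be a compile-time constant matching its type, or
 * the BUILD_BUG_ON_ZERO() added below in bpf_probe.h will trip.
 */
FOO_DEFINE_EVENT(foo_request_class, foo_send_request,
	TP_PROTO(struct foo_request *req, int index),
	TP_ARGS(req, index),
	sizeof(struct foo_request));

At attach time, a BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE program is only
accepted if the byte range it accesses through the buffer pointer fits
within this declared size.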
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/bpf_probe.h | 27
1 file changed, 25 insertions(+), 2 deletions(-)
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 505dae0bed80..d6e556c0a085 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -69,8 +69,7 @@ __bpf_trace_##call(void *__data, proto) \
* to make sure that if the tracepoint handling changes, the
* bpf probe will fail to compile unless it too is updated.
*/
-#undef DEFINE_EVENT
-#define DEFINE_EVENT(template, call, proto, args) \
+#define __DEFINE_EVENT(template, call, proto, args, size) \
static inline void bpf_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(__bpf_trace_##template); \
@@ -81,12 +80,36 @@ __bpf_trace_tp_map_##call = { \
.tp = &__tracepoint_##call, \
.bpf_func = (void *)__bpf_trace_##template, \
.num_args = COUNT_ARGS(args), \
+ .writable_size = size, \
};
+#define FIRST(x, ...) x
+
+#undef DEFINE_EVENT_WRITABLE
+#define DEFINE_EVENT_WRITABLE(template, call, proto, args, size) \
+static inline void bpf_test_buffer_##call(void) \
+{ \
+ /* BUILD_BUG_ON() is ignored if the code is completely eliminated, but \
+ * BUILD_BUG_ON_ZERO() uses a different mechanism that is not \
+ * dead-code-eliminated. \
+ */ \
+ FIRST(proto); \
+ (void)BUILD_BUG_ON_ZERO(size != sizeof(*FIRST(args))); \
+} \
+__DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), size)
+
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(template, call, proto, args) \
+ __DEFINE_EVENT(template, call, PARAMS(proto), PARAMS(args), 0)
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef DEFINE_EVENT_WRITABLE
+#undef __DEFINE_EVENT
+#undef FIRST
+
#endif /* CONFIG_BPF_EVENTS */
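
On the consumer side, a minimal sketch of a BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE
program against the hypothetical tracepoint above (the struct layout and the
libbpf "raw_tracepoint.w/" section prefix are assumptions, not defined by this
patch):

// SPDX-License-Identifier: GPL-2.0
/* Sketch only: foo_send_request and struct foo_request are hypothetical. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct foo_request {
	__u32 magic;
	__u32 flags;
};

SEC("raw_tracepoint.w/foo_send_request")
int rewrite_foo_flags(struct bpf_raw_tracepoint_args *ctx)
{
	/* args[0] is the writable buffer declared by the tracepoint; the
	 * verifier tracks the highest offset accessed through this pointer
	 * and attaching fails if it exceeds the tracepoint's writable_size.
	 */
	struct foo_request *req = (struct foo_request *)ctx->args[0];

	req->flags |= 0x1;	/* direct store into the kernel-provided buffer */
	return 0;
}

char _license[] SEC("license") = "GPL";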