path: root/include/linux/bpf.h
author    Linus Torvalds <torvalds@linux-foundation.org>    2024-11-22 13:27:01 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2024-11-22 13:27:01 -0800
commit    06afb0f36106ecb839c5e2509905e68c1e2677de (patch)
tree      e54b4528c648fc59ab89726b3786bf6626600c06 /include/linux/bpf.h
parent    Merge tag 'trace-tools-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace (diff)
parent    ftrace: Fix regression with module command in stack_trace_filter (diff)
Merge tag 'trace-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing updates from Steven Rostedt:

 - Addition of faultable tracepoints

   There's a tracepoint attached to both a system call entry and exit.
   This location is known to allow page faults. The tracepoints are
   called under an rcu_read_lock() which does not allow faults that can
   sleep. This limits the ability of tracepoint handlers to page fault
   in user space system call parameters. Now these tracepoints have been
   made "faultable", allowing the callbacks to fault in user space
   parameters and record them.

   Note, only the infrastructure has been implemented. The consumers
   (perf, ftrace, BPF) now need to have their code modified to allow
   faults.

 - Fix up of BPF code for the tracepoint faultable logic

 - Update tracepoints to use the new static branch API

 - Remove trace_*_rcuidle() variants and the SRCU protection they used

 - Remove unused TRACE_EVENT_FL_FILTERED logic

 - Replace strncpy() with strscpy() and memcpy()

 - Replace per_cpu_ptr(smp_processor_id()) with this_cpu_ptr()

 - Fix perf events to not duplicate samples when tracing is enabled

 - Replace atomic64_add_return(1, counter) with atomic64_inc_return(counter)

 - Make stack trace buffer 4K instead of PAGE_SIZE

 - Remove TRACE_FLAG_IRQS_NOSUPPORT flag as it was never used

 - Get the true return address for function tracer when function graph
   tracer is also running.

   When function_graph trace is running along with function tracer, the
   parent function of the function tracer sometimes is
   "return_to_handler", which is the function graph trampoline to record
   the exit of the function. Use existing logic that calls into the
   fgraph infrastructure to find the real return address.

 - Remove (un)regfunc pointers out of tracepoint structure

 - Added last minute bug fix for setting pending modules in stack
   function filter.

     echo "write*:mod:ext3" > /sys/kernel/tracing/stack_trace_filter

   Would cause a kernel NULL dereference.

 - Minor clean ups

* tag 'trace-v6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (31 commits)
  ftrace: Fix regression with module command in stack_trace_filter
  tracing: Fix function name for trampoline
  ftrace: Get the true parent ip for function tracer
  tracing: Remove redundant check on field->field in histograms
  bpf: ensure RCU Tasks Trace GP for sleepable raw tracepoint BPF links
  bpf: decouple BPF link/attach hook and BPF program sleepable semantics
  bpf: put bpf_link's program when link is safe to be deallocated
  tracing: Replace strncpy() with strscpy() when copying comm
  tracing: Add might_fault() check in __DECLARE_TRACE_SYSCALL
  tracing: Fix syscall tracepoint use-after-free
  tracing: Introduce tracepoint_is_faultable()
  tracing: Introduce tracepoint extended structure
  tracing: Remove TRACE_FLAG_IRQS_NOSUPPORT
  tracing: Replace multiple deprecated strncpy with memcpy
  tracing: Make percpu stack trace buffer invariant to PAGE_SIZE
  tracing: Use atomic64_inc_return() in trace_clock_counter()
  trace/trace_event_perf: remove duplicate samples on the first tracepoint event
  tracing/bpf: Add might_fault check to syscall probes
  tracing/perf: Add might_fault check to syscall probes
  tracing/ftrace: Add might_fault check to syscall probes
  ...
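To make the faultable-tracepoint change above concrete, here is a rough sketch of the kind of callback it enables. This is a hypothetical probe, not code from this merge: the function name, the register-based argument decoding, and the x86-64 assumption are all illustrative.

/* Hypothetical sketch: with faultable syscall tracepoints, a sys_enter
 * probe may touch user memory that can fault and sleep. Probe name and
 * argument decoding are illustrative only, not code from this merge.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>

static void probe_sys_enter(void *data, struct pt_regs *regs, long id)
{
	char buf[64];
	/* assume the first syscall argument is a user-space string,
	 * e.g. a pathname; on x86-64 that arrives in regs->di */
	const char __user *ustr = (const char __user *)regs->di;

	might_fault();	/* this context may now sleep on a page fault */

	/* the copy below can page-fault, which the old
	 * rcu_read_lock()-protected tracepoints could not tolerate */
	if (strncpy_from_user(buf, ustr, sizeof(buf)) > 0)
		pr_debug("syscall %ld: %s\n", id, buf);
}

The diff below is the BPF side of that work: once an attach hook can sleep, bpf_link teardown has to wait for a tasks trace RCU grace period even when the attached program itself is not sleepable.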
Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h | 20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3ace0d6227e3..eaee2a819f4c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1645,6 +1645,11 @@ struct bpf_link {
 	enum bpf_link_type type;
 	const struct bpf_link_ops *ops;
 	struct bpf_prog *prog;
+	/* whether BPF link itself has "sleepable" semantics, which can differ
+	 * from underlying BPF program having a "sleepable" semantics, as BPF
+	 * link's semantics is determined by target attach hook
+	 */
+	bool sleepable;
 	/* rcu is used before freeing, work can be used to schedule that
 	 * RCU-based freeing before that, so they never overlap
 	 */
@@ -1661,8 +1666,10 @@ struct bpf_link_ops {
 	 */
 	void (*dealloc)(struct bpf_link *link);
 	/* deallocate link resources callback, called after RCU grace period;
-	 * if underlying BPF program is sleepable we go through tasks trace
-	 * RCU GP and then "classic" RCU GP
+	 * if either the underlying BPF program is sleepable or BPF link's
+	 * target hook is sleepable, we'll go through tasks trace RCU GP and
+	 * then "classic" RCU GP; this need for chaining tasks trace and
+	 * classic RCU GPs is designated by setting bpf_link->sleepable flag
 	 */
 	void (*dealloc_deferred)(struct bpf_link *link);
 	int (*detach)(struct bpf_link *link);
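The chained grace periods that comment describes play out when a link is freed. Below is a condensed sketch of that path, approximating the kernel/bpf/syscall.c logic; helper names are illustrative rather than verbatim upstream code.

/* Condensed sketch of the deferred-free path the dealloc_deferred
 * comment describes; approximates, rather than reproduces, upstream code.
 */
#include <linux/bpf.h>
#include <linux/rcupdate.h>

static void link_dealloc_rcu_gp(struct rcu_head *rcu)
{
	struct bpf_link *link = container_of(rcu, struct bpf_link, rcu);

	link->ops->dealloc_deferred(link);
}

static void link_dealloc_mult_rcu_gp(struct rcu_head *rcu)
{
	/* tasks trace RCU GP has elapsed; chain the "classic" RCU GP,
	 * unless the tasks trace GP already implies it */
	if (rcu_trace_implies_rcu_gp())
		link_dealloc_rcu_gp(rcu);
	else
		call_rcu(rcu, link_dealloc_rcu_gp);
}

static void example_link_free(struct bpf_link *link)
{
	/* link->sleepable, not just prog->aux->sleepable, decides the path:
	 * it is set when either the program or the attach hook can sleep */
	if (link->sleepable)
		call_rcu_tasks_trace(&link->rcu, link_dealloc_mult_rcu_gp);
	else
		call_rcu(&link->rcu, link_dealloc_rcu_gp);
}

The rcu/work union in struct bpf_link (first hunk above) is what these callbacks ride on, which is why the two mechanisms must never overlap.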
@@ -2409,6 +2416,9 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
 
 void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
 		   const struct bpf_link_ops *ops, struct bpf_prog *prog);
+void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+			     const struct bpf_link_ops *ops, struct bpf_prog *prog,
+			     bool sleepable);
 int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
 int bpf_link_settle(struct bpf_link_primer *primer);
 void bpf_link_cleanup(struct bpf_link_primer *primer);
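A plausible caller of the new initializer, condensed from the "bpf: decouple BPF link/attach hook and BPF program sleepable semantics" commit in this series; the link ops, priming, and error paths are simplified away, so treat this as a sketch:

/* Illustrative caller sketch; names and error handling simplified. */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/tracepoint.h>

static const struct bpf_link_ops example_raw_tp_lops = {
	/* .release / .dealloc_deferred / .detach elided in this sketch */
};

static int example_raw_tp_attach(struct bpf_raw_event_map *btp,
				 struct bpf_prog *prog)
{
	struct bpf_raw_tp_link *raw_tp;

	raw_tp = kzalloc(sizeof(*raw_tp), GFP_KERNEL);
	if (!raw_tp)
		return -ENOMEM;

	/* the link's sleepable semantics follow the *hook*, not just the
	 * program: attaching even a non-sleepable program to a faultable
	 * tracepoint must take the tasks trace RCU teardown path */
	bpf_link_init_sleepable(&raw_tp->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
				&example_raw_tp_lops, prog,
				tracepoint_is_faultable(btp->tp));
	raw_tp->btp = btp;
	return 0;
}

The plain bpf_link_init() keeps the old behavior (no hook-side sleepable semantics), so existing call sites are unaffected.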
@@ -2764,6 +2774,12 @@ static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
 {
 }
 
+static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type,
+					   const struct bpf_link_ops *ops, struct bpf_prog *prog,
+					   bool sleepable)
+{
+}
+
 static inline int bpf_link_prime(struct bpf_link *link,
 				 struct bpf_link_primer *primer)
 {