author    Linus Torvalds <torvalds@linux-foundation.org>  2021-11-01 20:05:19 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2021-11-01 20:05:19 -0700
commit    79ef0c00142519bc34e1341447f3797436cc48bf (patch)
tree      200c1ada8b4f31b79ec8a67939ca5fbcd0339958 /arch/sh/kernel
parent    Merge tag 'hwmon-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging (diff)
parent    tracing/histogram: Fix semicolon.cocci warnings (diff)
Merge tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing updates from Steven Rostedt:

 - kprobes: Restructured stack unwinder to show properly on x86 when a
   stack dump happens from a kretprobe callback.

 - Fix to bootconfig parsing

 - Have tracefs allow owner and group permissions by default (only
   denying others). There's been pressure to allow non root to tracefs
   in a controlled fashion, and using groups is probably the safest.

 - Bootconfig memory management updates.

 - Bootconfig clean up to have the tools directory be less dependent on
   changes in the kernel tree.

 - Allow perf to be traced by function tracer.

 - Rewrite of function graph tracer to be a callback from the function
   tracer instead of having its own trampoline (this change will happen
   on an arch by arch basis, and currently only x86_64 implements it).

 - Allow multiple direct trampolines (bpf hooks to functions) to be
   batched together in one synchronization.

 - Allow histogram triggers to add variables that can perform
   calculations against the event's fields.

 - Use the linker to determine architecture callbacks from the ftrace
   trampoline to allow for proper parameter prototypes and prevent
   warnings from the compiler.

 - Extend histogram triggers to key off of variables.

 - Have trace recursion use bit magic to determine preempt context over
   if branches (sketched just after the commit list below).

 - Have trace recursion disable preemption as all use cases do anyway.

 - Added testing for verification of tracing utilities.

 - Various small clean ups and fixes.

* tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (101 commits)
  tracing/histogram: Fix semicolon.cocci warnings
  tracing/histogram: Fix documentation inline emphasis warning
  tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
  tracing: Show size of requested perf buffer
  bootconfig: Initialize ret in xbc_parse_tree()
  ftrace: do CPU checking after preemption disabled
  ftrace: disable preemption when recursion locked
  tracing/histogram: Document expression arithmetic and constants
  tracing/histogram: Optimize division by a power of 2
  tracing/histogram: Covert expr to const if both operands are constants
  tracing/histogram: Simplify handling of .sym-offset in expressions
  tracing: Fix operator precedence for hist triggers expression
  tracing: Add division and multiplication support for hist triggers
  tracing: Add support for creating hist trigger variables from literal
  selftests/ftrace: Stop tracing while reading the trace file by default
  MAINTAINERS: Update KPROBES and TRACING entries
  test_kprobes: Move it from kernel/ to lib/
  docs, kprobes: Remove invalid URL and add new reference
  samples/kretprobes: Fix return value if register_kretprobe() failed
  lib/bootconfig: Fix the xbc_get_info kerneldoc
  ...
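Of the items above, the preempt-context "bit magic" is the most self-contained to illustrate: instead of classifying the current context with a chain of if-branches, the recursion code can add up three mask tests on the preempt count. A minimal sketch of the idea, assuming the NMI_MASK, HARDIRQ_MASK and SOFTIRQ_OFFSET definitions from include/linux/preempt.h (not the exact kernel source):

    /*
     * Each test contributes 1 when any bit in its mask is set, so the
     * result is 3 in NMI context, 2 in hardirq, 1 in softirq and 0 in
     * plain task context: no branches, just bit tests.
     */
    static inline int context_level(unsigned long pc)
    {
    	int level = 0;

    	level += !!(pc & NMI_MASK);
    	level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
    	level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

    	return level;
    }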
Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/ftrace.c    5
-rw-r--r--  arch/sh/kernel/kprobes.c  12
2 files changed, 6 insertions(+), 11 deletions(-)
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c
index 295c43315bbe..930001bb8c6a 100644
--- a/arch/sh/kernel/ftrace.c
+++ b/arch/sh/kernel/ftrace.c
@@ -252,11 +252,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return ftrace_modify_code(rec->ip, old, new);
}
-
-int __init ftrace_dyn_arch_init(void)
-{
- return 0;
-}
#endif /* CONFIG_DYNAMIC_FTRACE */
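The five deleted lines above are the whole story for ftrace.c: the stub could go because this series gives ftrace core a weak default with the same body, so architectures that need no special dynamic-ftrace setup simply inherit it. Roughly (a sketch of the generic fallback as added to kernel/trace/ftrace.c by this series):

    /* Weak default; replaces the identical per-arch "return 0" stubs. */
    int __init __weak ftrace_dyn_arch_init(void)
    {
    	return 0;
    }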
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/sh/kernel/kprobes.c b/arch/sh/kernel/kprobes.c
index 1c7f358ef0be..aed1ea8e2c2f 100644
--- a/arch/sh/kernel/kprobes.c
+++ b/arch/sh/kernel/kprobes.c
@@ -207,7 +207,7 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
ri->fp = NULL;
/* Replace the return addr with trampoline addr */
- regs->pr = (unsigned long)kretprobe_trampoline;
+ regs->pr = (unsigned long)__kretprobe_trampoline;
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
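For readers without the file open: the hunk above is the tail of arch_prepare_kretprobe(), which hijacks the probed function's return path (pr is SH's procedure return register). Paraphrased from the post-patch source:

    void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
    				      struct pt_regs *regs)
    {
    	/* Remember where the function would really have returned to. */
    	ri->ret_addr = (kprobe_opcode_t *)regs->pr;
    	ri->fp = NULL;

    	/* Replace the return addr with trampoline addr */
    	regs->pr = (unsigned long)__kretprobe_trampoline;
    }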
@@ -293,17 +293,17 @@ no_kprobe:
*/
static void __used kretprobe_trampoline_holder(void)
{
- asm volatile (".globl kretprobe_trampoline\n"
- "kretprobe_trampoline:\n\t"
+ asm volatile (".globl __kretprobe_trampoline\n"
+ "__kretprobe_trampoline:\n\t"
"nop\n");
}
/*
- * Called when we hit the probe point at kretprobe_trampoline
+ * Called when we hit the probe point at __kretprobe_trampoline
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
- regs->pc = __kretprobe_trampoline_handler(regs, &kretprobe_trampoline, NULL);
+ regs->pc = __kretprobe_trampoline_handler(regs, NULL);
return 1;
}
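The dropped second argument is the real change in this hunk: __kretprobe_trampoline_handler() no longer needs the trampoline address passed in, because the generic code can resolve it from the renamed symbol itself. The assumed shape of that helper, mirroring what this series adds to the generic kprobes code:

    /* The core resolves the (possibly function-descriptor-wrapped)
     * trampoline symbol itself, so arch callers stop passing it in. */
    static nokprobe_inline void *kretprobe_trampoline_addr(void)
    {
    	return dereference_kernel_function_descriptor(__kretprobe_trampoline);
    }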
@@ -442,7 +442,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
}
static struct kprobe trampoline_p = {
- .addr = (kprobe_opcode_t *)&kretprobe_trampoline,
+ .addr = (kprobe_opcode_t *)&__kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
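trampoline_p ties the pieces together: a kprobe planted on __kretprobe_trampoline itself, so every hijacked return lands in trampoline_probe_handler(). It is registered once at init time; the tail of this file reads roughly:

    int __init arch_init_kprobes(void)
    {
    	/* Probe the trampoline so returning functions trap into
    	 * trampoline_probe_handler() above. */
    	return register_kprobe(&trampoline_p);
    }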