author     Arjan van de Ven <arjan@linux.intel.com>    2008-01-30 13:33:07 +0100
committer  Ingo Molnar <mingo@elte.hu>                 2008-01-30 13:33:07 +0100
commit     5bc27dc2f55fd3043597b5a8de6536183f28a449 (patch)
tree       dca83b12fb2f01f85a9e31bf1fb3802bd2cfef2a /arch/x86/kernel/stacktrace.c
parent     x86: improve the 32 bit Frame Pointer backtracer to also use the traditional backtrace (diff)
x86: pull bp calculation earlier into the backtrace path
Right now we take the stack pointer early in the backtrace path, but bp is only calculated several functions deeper, making it hard to reconcile the stack and bp backtraces (and causing several internal backtrace functions to show up on the stack with bp-based backtracing). This patch moves the taking of bp to the same place we take the stack pointer; sadly this ripples through several layers of the backtracing stack, but it's not all that bad in the end, I hope.

Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
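To illustrate the idea outside the patch itself: the point of the change is that bp is captured at the very top of the backtrace path, right where the stack pointer is taken, and then threaded down explicitly rather than being recomputed several calls later. The standalone userspace sketch below is only an analogy, not kernel code; begin_trace() and the printed values are hypothetical, and it assumes a frame-pointer build (e.g. gcc -fno-omit-frame-pointer).

/*
 * Hypothetical userspace analogy of the patch, not kernel code: capture
 * the frame pointer (bp) at the same point the stack pointer is taken,
 * so both values describe the same frame when handed to the stack walker.
 * Build with: gcc -fno-omit-frame-pointer -o trace trace.c
 */
#include <stdio.h>

static void begin_trace(void)
{
	unsigned long stack = (unsigned long)&stack;	/* address on the current stack */
	unsigned long bp = (unsigned long)__builtin_frame_address(0);	/* current frame pointer */

	/* In the patch, dump_trace() now receives bp alongside the stack
	 * pointer instead of deriving it several functions deeper. */
	printf("stack=%#lx bp=%#lx\n", stack, bp);
}

int main(void)
{
	begin_trace();
	return 0;
}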
Diffstat (limited to 'arch/x86/kernel/stacktrace.c')
-rw-r--r--  arch/x86/kernel/stacktrace.c | 7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 8c4e4f5bf040..4f4021b5bfb5 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -33,7 +33,8 @@ static void save_stack_address(void *data, unsigned long addr, int reliable)
trace->entries[trace->nr_entries++] = addr;
}
-static void save_stack_address_nosched(void *data, unsigned long addr)
+static void
+save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
struct stack_trace *trace = (struct stack_trace *)data;
if (in_sched_functions(addr))
@@ -65,14 +66,14 @@ static const struct stacktrace_ops save_stack_ops_nosched = {
*/
void save_stack_trace(struct stack_trace *trace)
{
- dump_trace(current, NULL, NULL, &save_stack_ops, trace);
+ dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
- dump_trace(tsk, NULL, NULL, &save_stack_ops_nosched, trace);
+ dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
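For reference, the exported API touched here keeps its signature; only the internal dump_trace() call gains the extra bp argument (0 here, meaning "derive it from the current frame"). Below is a rough sketch of how an in-kernel caller of that era would consume save_stack_trace(); example_dump_current() and MAX_TRACE_DEPTH are made up for illustration, while the struct stack_trace fields follow include/linux/stacktrace.h of the period.

/* Hypothetical caller sketch, not part of this patch. */
#include <linux/stacktrace.h>
#include <linux/kernel.h>

#define MAX_TRACE_DEPTH 16

static void example_dump_current(void)
{
	unsigned long entries[MAX_TRACE_DEPTH];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= MAX_TRACE_DEPTH,
		.entries	= entries,
		.skip		= 0,
	};
	int i;

	save_stack_trace(&trace);

	/* A trailing ULONG_MAX entry terminates a trace that did not
	 * fill all max_entries slots (see the code above). */
	for (i = 0; i < trace.nr_entries; i++) {
		if (entries[i] == ULONG_MAX)
			break;
		printk(KERN_INFO "  %#lx\n", entries[i]);
	}
}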