author	Ingo Molnar <mingo@elte.hu>	2009-05-04 19:13:30 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-05-04 19:30:32 +0200
commit	0d905bca23aca5c86a10ee101bcd3b1abbd40b25 (patch)
tree	5a4e6b956d1923ac1d28ae8b8f3034c1c90df5a5 /include
parent	perf_counter: x86: fixup nmi_watchdog vs perf_counter boo-boo (diff)
perf_counter: initialize the per-cpu context earlier
percpu scheduling for perfcounters wants to take the context lock,
but that lock first needs to be initialized. Currently it is an
early_initcall() - but that is too late, the task tick runs much
sooner than that.

Call it explicitly from the scheduler init sequence instead.

[ Impact: fix access-before-init crash ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
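For context, a minimal sketch of how the newly declared perf_counter_init() is meant to be wired up. The kernel/perf_counter.c and kernel/sched.c changes fall outside the 'include' diffstat shown on this page, so the per-cpu variable name, the structure members and the exact call placement below are illustrative assumptions, not the patch itself:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/perf_counter.h>

/*
 * Sketch: set up every per-cpu counter context (and, crucially, its
 * lock) before the scheduler tick can try to take it.
 * 'perf_cpu_context' and 'ctx.lock' are assumed names here.
 */
void __init perf_counter_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);

		spin_lock_init(&cpuctx->ctx.lock);
	}
}

/* kernel/sched.c -- sketch of the explicit call from scheduler init */
void __init sched_init(void)
{
	/* ... existing scheduler setup ... */

	perf_counter_init();	/* runs long before any early_initcall() */
}

Because sched_init() executes during start_kernel(), well before the initcall phases, placing the call there guarantees the context lock exists by the time the first task tick reaches the perf_counter code.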
Diffstat (limited to 'include')
-rw-r--r--	include/linux/perf_counter.h	5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index f776851f8c4b..a356fa69796c 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -573,6 +573,8 @@ extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
 extern int sysctl_perf_counter_priv;
+extern void perf_counter_init(void);
+
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
@@ -600,9 +602,10 @@ perf_counter_mmap(unsigned long addr, unsigned long len,
 static inline void
 perf_counter_munmap(unsigned long addr, unsigned long len,
-			unsigned long pgoff, struct file *file) { }
+			unsigned long pgoff, struct file *file) { }
 static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_init(void) { }
 #endif
 #endif /* __KERNEL__ */
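As a side note, the empty static inline added to the !CONFIG_PERF_COUNTERS branch is what lets the scheduler invoke the initializer unconditionally; a minimal sketch of that compile-time selection (the call-site placement follows the commit message, everything else is as in the diff above):

#ifdef CONFIG_PERF_COUNTERS
extern void perf_counter_init(void);		/* real initializer */
#else
static inline void perf_counter_init(void) { }	/* optimized away */
#endif

	/* in the scheduler init sequence, no #ifdef needed at the call site: */
	perf_counter_init();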