author      Thomas Gleixner <tglx@linutronix.de>    2009-07-25 18:56:56 +0200
committer   Ingo Molnar <mingo@elte.hu>             2011-09-13 11:11:55 +0200
commit      ee30a7b2fc072f139dac44826860d2c1f422137c (patch)
tree        4f11173879b25cc31822cb0a8d9dbd48395e74bc /include/linux/sched.h
parent      locking, printk: Annotate logbuf_lock as raw (diff)
locking, sched: Annotate thread_group_cputimer as raw
The thread_group_cputimer lock can be taken in atomic context and therefore cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
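A minimal sketch of why the annotation matters, for readers not familiar with -rt locking: on PREEMPT_RT a plain spinlock_t is substituted by a sleeping lock, so a lock that is taken from atomic context (e.g. from the scheduler tick) has to stay a raw_spinlock_t and be taken with the raw_spin_lock_* API. The helper below is illustrative only, loosely modelled on the group cputime accounting paths; it is not part of this patch and its name is made up.

/*
 * Illustrative sketch, not from this patch: an atomic-context caller
 * updating the group cputimer through the now-raw lock.
 */
#include <linux/sched.h>
#include <linux/spinlock.h>

static void account_group_runtime_sketch(struct task_struct *tsk,
					 unsigned long long ns)
{
	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
	unsigned long flags;

	if (!cputimer->running)
		return;

	/*
	 * raw_spin_lock_irqsave() remains a spinning lock even on
	 * PREEMPT_RT; spin_lock_irqsave() on a spinlock_t would turn
	 * into a sleeping lock there and could not be used in this
	 * non-preemptible context.
	 */
	raw_spin_lock_irqsave(&cputimer->lock, flags);
	cputimer->cputime.sum_exec_runtime += ns;
	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
}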
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--    include/linux/sched.h    4
1 file changed, 2 insertions, 2 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ac2c0578e0f..e672236c08e0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -510,7 +510,7 @@ struct task_cputime {
 struct thread_group_cputimer {
 	struct task_cputime cputime;
 	int running;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 };
 
 #include <linux/rwsem.h>
@@ -2566,7 +2566,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
 static inline void thread_group_cputime_init(struct signal_struct *sig)
 {
-	spin_lock_init(&sig->cputimer.lock);
+	raw_spin_lock_init(&sig->cputimer.lock);
 }
 
 /*