author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2008-02-13 15:45:39 +0100
committer  Ingo Molnar <mingo@elte.hu>              2008-02-13 15:45:39 +0100
commit     4cf5d77a6eefaa7a464bc34e8cb767356f10fd74 (patch)
tree       ad011b73207b6c8523189b873d8b1c3757d84e12 /kernel/sched.c
parent     sched: fair-group: separate tg->shares from task_group_lock (diff)
sched: fix incorrect irq lock usage in normalize_rt_tasks()
lockdep spotted this bogus irq locking. normalize_rt_tasks() can be
called from hardirq context through sysrq-n.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6b02276baaa6..88a17c7128c3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7291,7 +7291,7 @@ void normalize_rt_tasks(void)
 	unsigned long flags;
 	struct rq *rq;
 
-	read_lock_irq(&tasklist_lock);
+	read_lock_irqsave(&tasklist_lock, flags);
 	do_each_thread(g, p) {
 		/*
 		 * Only normalize user tasks:
@@ -7317,16 +7317,16 @@ void normalize_rt_tasks(void)
 			continue;
 		}
 
-		spin_lock_irqsave(&p->pi_lock, flags);
+		spin_lock(&p->pi_lock);
 		rq = __task_rq_lock(p);
 
 		normalize_task(rq, p);
 
 		__task_rq_unlock(rq);
-		spin_unlock_irqrestore(&p->pi_lock, flags);
+		spin_unlock(&p->pi_lock);
 	} while_each_thread(g, p);
 
-	read_unlock_irq(&tasklist_lock);
+	read_unlock_irqrestore(&tasklist_lock, flags);
 }
 
 #endif /* CONFIG_MAGIC_SYSRQ */
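
For context, a minimal sketch of the locking pattern this fix adopts. The lock
and function names below are illustrative stand-ins, not the kernel's own
symbols: a function that may be entered with interrupts already disabled (as
normalize_rt_tasks() can be via sysrq-n from hardirq context) should take its
outer lock with the irqsave/irqrestore variants so the caller's interrupt
state is preserved, and any nested lock taken while interrupts are already off
can then use the plain spin_lock()/spin_unlock() forms.

/*
 * Illustrative sketch only -- not the kernel's code; the locks and the
 * function here are made up for the example.
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_list_lock);	/* stand-in for tasklist_lock */
static DEFINE_SPINLOCK(example_pi_lock);	/* stand-in for p->pi_lock */

static void example_normalize(void)
{
	unsigned long flags;

	/*
	 * May run with interrupts already disabled (e.g. in hardirq
	 * context), so save and restore the caller's irq state instead of
	 * unconditionally re-enabling interrupts on unlock, as the
	 * read_lock_irq()/read_unlock_irq() pair would.
	 */
	read_lock_irqsave(&example_list_lock, flags);

	/*
	 * Interrupts stay disabled for this whole region, so the nested
	 * lock does not need its own irqsave/irqrestore pair.
	 */
	spin_lock(&example_pi_lock);
	/* ... per-task work ... */
	spin_unlock(&example_pi_lock);

	read_unlock_irqrestore(&example_list_lock, flags);
}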