author     Catalin Marinas <catalin.marinas@arm.com>  2011-11-27 21:43:10 +0000
committer  Ingo Molnar <mingo@elte.hu>                2012-03-13 10:17:45 +0100
commit     01f23e1630d944f7085cd8fd5793e31ea91c03d8 (patch)
tree       f21651af720e87721c3829ff69a41c5a9d8cb31d /kernel/sched
parent     Linux 3.3-rc7 (diff)
sched/arch: Introduce the finish_arch_post_lock_switch() scheduler callback
This callback is called by the scheduler after rq->lock has been
released and interrupts enabled. It will be used in subsequent patches
on the ARM architecture.

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Frank Rowand <frank.rowand@am.sony.com>
Tested-by: Will Deacon <will.deacon@arm.com>
Tested-by: Marc Zyngier <Marc.Zyngier@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/20120313110840.7b444deb6b1bb902c15f3cdf@canb.auug.org.au
Signed-off-by: Ingo Molnar <mingo@elte.hu>
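As an illustration only (not part of this commit): an architecture that wants
non-trivial behaviour can override the no-op fallback added to
kernel/sched/sched.h below by defining the macro in one of its own headers
before sched.h tests for it. The helper arch_deferred_switch_work() is a
hypothetical name; the real ARM user arrives in the later patches mentioned
in the commit message.

    #define finish_arch_post_lock_switch finish_arch_post_lock_switch
    static inline void finish_arch_post_lock_switch(void)
    {
    	/*
    	 * Invoked from finish_task_switch() only after rq->lock has been
    	 * released and interrupts re-enabled, so work that must not run
    	 * under the runqueue lock or with IRQs disabled belongs here.
    	 */
    	arch_deferred_switch_work();	/* hypothetical arch-specific helper */
    }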
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c 	1 +
-rw-r--r--	kernel/sched/sched.h	3 +++
2 files changed, 4 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b342f57879e6..423f40f32a59 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1932,6 +1932,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	local_irq_enable();
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 	finish_lock_switch(rq, prev);
+	finish_arch_post_lock_switch();
 
 	fire_sched_in_preempt_notifiers(current);
 	if (mm)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db8..d72483d07c9f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -692,6 +692,9 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
 #ifndef finish_arch_switch
 # define finish_arch_switch(prev)	do { } while (0)
 #endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
 static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)