author     Michael Ellerman <michael@ellerman.id.au>   2008-07-16 14:21:34 +1000
committer  Paul Mackerras <paulus@samba.org>           2008-08-20 16:34:57 +1000
commit     01f3880dd8a7fa78c419da2db740cba511ca7798 (patch)
tree       6a1e6ab37fa42ae1847f2ff44f26270dc6848c7c /arch/powerpc/kernel/entry_64.S
parent     powerpc: Fix vio_bus_probe oops on probe error (diff)
powerpc: Streamline ret_from_except_lite for non-iSeries platforms
There is a small passage of code in ret_from_except_lite which is only
required on iSeries. For a multi-platform kernel on non-iSeries machines
this means we end up executing ~15 nops in ret_from_except_lite.

It would be nicer if non-iSeries could skip the code entirely, and on
iSeries we can jump out of line to execute the code.

I have no performance numbers to justify this, other than the assertion
that executing 15 nops takes longer than executing 0.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
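The patch relies on the powerpc firmware-feature-section macros visible in the
diff below (BEGIN_FW_FTR_SECTION / FW_FTR_SECTION_ELSE /
ALT_FW_FTR_SECTION_END_IFCLR): at boot the kernel either leaves the primary
section in place or patches the alternative section over it, depending on
whether the firmware feature bit is set. Below is a minimal, standalone C
sketch of that decision only, not the kernel's actual fixup code; the helper
name keep_primary_section and the bit value chosen for FW_FEATURE_ISERIES are
illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit value only; the real definition lives in the kernel. */
#define FW_FEATURE_ISERIES (1UL << 0)

/*
 * An *_IFCLR(mask) section encodes a wanted value of 0: the primary code
 * between BEGIN_FW_FTR_SECTION and FW_FTR_SECTION_ELSE is kept when the
 * masked feature bits are clear; otherwise the alternative section
 * (between FW_FTR_SECTION_ELSE and the END macro) is patched over it.
 */
static bool keep_primary_section(unsigned long fw_features,
                                 unsigned long mask, unsigned long wanted)
{
        return (fw_features & mask) == wanted;
}

int main(void)
{
        unsigned long fw_features = 0;  /* pretend we booted on non-iSeries */

        if (keep_primary_section(fw_features, FW_FEATURE_ISERIES, 0))
                puts("primary kept:    ld   r5,SOFTE(r1)");
        else
                puts("alt patched in:  b    iseries_check_pending_irqs");
        return 0;
}

With the feature bit clear (non-iSeries), ret_from_except_lite falls straight
through the single load of SOFTE(r1); only when FW_FEATURE_ISERIES is set does
the branch to the out-of-line iseries_check_pending_irqs code get patched in.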
Diffstat (limited to '')
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 53
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2d802e97097c..55445f1dba8a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -512,31 +512,12 @@ _GLOBAL(ret_from_except_lite)
#endif
restore:
- ld r5,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
- cmpdi 0,r5,0
- beq 4f
- /* Check for pending interrupts (iSeries) */
- ld r3,PACALPPACAPTR(r13)
- ld r3,LPPACAANYINT(r3)
- cmpdi r3,0
- beq+ 4f /* skip do_IRQ if no interrupts */
-
- li r3,0
- stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl .trace_hardirqs_off
- mfmsr r10
-#endif
- ori r10,r10,MSR_EE
- mtmsrd r10 /* hard-enable again */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite /* loop back and handle more */
-4:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+ ld r5,SOFTE(r1)
+FW_FTR_SECTION_ELSE
+ b iseries_check_pending_irqs
+ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
+2:
TRACE_AND_RESTORE_IRQ(r5);
/* extract EE bit and use it to restore paca->hard_enabled */
@@ -592,6 +573,30 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
rfid
b . /* prevent speculative execution */
+iseries_check_pending_irqs:
+#ifdef CONFIG_PPC_ISERIES
+ ld r5,SOFTE(r1)
+ cmpdi 0,r5,0
+ beq 2b
+ /* Check for pending interrupts (iSeries) */
+ ld r3,PACALPPACAPTR(r13)
+ ld r3,LPPACAANYINT(r3)
+ cmpdi r3,0
+ beq+ 2b /* skip do_IRQ if no interrupts */
+
+ li r3,0
+ stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ bl .trace_hardirqs_off
+ mfmsr r10
+#endif
+ ori r10,r10,MSR_EE
+ mtmsrd r10 /* hard-enable again */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_IRQ
+ b .ret_from_except_lite /* loop back and handle more */
+#endif
+
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */