Diffstat (limited to 'arch/powerpc/kernel/exceptions-64s.S')
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  | 407
1 file changed, 230 insertions(+), 177 deletions(-)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b66dd6f775a4..eaf2f167c342 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -13,6 +13,7 @@
*
*/
+#include <linux/linkage.h>
#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
@@ -111,6 +112,7 @@ name:
#define ISTACK .L_ISTACK_\name\() /* Set regular kernel stack */
#define __ISTACK(name) .L_ISTACK_ ## name
#define IKUAP .L_IKUAP_\name\() /* Do KUAP lock */
+#define IMSR_R12 .L_IMSR_R12_\name\() /* Assumes MSR saved to r12 */
#define INT_DEFINE_BEGIN(n) \
.macro int_define_ ## n name
@@ -176,6 +178,9 @@ do_define_int n
.ifndef IKUAP
IKUAP=1
.endif
+ .ifndef IMSR_R12
+ IMSR_R12=0
+ .endif
.endm
/*
@@ -281,7 +286,7 @@ BEGIN_FTR_SECTION
mfspr r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
- std r10,IAREA+EX_R10(r13) /* save r10 - r12 */
+ std r10,IAREA+EX_R10(r13) /* save r10 */
.if ICFAR
BEGIN_FTR_SECTION
mfspr r10,SPRN_CFAR
@@ -321,7 +326,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
mfctr r10
std r10,IAREA+EX_CTR(r13)
mfcr r9
- std r11,IAREA+EX_R11(r13)
+ std r11,IAREA+EX_R11(r13) /* save r11 - r12 */
std r12,IAREA+EX_R12(r13)
/*
@@ -502,6 +507,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
std r10,0(r1) /* make stack chain pointer */
std r0,GPR0(r1) /* save r0 in stackframe */
std r10,GPR1(r1) /* save r1 in stackframe */
+ SANITIZE_GPR(0)
/* Mark our [H]SRRs valid for return */
li r10,1
@@ -544,8 +550,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r9,GPR11(r1)
std r10,GPR12(r1)
std r11,GPR13(r1)
+ .if !IMSR_R12
+ SANITIZE_GPRS(9, 12)
+ .else
+ SANITIZE_GPRS(9, 11)
+ .endif
SAVE_NVGPRS(r1)
+ SANITIZE_NVGPRS()
.if IDAR
.if IISIDE
@@ -577,10 +589,10 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
- std r2,GPR2(r1) /* save r2 in stackframe */
- SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */
+ SAVE_GPRS(2, 8, r1) /* save r2 - r8 in stackframe */
+ SANITIZE_GPRS(2, 8)
mflr r9 /* Get LR, later save to stack */
- ld r2,PACATOC(r13) /* get kernel TOC into r2 */
+ LOAD_PACA_TOC() /* get kernel TOC into r2 */
std r9,_LINK(r1)
lbz r10,PACAIRQSOFTMASK(r13)
mfspr r11,SPRN_XER /* save XER in stackframe */
@@ -589,9 +601,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
li r9,IVEC
std r9,_TRAP(r1) /* set trap number */
li r10,0
- ld r11,exception_marker@toc(r2)
+ LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
std r10,RESULT(r1) /* clear regs->result */
- std r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame */
+ std r11,STACK_INT_FRAME_MARKER(r1) /* mark the frame */
.endm
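
A hedged aside on the frame-marker change above: the marker is a plain integer constant, so it can be materialized from immediates with LOAD_REG_IMMEDIATE instead of being loaded through the kernel TOC in r2, which this path no longer assumes is set up. Assuming the usual kernel value 0x72656773 for STACK_FRAME_REGS_MARKER, the marker just spells "regs", as this standalone C check illustrates:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t m = 0x72656773u;	/* assumed STACK_FRAME_REGS_MARKER */
		char s[5] = { m >> 24, m >> 16, m >> 8, m, 0 };
		puts(s);			/* prints "regs" */
		return 0;
	}
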
/*
@@ -610,7 +622,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
mr r12,r2
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
LOAD_REG_ADDR(r9, __start___restart_table)
LOAD_REG_ADDR(r10, __stop___restart_table)
mr r2,r12
@@ -640,7 +652,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
mr r12,r2
- ld r2,PACATOC(r13)
+ LOAD_PACA_TOC()
LOAD_REG_ADDR(r9, __start___soft_mask_table)
LOAD_REG_ADDR(r10, __stop___soft_mask_table)
mr r2,r12
@@ -696,6 +708,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
mtlr r9
ld r9,_CCR(r1)
mtcr r9
+ SANITIZE_RESTORE_NVGPRS()
REST_GPRS(2, 13, r1)
REST_GPR(0, r1)
/* restore original r1. */
@@ -703,6 +716,71 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
.endm
/*
+ * EARLY_BOOT_FIXUP - Fix real-mode interrupt with wrong endian in early boot.
+ *
+ * There's a short window during boot where although the kernel is running
+ * little endian, any exceptions will cause the CPU to switch back to big
+ * endian. For example a WARN() boils down to a trap instruction, which will
+ * cause a program check, and we end up here but with the CPU in big endian
+ * mode. The first instruction of the program check handler (in GEN_INT_ENTRY
+ * below) is an mtsprg, which when executed in the wrong endian is an lhzu with
+ * a ~3GB displacement from r3. The content of r3 is random, so that is a load
+ * from some random location, and depending on the system can easily lead to a
+ * checkstop, or an infinitely recursive page fault.
+ *
+ * So to handle that case we have a trampoline here that can detect we are in
+ * the wrong endian and flip us back to the correct endian. We can't flip
+ * MSR[LE] using mtmsr, so we have to use rfid. That requires backing up SRR0/1
+ * as well as a GPR. To do that we use SPRG0/2/3, as SPRG1 is already used for
+ * the paca. SPRG3 is user readable, but this trampoline is only active very
+ * early in boot, and SPRG3 will be reinitialised in vdso_getcpu_init() before
+ * userspace starts.
+ */
+.macro EARLY_BOOT_FIXUP
+BEGIN_FTR_SECTION
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8
+ b 2f // Skip trampoline if endian is correct
+ .long 0xa643707d // mtsprg 0, r11 Backup r11
+ .long 0xa6027a7d // mfsrr0 r11
+ .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2
+ .long 0xa6027b7d // mfsrr1 r11
+ .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3
+ .long 0xa600607d // mfmsr r11
+ .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE]
+ .long 0xa6037b7d // mtsrr1 r11
+ /*
+ * This is 'li r11,1f' where 1f is the absolute address of that
+ * label, byteswapped into the SI field of the instruction.
+ */
+ .long 0x00006039 | \
+ ((ABS_ADDR(1f, real_vectors) & 0x00ff) << 24) | \
+ ((ABS_ADDR(1f, real_vectors) & 0xff00) << 8)
+ .long 0xa6037a7d // mtsrr0 r11
+ .long 0x2400004c // rfid
+1:
+ mfsprg r11, 3
+ mtsrr1 r11 // Restore SRR1
+ mfsprg r11, 2
+ mtsrr0 r11 // Restore SRR0
+ mfsprg r11, 0 // Restore r11
+2:
+#endif
+ /*
+ * program check could hit at any time, and pseries can not block
+ * MSR[ME] in early boot. So check if there is anything useful in r13
+ * yet, and spin forever if not.
+ */
+ mtsprg 0, r11
+ mfcr r11
+ cmpdi r13, 0
+ beq .
+ mtcr r11
+ mfsprg r11, 0
+END_FTR_SECTION(0, 1) // nop out after boot
+.endm
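+
Two of the tricks in this macro can be sanity-checked in plain C. The `tdi 0,0,0x48` word is 0x08000048; fetched in the opposite endian the CPU instead sees the byte-reversed 0x48000008, which is `b . + 8`, so a correct-endian CPU falls through to the `b 2f` that skips the trampoline while a wrong-endian CPU jumps over it into the trampoline. Likewise the `li r11, 1f` expression builds the byte-reversed image of `li r11, SI` (big-endian `li r11, SI` is 0x3960 followed by the 16-bit SI). For SI = 0x734 it yields 0x34076039, the fixed word the old program-check-only trampoline (removed further down) hard-coded. A small runnable sketch, illustrative only, not kernel code:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t swab32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
		       ((x << 8) & 0x00ff0000u) | (x << 24);
	}

	static uint32_t le_li_r11(uint16_t si)
	{
		/* 0x00006039 | (lo(SI) << 24) | (hi(SI) << 16), as above */
		return 0x00006039u
		     | ((uint32_t)(si & 0x00ffu) << 24)
		     | ((uint32_t)(si & 0xff00u) << 8);
	}

	int main(void)
	{
		printf("0x%08x\n", swab32(0x08000048u));  /* 0x48000008: b .+8 */
		printf("0x%08x\n", le_li_r11(0x0734));    /* 0x34076039 */
		return 0;
	}
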
+
+/*
* There are a few constraints to be concerned with.
* - Real mode exceptions code/data must be located at their physical location.
* - Virtual mode exceptions must be mapped at their 0xc000... location.
@@ -815,7 +893,7 @@ __start_interrupts:
*
* Call convention:
*
- * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
+ * syscall register convention is in Documentation/arch/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
/* SCV 0 */
@@ -996,8 +1074,8 @@ EXC_COMMON_BEGIN(system_reset_common)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY system_reset
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl system_reset_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(system_reset_exception)
/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
@@ -1079,6 +1157,7 @@ INT_DEFINE_BEGIN(machine_check)
INT_DEFINE_END(machine_check)
EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
+ EARLY_BOOT_FIXUP
GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
@@ -1142,8 +1221,11 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_early
+ addi r3,r1,STACK_INT_FRAME_REGS
+BEGIN_FTR_SECTION
+ bl CFUNC(machine_check_early_boot)
+END_FTR_SECTION(0, 1) // nop out after boot
+ bl CFUNC(machine_check_early)
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
@@ -1204,7 +1286,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
* Queue up the MCE event so that we can log it later, while
* returning from kernel or opal call.
*/
- bl machine_check_queue_event
+ bl CFUNC(machine_check_queue_event)
MACHINE_CHECK_HANDLER_WINDUP
RFI_TO_KERNEL
@@ -1229,8 +1311,8 @@ EXC_COMMON_BEGIN(machine_check_common)
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception_async
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(machine_check_exception_async)
b interrupt_return_srr
@@ -1240,7 +1322,7 @@ EXC_COMMON_BEGIN(machine_check_common)
* done. Queue the event then call the idle code to do the wake up.
*/
EXC_COMMON_BEGIN(machine_check_idle_common)
- bl machine_check_queue_event
+ bl CFUNC(machine_check_queue_event)
/*
* GPR-loss wakeups are relatively straightforward, because the
@@ -1279,7 +1361,7 @@ EXC_COMMON_BEGIN(unrecoverable_mce)
BEGIN_FTR_SECTION
li r10,0 /* clear MSR_RI */
mtmsrd r10,1
- bl disable_machine_check
+ bl CFUNC(disable_machine_check)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
ld r10,PACAKMSR(r13)
li r3,MSR_ME
@@ -1295,15 +1377,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
* This is the NMI version of the handler because we are called from
* the early handler which is a true NMI.
*/
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl machine_check_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(machine_check_exception)
/*
* We will not reach here. Even if we did, there is no way out.
* Call unrecoverable_exception and die.
*/
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(unrecoverable_exception)
b .
@@ -1353,26 +1435,26 @@ EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
GEN_COMMON data_access
ld r4,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
andis. r0,r4,DSISR_DABRMATCH@h
bne- 1f
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
- bl do_hash_fault
+ bl CFUNC(do_hash_fault)
MMU_FTR_SECTION_ELSE
- bl do_page_fault
+ bl CFUNC(do_page_fault)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
- bl do_page_fault
+ bl CFUNC(do_page_fault)
#endif
b interrupt_return_srr
-1: bl do_break
+1: bl CFUNC(do_break)
/*
* do_break() may have changed the NV GPRS while handling a breakpoint.
* If so, we need to restore them with their updated values.
*/
- REST_NVGPRS(r1)
+ HANDLER_RESTORE_NVGPRS()
b interrupt_return_srr
@@ -1410,8 +1492,8 @@ EXC_COMMON_BEGIN(data_access_slb_common)
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_slb_fault
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_slb_fault)
cmpdi r3,0
bne- 1f
b fast_interrupt_return_srr
@@ -1424,8 +1506,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
li r3,-EFAULT
#endif
std r3,RESULT(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_segment_interrupt
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_bad_segment_interrupt)
b interrupt_return_srr
@@ -1456,15 +1538,15 @@ EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
- bl do_hash_fault
+ bl CFUNC(do_hash_fault)
MMU_FTR_SECTION_ELSE
- bl do_page_fault
+ bl CFUNC(do_page_fault)
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
#else
- bl do_page_fault
+ bl CFUNC(do_page_fault)
#endif
b interrupt_return_srr
@@ -1498,8 +1580,8 @@ EXC_COMMON_BEGIN(instruction_access_slb_common)
#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_slb_fault
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_slb_fault)
cmpdi r3,0
bne- 1f
b fast_interrupt_return_srr
@@ -1512,8 +1594,8 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
li r3,-EFAULT
#endif
std r3,RESULT(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_segment_interrupt
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_bad_segment_interrupt)
b interrupt_return_srr
@@ -1566,8 +1648,8 @@ EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
GEN_COMMON hardware_interrupt
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_IRQ
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_IRQ)
BEGIN_FTR_SECTION
b interrupt_return_hsrr
FTR_SECTION_ELSE
@@ -1596,9 +1678,9 @@ EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
EXC_VIRT_END(alignment, 0x4600, 0x100)
EXC_COMMON_BEGIN(alignment_common)
GEN_COMMON alignment
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl alignment_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(alignment_exception)
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -1619,51 +1701,7 @@ INT_DEFINE_BEGIN(program_check)
INT_DEFINE_END(program_check)
EXC_REAL_BEGIN(program_check, 0x700, 0x100)
-
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
- /*
- * There's a short window during boot where although the kernel is
- * running little endian, any exceptions will cause the CPU to switch
- * back to big endian. For example a WARN() boils down to a trap
- * instruction, which will cause a program check, and we end up here but
- * with the CPU in big endian mode. The first instruction of the program
- * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
- * executed in the wrong endian is an lhzu with a ~3GB displacement from
- * r3. The content of r3 is random, so that is a load from some random
- * location, and depending on the system can easily lead to a checkstop,
- * or an infinitely recursive page fault.
- *
- * So to handle that case we have a trampoline here that can detect we
- * are in the wrong endian and flip us back to the correct endian. We
- * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
- * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
- * SPRG1 is already used for the paca. SPRG3 is user readable, but this
- * trampoline is only active very early in boot, and SPRG3 will be
- * reinitialised in vdso_getcpu_init() before userspace starts.
- */
-BEGIN_FTR_SECTION
- tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8
- b 1f // Skip trampoline if endian is correct
- .long 0xa643707d // mtsprg 0, r11 Backup r11
- .long 0xa6027a7d // mfsrr0 r11
- .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2
- .long 0xa6027b7d // mfsrr1 r11
- .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3
- .long 0xa600607d // mfmsr r11
- .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE]
- .long 0xa6037b7d // mtsrr1 r11
- .long 0x34076039 // li r11, 0x734
- .long 0xa6037a7d // mtsrr0 r11
- .long 0x2400004c // rfid
- mfsprg r11, 3
- mtsrr1 r11 // Restore SRR1
- mfsprg r11, 2
- mtsrr0 r11 // Restore SRR0
- mfsprg r11, 0 // Restore r11
-1:
-END_FTR_SECTION(0, 1) // nop out after boot
-#endif /* CONFIG_CPU_LITTLE_ENDIAN */
-
+ EARLY_BOOT_FIXUP
GEN_INT_ENTRY program_check, virt=0
EXC_REAL_END(program_check, 0x700, 0x100)
EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
@@ -1706,9 +1744,9 @@ EXC_COMMON_BEGIN(program_check_common)
__GEN_COMMON_BODY program_check
.Ldo_program_check:
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl program_check_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(program_check_exception)
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -1726,6 +1764,7 @@ INT_DEFINE_BEGIN(fp_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(fp_unavailable)
EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
@@ -1737,8 +1776,8 @@ EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
EXC_COMMON_BEGIN(fp_unavailable_common)
GEN_COMMON fp_unavailable
bne 1f /* if from user, just load it up */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl kernel_fp_unavailable_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(kernel_fp_unavailable_exception)
0: trap
EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1:
@@ -1751,12 +1790,12 @@ BEGIN_FTR_SECTION
bne- 2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
- bl load_up_fpu
+ bl CFUNC(load_up_fpu)
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl fp_unavailable_tm
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(fp_unavailable_tm)
b interrupt_return_srr
#endif
@@ -1799,8 +1838,8 @@ EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
EXC_VIRT_END(decrementer, 0x4900, 0x80)
EXC_COMMON_BEGIN(decrementer_common)
GEN_COMMON decrementer
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl timer_interrupt
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(timer_interrupt)
b interrupt_return_srr
@@ -1884,11 +1923,11 @@ EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
EXC_COMMON_BEGIN(doorbell_super_common)
GEN_COMMON doorbell_super
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
- bl doorbell_exception
+ bl CFUNC(doorbell_exception)
#else
- bl unknown_async_exception
+ bl CFUNC(unknown_async_exception)
#endif
b interrupt_return_srr
@@ -1913,8 +1952,8 @@ EXC_VIRT_NONE(0x4b00, 0x100)
* Call convention:
*
* syscall and hypercalls register conventions are documented in
- * Documentation/powerpc/syscall64-abi.rst and
- * Documentation/powerpc/papr_hcalls.rst respectively.
+ * Documentation/arch/powerpc/syscall64-abi.rst and
+ * Documentation/arch/powerpc/papr_hcalls.rst respectively.
*
* The intersection of volatile registers that don't contain possible
* inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
@@ -2051,8 +2090,8 @@ EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
EXC_VIRT_END(single_step, 0x4d00, 0x100)
EXC_COMMON_BEGIN(single_step_common)
GEN_COMMON single_step
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl single_step_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(single_step_exception)
b interrupt_return_srr
@@ -2085,11 +2124,11 @@ EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
EXC_COMMON_BEGIN(h_data_storage_common)
GEN_COMMON h_data_storage
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
BEGIN_MMU_FTR_SECTION
- bl do_bad_page_fault_segv
+ bl CFUNC(do_bad_page_fault_segv)
MMU_FTR_SECTION_ELSE
- bl unknown_exception
+ bl CFUNC(unknown_exception)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
b interrupt_return_hsrr
@@ -2114,8 +2153,8 @@ EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
EXC_COMMON_BEGIN(h_instr_storage_common)
GEN_COMMON h_instr_storage
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(unknown_exception)
b interrupt_return_hsrr
@@ -2137,9 +2176,9 @@ EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
EXC_COMMON_BEGIN(emulation_assist_common)
GEN_COMMON emulation_assist
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl emulation_assist_interrupt
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(emulation_assist_interrupt)
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_hsrr
@@ -2197,8 +2236,8 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
__GEN_COMMON_BODY hmi_exception_early
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl hmi_exception_realmode
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(hmi_exception_realmode)
cmpdi cr0,r3,0
bne 1f
@@ -2215,8 +2254,8 @@ EXC_COMMON_BEGIN(hmi_exception_early_common)
EXC_COMMON_BEGIN(hmi_exception_common)
GEN_COMMON hmi_exception
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl handle_hmi_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(handle_hmi_exception)
b interrupt_return_hsrr
@@ -2249,11 +2288,11 @@ EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
EXC_COMMON_BEGIN(h_doorbell_common)
GEN_COMMON h_doorbell
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_PPC_DOORBELL
- bl doorbell_exception
+ bl CFUNC(doorbell_exception)
#else
- bl unknown_async_exception
+ bl CFUNC(unknown_async_exception)
#endif
b interrupt_return_hsrr
@@ -2285,8 +2324,8 @@ EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
EXC_COMMON_BEGIN(h_virt_irq_common)
GEN_COMMON h_virt_irq
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_IRQ
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(do_IRQ)
b interrupt_return_hsrr
@@ -2331,10 +2370,22 @@ EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
EXC_COMMON_BEGIN(performance_monitor_common)
GEN_COMMON performance_monitor
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl performance_monitor_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ lbz r4,PACAIRQSOFTMASK(r13)
+ cmpdi r4,IRQS_ENABLED
+ bne 1f
+ bl CFUNC(performance_monitor_exception_async)
b interrupt_return_srr
+1:
+ bl CFUNC(performance_monitor_exception_nmi)
+ /* Clear MSR_RI before setting SRR0 and SRR1. */
+ li r9,0
+ mtmsrd r9,1
+ kuap_kernel_restore r9, r10
+
+ EXCEPTION_RESTORE_REGS hsrr=0
+ RFI_TO_KERNEL
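
In C terms, the new branch above amounts to the following dispatch (a hedged restatement; the real test is done in asm on PACAIRQSOFTMASK, and the IRQS_ENABLED value is assumed here):

	struct pt_regs;				/* opaque for the sketch */
	enum { IRQS_ENABLED = 0 };		/* assumed soft-mask value */

	void performance_monitor_exception_async(struct pt_regs *regs);
	void performance_monitor_exception_nmi(struct pt_regs *regs);

	static void pmi_dispatch(unsigned char soft_mask, struct pt_regs *regs)
	{
		if (soft_mask == IRQS_ENABLED)
			/* normal path, exits via interrupt_return_srr */
			performance_monitor_exception_async(regs);
		else
			/* soft-masked: treat as NMI and return via RFI with
			 * MSR[RI] cleared, as the asm above does */
			performance_monitor_exception_nmi(regs);
	}
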
/**
* Interrupt 0xf20 - Vector Unavailable Interrupt.
@@ -2347,6 +2398,7 @@ INT_DEFINE_BEGIN(altivec_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(altivec_unavailable)
EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
@@ -2369,19 +2421,19 @@ BEGIN_FTR_SECTION
bne- 2f
END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
- bl load_up_altivec
+ bl CFUNC(load_up_altivec)
b fast_interrupt_return_srr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl altivec_unavailable_tm
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(altivec_unavailable_tm)
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl altivec_unavailable_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(altivec_unavailable_exception)
b interrupt_return_srr
@@ -2396,6 +2448,7 @@ INT_DEFINE_BEGIN(vsx_unavailable)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ IMSR_R12=1
INT_DEFINE_END(vsx_unavailable)
EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
@@ -2421,15 +2474,15 @@ BEGIN_FTR_SECTION
b load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2: /* User process was in a transaction */
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl vsx_unavailable_tm
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(vsx_unavailable_tm)
b interrupt_return_srr
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl vsx_unavailable_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(vsx_unavailable_exception)
b interrupt_return_srr
@@ -2455,9 +2508,9 @@ EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
EXC_COMMON_BEGIN(facility_unavailable_common)
GEN_COMMON facility_unavailable
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl facility_unavailable_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(facility_unavailable_exception)
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
b interrupt_return_srr
@@ -2483,9 +2536,10 @@ EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
EXC_COMMON_BEGIN(h_facility_unavailable_common)
GEN_COMMON h_facility_unavailable
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl facility_unavailable_exception
- REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(facility_unavailable_exception)
+ /* XXX Shouldn't be necessary in practice */
+ HANDLER_RESTORE_NVGPRS()
b interrupt_return_hsrr
@@ -2513,8 +2567,8 @@ EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
EXC_VIRT_NONE(0x5200, 0x100)
EXC_COMMON_BEGIN(cbe_system_error_common)
GEN_COMMON cbe_system_error
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl cbe_system_error_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(cbe_system_error_exception)
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
@@ -2544,8 +2598,8 @@ EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
EXC_COMMON_BEGIN(instruction_breakpoint_common)
GEN_COMMON instruction_breakpoint
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl instruction_breakpoint_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(instruction_breakpoint_exception)
b interrupt_return_srr
@@ -2666,8 +2720,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
EXC_COMMON_BEGIN(denorm_exception_common)
GEN_COMMON denorm_exception
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl unknown_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(unknown_exception)
b interrupt_return_hsrr
@@ -2683,8 +2737,8 @@ EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
EXC_VIRT_NONE(0x5600, 0x100)
EXC_COMMON_BEGIN(cbe_maintenance_common)
GEN_COMMON cbe_maintenance
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl cbe_maintenance_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(cbe_maintenance_exception)
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
@@ -2708,12 +2762,12 @@ EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
EXC_COMMON_BEGIN(altivec_assist_common)
GEN_COMMON altivec_assist
- addi r3,r1,STACK_FRAME_OVERHEAD
+ addi r3,r1,STACK_INT_FRAME_REGS
#ifdef CONFIG_ALTIVEC
- bl altivec_assist_exception
- REST_NVGPRS(r1) /* instruction emulation may change GPRs */
+ bl CFUNC(altivec_assist_exception)
+ HANDLER_RESTORE_NVGPRS() /* instruction emulation may change GPRs */
#else
- bl unknown_exception
+ bl CFUNC(unknown_exception)
#endif
b interrupt_return_srr
@@ -2730,8 +2784,8 @@ EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
EXC_VIRT_NONE(0x5800, 0x100)
EXC_COMMON_BEGIN(cbe_thermal_common)
GEN_COMMON cbe_thermal
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl cbe_thermal_exception
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(cbe_thermal_exception)
b interrupt_return_hsrr
#else /* CONFIG_CBE_RAS */
@@ -2763,8 +2817,8 @@ EXC_COMMON_BEGIN(soft_nmi_common)
subi r1,r1,INT_FRAME_SIZE
__GEN_COMMON_BODY soft_nmi
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl soft_nmi_interrupt
+ addi r3,r1,STACK_INT_FRAME_REGS
+ bl CFUNC(soft_nmi_interrupt)
/* Clear MSR_RI before setting SRR0 and SRR1. */
li r9,0
@@ -2779,7 +2833,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
/*
* An interrupt came in while soft-disabled. We set paca->irq_happened, then:
- * - If it was a decrementer interrupt, we bump the dec to max and and return.
+ * - If it was a decrementer interrupt, we bump the dec to max and return.
* - If it was a doorbell we return immediately since doorbells are edge
* triggered and won't automatically refire.
* - If it was a HMI we return immediately since we handled it in realmode
@@ -2794,6 +2848,20 @@ masked_Hinterrupt:
masked_interrupt:
.endif
stw r9,PACA_EXGEN+EX_CCR(r13)
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ /*
+ * Ensure there was no previous MUST_HARD_MASK interrupt or
+ * HARD_DIS setting. If this does fire, the interrupt is still
+ * masked and MSR[EE] will be cleared on return, so no need to
+ * panic, but somebody probably enabled MSR[EE] under
+ * PACA_IRQ_HARD_DIS, mtmsr(mfmsr() | MSR_x) being a common
+ * cause.
+ */
+ lbz r9,PACAIRQHAPPENED(r13)
+ andi. r9,r9,(PACA_IRQ_MUST_HARD_MASK|PACA_IRQ_HARD_DIS)
+0: tdnei r9,0
+ EMIT_WARN_ENTRY 0b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+#endif
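
The bug class this warning is meant to catch looks roughly like the following (an illustrative sketch with a hypothetical helper, not a real call site; kernel context assumed for mfmsr()/mtmsr()/MSR_FP):

	static void msr_stale_ee_bug(void)
	{
		unsigned long msr = mfmsr();	/* MSR[EE] still set in this image */

		something_that_hard_disables();	/* hypothetical: sets
						   PACA_IRQ_HARD_DIS, clears EE */

		mtmsr(msr | MSR_FP);		/* bug: writes the stale EE=1 back
						   while PACA_IRQ_HARD_DIS is set */
	}
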
lbz r9,PACAIRQHAPPENED(r13)
or r9,r9,r10
stb r9,PACAIRQHAPPENED(r13)
@@ -3034,22 +3102,6 @@ EXPORT_SYMBOL(do_uaccess_flush)
MASKED_INTERRUPT
MASKED_INTERRUPT hsrr=1
- /*
- * Relocation-on interrupts: A subset of the interrupts can be delivered
- * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
- * it. Addresses are the same as the original interrupt addresses, but
- * offset by 0xc000000000004000.
- * It's impossible to receive interrupts below 0x300 via this mechanism.
- * KVM: None of these traps are from the guest ; anything that escalated
- * to HV=1 from HV=0 is delivered via real mode handlers.
- */
-
- /*
- * This uses the standard macro, since the original 0x300 vector
- * only has extra guff for STAB-based processors -- which never
- * come here.
- */
-
USE_FIXED_SECTION(virt_trampolines)
/*
* All code below __end_soft_masked is treated as soft-masked. If
@@ -3075,7 +3127,7 @@ CLOSE_FIXED_SECTION(virt_trampolines);
USE_TEXT_SECTION()
/* MSR[RI] should be clear because this uses SRR[01] */
-enable_machine_check:
+_GLOBAL(enable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
@@ -3089,7 +3141,7 @@ enable_machine_check:
blr
/* MSR[RI] should be clear because this uses SRR[01] */
-disable_machine_check:
+SYM_FUNC_START_LOCAL(disable_machine_check)
mflr r0
bcl 20,31,$+4
0: mflr r3
@@ -3102,3 +3154,4 @@ disable_machine_check:
RFI_TO_KERNEL
1: mtlr r0
blr
+SYM_FUNC_END(disable_machine_check)