author	Nicholas Piggin <npiggin@gmail.com>	2021-06-30 17:46:16 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2021-06-30 22:21:20 +1000
commit	325678fd052259e7c05ef29060a73c705ea90432 (patch)
tree	4af33e651e345d979c3cdca9af3f9bccd7e10f38 /arch
parent	powerpc/64e: remove implicit soft-masking and interrupt exit restart logic (diff)
powerpc/64s: add a table of implicit soft-masked addresses
Commit 9d1988ca87dd ("powerpc/64: treat low kernel text as irqs soft-masked") ends up catching too much code, including ret_from_fork, and parts of interrupt and syscall return that do not expect interrupts to be soft-masked. If an interrupt gets marked pending, and the code then proceeds out of the implicit soft-masked region, it will fail to deal with the pending interrupt.

Fix this by adding a new table of addresses which explicitly marks the regions of code that are soft-masked. This table is only checked for interrupts that hit below __end_soft_masked, so most kernel interrupts will not have the overhead of the table search.

Fixes: 9d1988ca87dd ("powerpc/64: treat low kernel text as irqs soft-masked")
Reported-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Tested-by: Sachin Sant <sachinp@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210630074621.2109197-5-npiggin@gmail.com
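
In C terms, the lookup this patch introduces looks roughly like the sketch below, condensed from the interrupt.h and restart_table.c hunks that follow. This is a simplified sketch, not a verbatim kernel snippet: nip_is_implicitly_soft_masked is an illustrative name, and the kernel splits the logic across is_implicit_soft_masked() and search_kernel_soft_mask_table().

struct soft_mask_table_entry {
	unsigned long start;
	unsigned long end;
};

/* Table bounds provided by the linker script (see the vmlinux.lds.S hunk). */
extern struct soft_mask_table_entry __start___soft_mask_table[];
extern struct soft_mask_table_entry __stop___soft_mask_table[];
extern char __end_soft_masked[];

static bool nip_is_implicitly_soft_masked(unsigned long nip)
{
	struct soft_mask_table_entry *smte;

	/* Fast path: most kernel text lies above __end_soft_masked. */
	if (nip >= (unsigned long)__end_soft_masked)
		return false;

	/* Linear scan of the small table of [start, end) regions. */
	for (smte = __start___soft_mask_table;
	     smte < __stop___soft_mask_table; smte++) {
		if (nip >= smte->start && nip < smte->end)
			return true;
	}
	return false;
}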
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/interrupt.h	3
-rw-r--r--	arch/powerpc/include/asm/ppc_asm.h	7
-rw-r--r--	arch/powerpc/kernel/exceptions-64s.S	64
-rw-r--r--	arch/powerpc/kernel/interrupt_64.S	8
-rw-r--r--	arch/powerpc/kernel/vmlinux.lds.S	9
-rw-r--r--	arch/powerpc/lib/restart_table.c	26
6 files changed, 106 insertions, 11 deletions
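
The SOFT_MASK_TABLE() macro in the ppc_asm.h hunk below emits a 16-byte {start, end} record into the __soft_mask_table section, and the vmlinux.lds.S hunk gathers those records between __start___soft_mask_table and __stop___soft_mask_table. A hypothetical stand-alone C demo of the same named-section trick follows; the names mask_table, MASK_RANGE, and in_table are illustrative, not from the patch, and the demo relies on GNU ld synthesizing __start_/__stop_ symbols for sections whose names are valid C identifiers (the kernel's linker script declares its bound symbols explicitly instead).

#include <stdbool.h>
#include <stdio.h>

struct range { unsigned long start, end; };

/* Analogue of a SOFT_MASK_TABLE(start, end) annotation: each use drops
 * one record into the "mask_table" section; the linker concatenates
 * records from all translation units. */
#define MASK_RANGE(name, s, e) \
	static const struct range name \
	__attribute__((section("mask_table"), used)) = { (s), (e) }

MASK_RANGE(low_text,    0x1000, 0x2000);
MASK_RANGE(scv_vectors, 0x8000, 0x8100);

/* GNU ld provides these for identifier-named sections. */
extern const struct range __start_mask_table[];
extern const struct range __stop_mask_table[];

static bool in_table(unsigned long addr)
{
	for (const struct range *r = __start_mask_table;
	     r < __stop_mask_table; r++) {
		if (addr >= r->start && addr < r->end)
			return true;
	}
	return false;
}

int main(void)
{
	printf("0x1800 in table: %d\n", in_table(0x1800)); /* prints 1 */
	printf("0x3000 in table: %d\n", in_table(0x3000)); /* prints 0 */
	return 0;
}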
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index f13c93b033c7..d7df247a149c 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -75,6 +75,7 @@
#ifdef CONFIG_PPC_BOOK3S_64
extern char __end_soft_masked[];
+bool search_kernel_soft_mask_table(unsigned long addr);
unsigned long search_kernel_restart_table(unsigned long addr);
DECLARE_STATIC_KEY_FALSE(interrupt_exit_not_reentrant);
@@ -87,7 +88,7 @@ static inline bool is_implicit_soft_masked(struct pt_regs *regs)
if (regs->nip >= (unsigned long)__end_soft_masked)
return false;
- return true;
+ return search_kernel_soft_mask_table(regs->nip);
}
static inline void srr_regs_clobbered(void)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index c9c2c36c1f8f..116c1519728a 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -762,6 +762,13 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
stringify_in_c(.long (_target) - . ;) \
stringify_in_c(.previous)
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.previous)
+
#define RESTART_TABLE(_start, _end, _target) \
stringify_in_c(.section __restart_table,"a";)\
stringify_in_c(.balign 8;) \
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index ecd07bf604c5..4aec59a77d4c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -428,21 +428,31 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
/* If coming from user, skip soft-mask tests. */
andi. r10,r12,MSR_PR
- bne 2f
+ bne 3f
/*
- * Kernel code running below __end_soft_masked is implicitly
- * soft-masked
+ * Kernel code running below __end_soft_masked may be
+ * implicitly soft-masked if it is within the regions
+ * in the soft mask table.
*/
LOAD_HANDLER(r10, __end_soft_masked)
cmpld r11,r10
-
+ bge+ 1f
+
+ /* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
+ mtctr r12
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ SEARCH_SOFT_MASK_TABLE
+ cmpdi r12,0
+ mfctr r12 /* Restore r12 to SRR1 */
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ beq 1f /* Not in soft-mask table */
li r10,IMASK
- blt- 1f
+ b 2f /* In soft-mask table, always mask */
/* Test the soft mask state against our interrupt's bit */
- lbz r10,PACAIRQSOFTMASK(r13)
-1: andi. r10,r10,IMASK
+1: lbz r10,PACAIRQSOFTMASK(r13)
+2: andi. r10,r10,IMASK
/* Associate vector numbers with bits in paca->irq_happened */
.if IVEC == 0x500 || IVEC == 0xea0
li r10,PACA_IRQ_EE
@@ -473,7 +483,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.if ISTACK
andi. r10,r12,MSR_PR /* See if coming from user */
-2: mr r10,r1 /* Save r1 */
+3: mr r10,r1 /* Save r1 */
subi r1,r1,INT_FRAME_SIZE /* alloc frame on kernel stack */
beq- 100f
ld r1,PACAKSAVE(r13) /* kernel stack to use */
@@ -624,6 +634,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
303:
.endm
+.macro SEARCH_SOFT_MASK_TABLE
+#ifdef CONFIG_RELOCATABLE
+ mr r12,r2
+ ld r2,PACATOC(r13)
+ LOAD_REG_ADDR(r9, __start___soft_mask_table)
+ LOAD_REG_ADDR(r10, __stop___soft_mask_table)
+ mr r2,r12
+#else
+ LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
+ LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
+#endif
+300:
+ cmpd r9,r10
+ beq 302f
+ ld r12,0(r9)
+ cmpld r11,r12
+ blt 301f
+ ld r12,8(r9)
+ cmpld r11,r12
+ bge 301f
+ li r12,1
+ b 303f
+301:
+ addi r9,r9,16
+ b 300b
+302:
+ li r12,0
+303:
+.endm
+
/*
* Restore all registers including H/SRR0/1 saved in a stack frame of a
* standard exception.
@@ -754,8 +794,8 @@ __start_interrupts:
* scv instructions enter the kernel without changing EE, RI, ME, or HV.
* In particular, this means we can take a maskable interrupt at any point
* in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_soft_masked as being
- * soft-masked.
+ * by treating the instruction addresses in the handler as being soft-masked,
+ * by adding a SOFT_MASK_TABLE entry for them.
*
* AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
* ensure scv is never executed with relocation off, which means AIL-0
@@ -772,6 +812,7 @@ __start_interrupts:
* syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*/
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
+1:
/* SCV 0 */
mr r9,r13
GET_PACA(r13)
@@ -801,8 +842,11 @@ EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
b system_call_vectored_sigill
#endif
.endr
+2:
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
+SOFT_MASK_TABLE(1b, 2b) // Treat scv vectors as soft-masked, see comment above.
+
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
__LOAD_HANDLER(r10, system_call_vectored_common)
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 09b8d8846c67..2c92bbca02ca 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -207,7 +207,9 @@ syscall_vectored_\name\()_restart:
bl syscall_exit_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Lsyscall_vectored_\name\()_rst_start
+1:
+SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
.endm
@@ -410,7 +412,9 @@ syscall_restart:
bl syscall_exit_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Lsyscall_rst_start
+1:
+SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
#endif
@@ -607,7 +611,9 @@ interrupt_return_\srr\()_user_restart:
bl interrupt_exit_user_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Linterrupt_return_\srr\()_user_rst_start
+1:
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
#endif
@@ -738,7 +744,9 @@ interrupt_return_\srr\()_kernel_restart:
bl interrupt_exit_kernel_restart
std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
b .Linterrupt_return_\srr\()_kernel_rst_start
+1:
+SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
#endif
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 16c5e13e00c4..40bdefe9caa7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -9,6 +9,14 @@
#define EMITS_PT_NOTE
#define RO_EXCEPTION_TABLE_ALIGN 0
+#define SOFT_MASK_TABLE(align) \
+ . = ALIGN(align); \
+ __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
+ __start___soft_mask_table = .; \
+ KEEP(*(__soft_mask_table)) \
+ __stop___soft_mask_table = .; \
+ }
+
#define RESTART_TABLE(align) \
. = ALIGN(align); \
__restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
@@ -132,6 +140,7 @@ SECTIONS
RO_DATA(PAGE_SIZE)
#ifdef CONFIG_PPC64
+ SOFT_MASK_TABLE(8)
RESTART_TABLE(8)
. = ALIGN(8);
diff --git a/arch/powerpc/lib/restart_table.c b/arch/powerpc/lib/restart_table.c
index 7cd20757cc33..bccb662c1b7b 100644
--- a/arch/powerpc/lib/restart_table.c
+++ b/arch/powerpc/lib/restart_table.c
@@ -1,15 +1,41 @@
#include <asm/interrupt.h>
#include <asm/kprobes.h>
+struct soft_mask_table_entry {
+ unsigned long start;
+ unsigned long end;
+};
+
struct restart_table_entry {
unsigned long start;
unsigned long end;
unsigned long fixup;
};
+extern struct soft_mask_table_entry __start___soft_mask_table[];
+extern struct soft_mask_table_entry __stop___soft_mask_table[];
+
extern struct restart_table_entry __start___restart_table[];
extern struct restart_table_entry __stop___restart_table[];
+/* Given an address, look for it in the soft mask table */
+bool search_kernel_soft_mask_table(unsigned long addr)
+{
+ struct soft_mask_table_entry *smte = __start___soft_mask_table;
+
+ while (smte < __stop___soft_mask_table) {
+ unsigned long start = smte->start;
+ unsigned long end = smte->end;
+
+ if (addr >= start && addr < end)
+ return true;
+
+ smte++;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(search_kernel_soft_mask_table);
+
/* Given an address, look for it in the kernel exception table */
unsigned long search_kernel_restart_table(unsigned long addr)
{