author     Christophe Leroy <christophe.leroy@c-s.fr>   2019-03-11 08:30:38 +0000
committer  Michael Ellerman <mpe@ellerman.id.au>        2019-04-21 23:11:47 +1000
commit     a68c31fc01ef7863acc0fc74694bf279456a58c4 (patch)
tree       dca29a37f12c08381742abc20dc7364aec9fab67 /arch/powerpc/kernel
parent     powerpc/32s: Prepare Kernel Userspace Access Protection (diff)
powerpc/32s: Implement Kernel Userspace Access Protection
This patch implements Kernel Userspace Access Protection for book3s/32.

Due to limitations of the processor's page protection capabilities, the protection is only against writing; read protection cannot be achieved using page protection.

The previous patch modifies the page protection so that RW user pages are RW for Key 0 and RO for Key 1, and sets Key 0 for both user and kernel.

With this patch, userspace segment registers are set to Ku 0 and Ks 1. When the kernel needs to write to RW user pages, the associated segment register is changed to Ks 0 to allow the kernel write access.

To avoid having to read all segment registers when locking/unlocking access, some state is kept in the thread_struct and saved on the stack on exceptions. The field identifies both the first unlocked segment and the first segment following the last unlocked one. When no segment is unlocked, it contains the value 0.

As the hash_page() function cannot easily determine whether a protfault is due to a bad kernel access to userspace, protfaults need to be handled by handle_page_fault when KUAP is set.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
[mpe: Drop allow_read/write_to/from_user() as they're now in kup.h, and adapt allow_user_access() to do nothing when to == NULL]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
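The lock/unlock primitives themselves live in arch/powerpc/include/asm/book3s/32/kup.h, which is outside this diffstat, but the description above is enough for a rough sketch. The following is a minimal, hedged C sketch assuming a hypothetical set_segment_ks() helper (standing in for the mfsrin/mtsrin sequence) and a simplified encoding of the thread_struct.kuap field; it is illustrative only, not the kup.h code.

/* Illustrative sketch only -- not the real kup.h implementation. */
#include <linux/sched.h>			/* current, struct thread_struct */

void set_segment_ks(unsigned long ea, int ks);	/* hypothetical helper */

static inline void sketch_allow_write_to_user(void __user *to, unsigned long size)
{
	unsigned long start = (unsigned long)to;
	unsigned long end = start + size;
	unsigned long ea;

	if (!to || !size)			/* covers the to == NULL case */
		return;

	/*
	 * Record the first unlocked segment and the first segment after
	 * the last unlocked one, so exception entry/exit never has to
	 * read back all 16 segment registers. 0 means "all locked".
	 * (Simplified encoding, for illustration.)
	 */
	current->thread.kuap = (start & 0xf0000000) | (((end - 1) >> 28) + 1);

	/* Ks = 0: kernel accesses use Key 0, for which user RW pages are RW. */
	for (ea = start & 0xf0000000; ea < end; ea += 0x10000000)
		set_segment_ks(ea, 0);
}

static inline void sketch_prevent_write_to_user(void __user *to, unsigned long size)
{
	unsigned long start = (unsigned long)to;
	unsigned long end = start + size;
	unsigned long ea;

	if (!to || !size)
		return;

	current->thread.kuap = 0;		/* everything locked again */

	for (ea = start & 0xf0000000; ea < end; ea += 0x10000000)
		set_segment_ks(ea, 1);		/* Ks = 1: back to read-only Key 1 */
}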
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c |  3
-rw-r--r--  arch/powerpc/kernel/head_32.S     | 11
2 files changed, 14 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 66202e02fee2..60b82198de7c 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -147,6 +147,9 @@ int main(void)
#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
OFFSET(THREAD_KVM_VCPU, thread_struct, kvm_vcpu);
#endif
+#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
+ OFFSET(KUAP, thread_struct, kuap);
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
OFFSET(PACATMSCRATCH, paca_struct, tm_scratch);
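The asm-offsets.c hunk above does not change any runtime behaviour: the file is compiled only to emit assembler-visible constants, so head_32.S can address thread_struct.kuap by the generated KUAP offset without knowing the C structure layout. A small stand-alone C sketch of the same offsetof technique (the structure and build flow here are illustrative, not the kernel's asm-offsets machinery):

#include <stddef.h>	/* offsetof */
#include <stdio.h>

/* Toy stand-in for the kernel's thread_struct. */
struct thread_struct_sketch {
	unsigned long ksp;
	unsigned long kuap;	/* the field this patch exposes to assembly */
};

int main(void)
{
	/* The kernel emits this through special asm markers that a build
	 * script turns into a generated header; printing it is enough to
	 * show the idea outside the kernel build. */
	printf("#define KUAP %zu\n",
	       offsetof(struct thread_struct_sketch, kuap));
	/* Assembly can then use e.g. "lwz rN, KUAP(rThread)" to load the
	 * saved KUAP state (register names illustrative). */
	return 0;
}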
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 69b97cc7079f..40aec3f00a05 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -387,7 +387,11 @@ DataAccess:
EXCEPTION_PROLOG
mfspr r10,SPRN_DSISR
stw r10,_DSISR(r11)
+#ifdef CONFIG_PPC_KUAP
+ andis. r0,r10,(DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
+#else
andis. r0,r10,(DSISR_BAD_FAULT_32S|DSISR_DABRMATCH)@h
+#endif
bne 1f /* if not, try to put a PTE */
mfspr r4,SPRN_DAR /* into the hash table */
rlwinm r3,r10,32-15,21,21 /* DSISR_STORE -> _PAGE_RW */
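The DataAccess change above widens the test that sends a fault straight to the generic handler: with KUAP enabled, hash_page() cannot easily tell a protection fault caused by a blocked kernel write to userspace from an ordinary Linux PTE permission fault, so DSISR_PROTFAULT joins the mask. A hedged C restatement of the same andis. test, for readability only:

#include <linux/kconfig.h>	/* IS_ENABLED */
#include <asm/reg.h>		/* DSISR_* bit definitions */

/* Non-zero result: skip the hash_page() fast path and go directly to
 * handle_page_fault (mirrors the andis./bne pair above). */
static inline int sketch_skip_hash_page(unsigned long dsisr)
{
	unsigned long mask = DSISR_BAD_FAULT_32S | DSISR_DABRMATCH;

	if (IS_ENABLED(CONFIG_PPC_KUAP))
		mask |= DSISR_PROTFAULT;

	return (dsisr & mask) != 0;
}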
@@ -902,6 +906,9 @@ load_up_mmu:
#ifdef CONFIG_PPC_KUEP
oris r3, r3, SR_NX@h /* Set Nx */
#endif
+#ifdef CONFIG_PPC_KUAP
+ oris r3, r3, SR_KS@h /* Set Ks */
+#endif
li r4,0
3: mtsrin r3,r4
addi r3,r3,0x111 /* increment VSID */
@@ -910,6 +917,7 @@ load_up_mmu:
li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
mtctr r0 /* for context 0 */
rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
+ rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
oris r3, r3, SR_KP@h /* Kp = 1 */
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
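Taken together, the two loops above leave user segments with Ks = 1 (plus Nx under KUEP) and kernel segments with Kp = 1, Ks = 0. Combined with the page protection set up by the previous patch (user RW pages writable under Key 0, read-only under Key 1), this is what makes stray kernel writes to userspace fault by default. Below is a hedged model of the classic 6xx key/PP lookup this relies on; the function and enum names are illustrative, and only the PP encoding the commit message describes is spelled out:

enum sketch_access { SKETCH_NONE, SKETCH_RO, SKETCH_RW };

/* Access granted for a page with protection bits 'pp' when the active
 * key is 'key' (0 or 1). pp == 1 matches what the commit message
 * describes for user RW pages: RW under Key 0, RO under Key 1. */
static enum sketch_access sketch_page_access(int key, unsigned int pp)
{
	switch (pp) {
	case 0:  return key ? SKETCH_NONE : SKETCH_RW;
	case 1:  return key ? SKETCH_RO   : SKETCH_RW;	/* user RW pages */
	case 2:  return SKETCH_RW;
	default: return SKETCH_RO;			/* pp == 3 */
	}
}

/* The key an access uses comes from the segment register: Ks for
 * supervisor (kernel) accesses, Kp for problem-state (user) accesses.
 * So with Ks = 1 a kernel write to a user RW page is refused, and
 * unlocking KUAP just means flipping Ks back to 0 for that segment. */
static enum sketch_access sketch_kernel_access(int sr_ks, unsigned int pp)
{
	return sketch_page_access(sr_ks, pp);
}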
@@ -1020,6 +1028,9 @@ _ENTRY(switch_mmu_context)
#ifdef CONFIG_PPC_KUEP
oris r3, r3, SR_NX@h /* Set Nx */
#endif
+#ifdef CONFIG_PPC_KUAP
+ oris r3, r3, SR_KS@h /* Set Ks */
+#endif
li r0,NUM_USER_SEGMENTS
mtctr r0