author	John David Anglin <dave.anglin@bell.net>	2025-07-25 12:12:14 -0400
committer	Helge Deller <deller@gmx.de>	2025-07-25 22:45:24 +0200
commit	f6334f4ae9a4e962ba74b026e1d965dfdf8cbef8 (patch)
tree	59a00bff8c79d19a9e4e4ec12c63ae9b14f91d64
parent	parisc: Drop WARN_ON_ONCE() from flush_cache_vmap (diff)
parisc: Revise gateway LWS calls to probe user read access
We use load and stbys,e instructions to trigger memory reference interruptions
without writing to memory. Because of the way read access support is
implemented, read access interruptions are only triggered at privilege levels
2 and 3. The kernel and gateway page execute at privilege level 0, so this
code never triggers a read access interruption. Thus, it is currently possible
for user code to execute a LWS compare and swap operation at an address that
is read protected at privilege level 3 (PRIV_USER).

Fix this by probing read access rights at privilege level 3 and branching to
lws_fault if access isn't allowed.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
Cc: stable@vger.kernel.org # v5.12+
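For context, each hunk below inserts the same three-instruction guard
immediately after the load that touches the user address. A minimal sketch of
the pattern, using the registers from the first hunk (the probe result
register is set to a nonzero value when read access is allowed at the
requested privilege level, so a zero result branches to the existing
lws_fault handler):

	proberi	(%r26), PRIV_USER, %r28	/* probe read access at PRIV_USER (privilege level 3) */
	comb,=,n %r28, %r0, lws_fault	/* result == 0: no read access, take the fault path */
	nop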
-rw-r--r--	arch/parisc/kernel/syscall.S	| 30
1 files changed, 21 insertions, 9 deletions
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0fa81bf1466b..f58c4bccfbce 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -613,6 +613,9 @@ lws_compare_and_swap32:
lws_compare_and_swap:
/* Trigger memory reference interruptions without writing to memory */
1: ldw 0(%r26), %r28
+ proberi (%r26), PRIV_USER, %r28
+ comb,=,n %r28, %r0, lws_fault /* backwards, likely not taken */
+ nop
2: stbys,e %r0, 0(%r26)
/* Calculate 8-bit hash index from virtual address */
@@ -767,6 +770,9 @@ cas2_lock_start:
copy %r26, %r28
depi_safe 0, 31, 2, %r28
10: ldw 0(%r28), %r1
+ proberi (%r28), PRIV_USER, %r1
+ comb,=,n %r1, %r0, lws_fault /* backwards, likely not taken */
+ nop
11: stbys,e %r0, 0(%r28)
/* Calculate 8-bit hash index from virtual address */
@@ -951,41 +957,47 @@ atomic_xchg_begin:
/* 8-bit exchange */
1: ldb 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
2: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 16-bit exchange */
3: ldh 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
b atomic_xchg_start
4: stbys,e %r0, 0(%r20)
- nop
- nop
- nop
/* 32-bit exchange */
5: ldw 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
b atomic_xchg_start
6: stbys,e %r0, 0(%r23)
nop
nop
- nop
- nop
- nop
/* 64-bit exchange */
#ifdef CONFIG_64BIT
7: ldd 0(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
8: stdby,e %r0, 0(%r23)
#else
7: ldw 0(%r24), %r20
8: ldw 4(%r24), %r20
+ proberi (%r24), PRIV_USER, %r20
+ comb,=,n %r20, %r0, lws_fault /* backwards, likely not taken */
+ nop
copy %r23, %r20
depi_safe 0, 31, 2, %r20
9: stbys,e %r0, 0(%r20)