authorClément Léger <cleger@rivosinc.com>2025-06-02 21:39:16 +0200
committerPalmer Dabbelt <palmer@dabbelt.com>2025-06-05 11:39:17 -0700
commitca1a66cdd685030738cf077e3955fdedfe39fbb9 (patch)
treee61cbc87426d64140b77f0491e57f0847785766e /arch
parentriscv: process: use unsigned int instead of unsigned long for put_user() (diff)
riscv: uaccess: do not do misaligned accesses in get/put_user()
Doing a misaligned access to userspace memory traps on platforms where such accesses are emulated. Recent fixes removed the kernel's ability to perform unaligned accesses to userspace memory safely, since interrupts are now kept disabled for the whole duration of the access; doing so would therefore crash the kernel. Such behavior was detected with GET_UNALIGN_CTL(), which was doing a put_user() through an unsigned long * address that should have been an unsigned int *.

Re-enabling kernel misaligned access emulation is somewhat risky and would also degrade performance. Rather than doing that, avoid misaligned accesses altogether by falling back to copy_from/to_user(), which never performs misaligned accesses. The fallback is only used for !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, so only that config generates a bit more code.

Signed-off-by: Clément Léger <cleger@rivosinc.com>
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Link: https://lore.kernel.org/r/20250602193918.868962-4-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
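The approach can be illustrated outside the kernel with a minimal sketch. The names copy_bytes() and get_user_u32() below are hypothetical stand-ins for __asm_copy_from_user_sum_enabled() and the __get_user_nocheck() fast path, and the sketch checks alignment unconditionally, whereas the kernel compiles the check in only for !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for __asm_copy_from_user_sum_enabled(): copies
 * byte by byte, so it never issues a load or store wider than one byte
 * and therefore cannot trap on platforms that emulate misaligned accesses.
 * Returns 0 on success, matching the uaccess helpers' convention.
 */
static int copy_bytes(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n--)
		*d++ = *s++;
	return 0;
}

/*
 * Simplified model of the reworked fast/slow split: an aligned pointer is
 * read with one native-width load (like the "lw" case in __get_user_asm()),
 * a misaligned one takes the byte-copy fallback instead of trapping.
 */
static int get_user_u32(uint32_t *dst, const uint32_t *uptr)
{
	if ((uintptr_t)uptr % sizeof(*uptr) != 0)
		return copy_bytes(dst, uptr, sizeof(*uptr));

	*dst = *uptr;
	return 0;
}

int main(void)
{
	unsigned char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	uint32_t v;

	/* &buf[1] is not 4-byte aligned, so the fallback path is taken. */
	get_user_u32(&v, (const uint32_t *)&buf[1]);
	printf("0x%08x\n", v);
	return 0;
}
```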
Diffstat (limited to 'arch')
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 23
1 file changed, 18 insertions(+), 5 deletions(-)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index 046de7ced09c..d472da4450e6 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -169,8 +169,19 @@ do { \
 #endif /* CONFIG_64BIT */
+unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
+        const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
+        const void __user *from, unsigned long n);
+
 #define __get_user_nocheck(x, __gu_ptr, label) \
 do { \
+        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+            !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+                if (__asm_copy_from_user_sum_enabled(&(x), __gu_ptr, sizeof(*__gu_ptr))) \
+                        goto label; \
+                break; \
+        } \
         switch (sizeof(*__gu_ptr)) { \
         case 1: \
                 __get_user_asm("lb", (x), __gu_ptr, label); \
@@ -297,6 +308,13 @@ do { \
 #define __put_user_nocheck(x, __gu_ptr, label) \
 do { \
+        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
+            !IS_ALIGNED((uintptr_t)__gu_ptr, sizeof(*__gu_ptr))) { \
+                __inttype(x) val = (__inttype(x))x; \
+                if (__asm_copy_to_user_sum_enabled(__gu_ptr, &(val), sizeof(*__gu_ptr))) \
+                        goto label; \
+                break; \
+        } \
         switch (sizeof(*__gu_ptr)) { \
         case 1: \
                 __put_user_asm("sb", (x), __gu_ptr, label); \
@@ -450,11 +468,6 @@ static inline void user_access_restore(unsigned long enabled) { }
         (x) = (__force __typeof__(*(ptr)))__gu_val; \
 } while (0)
-unsigned long __must_check __asm_copy_to_user_sum_enabled(void __user *to,
-        const void *from, unsigned long n);
-unsigned long __must_check __asm_copy_from_user_sum_enabled(void *to,
-        const void __user *from, unsigned long n);
-
 #define unsafe_copy_to_user(_dst, _src, _len, label) \
         if (__asm_copy_to_user_sum_enabled(_dst, _src, _len)) \
                 goto label;
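For reference, the kind of misaligned target that motivated the series, GET_UNALIGN_CTL() doing a put_user() through an unsigned long * address that should have been an unsigned int *, can be shown with a small userspace-style sketch (illustrative only, not kernel code; it assumes an LP64 target where unsigned long is 8 bytes):

```c
#include <stdalign.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Two 4-byte slots inside storage that is 8-byte aligned overall. */
	alignas(8) unsigned int slots[2];

	/*
	 * &slots[1] is 4-byte aligned but not 8-byte aligned.  Viewing it
	 * as an unsigned long * (as the old GET_UNALIGN_CTL() put_user()
	 * effectively did on 64-bit) turns an 8-byte store through it into
	 * a misaligned access, which is exactly what get/put_user() must
	 * now avoid when misaligned accesses are emulated.
	 */
	unsigned long *wrong = (unsigned long *)&slots[1];
	unsigned int *right = &slots[1];

	printf("as unsigned long *: %s\n",
	       (uintptr_t)wrong % sizeof(*wrong) ? "misaligned" : "aligned");
	printf("as unsigned int  *: %s\n",
	       (uintptr_t)right % sizeof(*right) ? "misaligned" : "aligned");
	return 0;
}
```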