author    Arnd Bergmann <arnd@arndb.de>  2021-08-11 08:30:18 +0100
committer Russell King (Oracle) <rmk+kernel@armlinux.org.uk>  2021-08-20 11:39:25 +0100
commit    2423de2e6f4d8676b6f6e43dee437461023ca6a1 (patch)
tree      06f5aa0dc1bb84599a5ed11a505c901a1ca3ad39 /mm/maccess.c
parent    ARM: 9105/1: atags_to_fdt: don't warn about stack size (diff)
download  linux-dev-2423de2e6f4d8676b6f6e43dee437461023ca6a1.tar.xz
          linux-dev-2423de2e6f4d8676b6f6e43dee437461023ca6a1.zip
ARM: 9115/1: mm/maccess: fix unaligned copy_{from,to}_kernel_nofault
On machines such as ARMv5 that trap unaligned accesses, these two
functions can be slow when each access needs to be emulated, or they
might not work at all. Change them so that each loop is only used when
both the src and dst pointers are naturally aligned.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
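[Editor's note: the patch relies on a common trick: OR-ing the two pointer
values merges their low-order bits, so one mask test per access width tells
whether *both* src and dst are naturally aligned for that width. A minimal
user-space sketch of the check; the helper name and test addresses are
illustrative, not kernel code:]

#include <stdio.h>

/* Illustrative helper, not part of the kernel: report the widest
 * access type that both pointers are naturally aligned for. OR-ing
 * the addresses sets a low bit if either pointer is misaligned at
 * that width, mirroring the align checks in the patch below. */
static const char *widest_aligned_access(const void *dst, const void *src)
{
	unsigned long align = (unsigned long)dst | (unsigned long)src;

	if (!(align & 7))
		return "u64";	/* both 8-byte aligned */
	if (!(align & 3))
		return "u32";	/* both 4-byte aligned */
	if (!(align & 1))
		return "u16";	/* both 2-byte aligned */
	return "u8";		/* byte accesses are always safe */
}

int main(void)
{
	/* Addresses are only inspected, never dereferenced. */
	printf("%s\n", widest_aligned_access((void *)0x1000, (void *)0x2008)); /* u64 */
	printf("%s\n", widest_aligned_access((void *)0x1004, (void *)0x2008)); /* u32 */
	printf("%s\n", widest_aligned_access((void *)0x1001, (void *)0x2008)); /* u8  */
	return 0;
}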
Diffstat (limited to 'mm/maccess.c')
-rw-r--r--  mm/maccess.c  28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/mm/maccess.c b/mm/maccess.c
index 3bd70405f2d8..d3f1a1f0b1c1 100644
--- a/mm/maccess.c
+++ b/mm/maccess.c
@@ -24,13 +24,21 @@ bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
long copy_from_kernel_nofault(void *dst, const void *src, size_t size)
{
+ unsigned long align = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ align = (unsigned long)dst | (unsigned long)src;
+
if (!copy_from_kernel_nofault_allowed(src, size))
return -ERANGE;
pagefault_disable();
- copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
- copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
- copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
+ if (!(align & 7))
+ copy_from_kernel_nofault_loop(dst, src, size, u64, Efault);
+ if (!(align & 3))
+ copy_from_kernel_nofault_loop(dst, src, size, u32, Efault);
+ if (!(align & 1))
+ copy_from_kernel_nofault_loop(dst, src, size, u16, Efault);
copy_from_kernel_nofault_loop(dst, src, size, u8, Efault);
pagefault_enable();
return 0;
@@ -50,10 +58,18 @@ EXPORT_SYMBOL_GPL(copy_from_kernel_nofault);
long copy_to_kernel_nofault(void *dst, const void *src, size_t size)
{
+ unsigned long align = 0;
+
+ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ align = (unsigned long)dst | (unsigned long)src;
+
pagefault_disable();
- copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
- copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
- copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
+ if (!(align & 7))
+ copy_to_kernel_nofault_loop(dst, src, size, u64, Efault);
+ if (!(align & 3))
+ copy_to_kernel_nofault_loop(dst, src, size, u32, Efault);
+ if (!(align & 1))
+ copy_to_kernel_nofault_loop(dst, src, size, u16, Efault);
copy_to_kernel_nofault_loop(dst, src, size, u8, Efault);
pagefault_enable();
return 0;
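[Editor's note: the guarded loops chain. After the u64 pass the remaining
length is under 8 bytes, and the pointers stay aligned because they only
ever advance by the chunk size, so the smaller passes mop up the tail. A
user-space analogue under assumed names, with plain memcpy standing in for
the faulting __get_kernel_nofault/__put_kernel_nofault accessors:]

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* User-space analogue of the kernel's copy loops (names assumed,
 * memcpy replaces the faulting accessors): consume as many chunks
 * of each size as fit, then fall through to the next smaller one. */
#define copy_nofault_loop(dst, src, len, type)		\
	while (len >= sizeof(type)) {			\
		memcpy(dst, src, sizeof(type));		\
		dst += sizeof(type);			\
		src += sizeof(type);			\
		len -= sizeof(type);			\
	}

static void copy_alignment_aware(void *dstp, const void *srcp, size_t size)
{
	unsigned char *dst = dstp;
	const unsigned char *src = srcp;
	/* As in the patch: one misaligned pointer disables wide chunks. */
	unsigned long align = (unsigned long)dst | (unsigned long)src;

	if (!(align & 7))
		copy_nofault_loop(dst, src, size, uint64_t);
	if (!(align & 3))
		copy_nofault_loop(dst, src, size, uint32_t);
	if (!(align & 1))
		copy_nofault_loop(dst, src, size, uint16_t);
	copy_nofault_loop(dst, src, size, uint8_t);
}

int main(void)
{
	char out[32] = { 0 };

	/* A 25-byte copy from an arbitrarily aligned source. */
	copy_alignment_aware(out, "alignment-aware copy demo", 25);
	puts(out);
	return 0;
}

[For example, a 25-byte copy between 8-byte-aligned buffers becomes three
u64 accesses plus one byte access, rather than 25 byte accesses, while a
copy with one odd pointer safely falls back to bytes on a trapping CPU.]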