author	Will Deacon <will@kernel.org>	2019-08-13 16:26:54 +0100
committer	Will Deacon <will@kernel.org>	2019-08-14 13:04:46 +0100
commit	577c2b35283fbadcc9ce4b56304ccea3ec8a5ca1 (patch)
tree	8cef1e56b90a19ce79989f0b606ae88e1397343e /arch/arm64
parent	arm64: memory: Fix virt_addr_valid() using __is_lm_address() (diff)
arm64: memory: Ensure address tag is masked in conversion macros
When converting a linear virtual address to a physical address, pfn or
struct page *, we must make sure that the tag bits are masked before the
calculation, otherwise we end up with corrupt pointers when running with
CONFIG_KASAN_SW_TAGS=y:

  | Unable to handle kernel paging request at virtual address 0037fe0007580d08
  | [0037fe0007580d08] address between user and kernel address ranges

Mask out the tag in __virt_to_phys_nodebug() and virt_to_page().

Reported-by: Qian Cai <cai@lca.pw>
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Tested-by: Steve Capper <steve.capper@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Tested-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Fixes: 9cb1c5ddd2c4 ("arm64: mm: Remove bit-masking optimisations for PAGE_OFFSET and VMEMMAP_START")
Signed-off-by: Will Deacon <will@kernel.org>
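As background (not part of the commit): with CONFIG_KASAN_SW_TAGS=y, kernel
pointers carry a KASAN tag in their top byte, which the CPU ignores on loads
and stores (ARMv8 Top Byte Ignore) but ordinary address arithmetic does not.
The stand-alone sketch below models that failure mode; tag_reset() mimics the
sign-extend-from-bit-55 behaviour of the kernel's __tag_reset(), while the
addresses, the 0xb4 tag and the offset are made-up illustrative values:

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t tag_reset(uint64_t addr)
  {
          /* Sign-extend from bit 55: for a kernel address (bit 55 set) the
           * top byte becomes 0xff again, erasing any tag stored there. */
          return (uint64_t)((int64_t)(addr << 8) >> 8);
  }

  int main(void)
  {
          uint64_t plain  = 0xffff000007580d08ULL;  /* made-up linear-map address */
          uint64_t tagged = (plain & 0x00ffffffffffffffULL) | (0xb4ULL << 56);
          uint64_t offset = 0x80000000ULL;          /* made-up conversion offset */

          /* Arithmetic on the tagged value keeps the bogus top byte ... */
          printf("tagged + offset:   %#018llx\n",
                 (unsigned long long)(tagged + offset));
          /* ... masking the tag first gives the intended result. */
          printf("untagged + offset: %#018llx\n",
                 (unsigned long long)(tag_reset(tagged) + offset));
          return 0;
  }

The two printed values disagree, which is why the tag has to be stripped
before any linear-map arithmetic is attempted.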
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/memory.h	5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 93ef8e5c6971..243e05ad4a67 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -252,7 +252,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 #define __kimg_to_phys(addr)	((addr) - kimage_voffset)
 
 #define __virt_to_phys_nodebug(x) ({					\
-	phys_addr_t __x = (phys_addr_t)(x);				\
+	phys_addr_t __x = (phys_addr_t)(__tag_reset(x));		\
 	__is_lm_address(__x) ? __lm_to_phys(__x) :			\
 			       __kimg_to_phys(__x);			\
 })
@@ -324,7 +324,8 @@ static inline void *phys_to_virt(phys_addr_t x)
 	((void *)__addr_tag);						\
 })
 
-#define virt_to_page(vaddr)	((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START))
+#define virt_to_page(vaddr)						\
+	((struct page *)((__virt_to_pgoff(__tag_reset(vaddr))) + VMEMMAP_START))
 #endif
 
 #define virt_addr_valid(addr)	({					\
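For a sense of what the patched virt_to_page() now guarantees, here is a
hedged stand-alone model (again not kernel code): PAGE_OFFSET, VMEMMAP_START,
the page size and the struct page size are made-up stand-ins, and
virt_to_pgoff() only approximates the kernel's __virt_to_pgoff(), but the
structure mirrors the fixed macro: the tag is stripped before the pgoff
arithmetic, so tagged and untagged aliases of an address resolve to the same
struct page:

  #include <assert.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SHIFT      12
  #define PAGE_OFFSET     0xffff000000000000ULL  /* made-up layout */
  #define VMEMMAP_START   0xfffffbfffe000000ULL  /* made-up layout */
  #define STRUCT_PAGE_SZ  64                     /* stand-in for sizeof(struct page) */

  static uint64_t tag_reset(uint64_t addr)
  {
          /* Same sign-extend-from-bit-55 trick as in the sketch above. */
          return (uint64_t)((int64_t)(addr << 8) >> 8);
  }

  /* Rough model of __virt_to_pgoff(): byte offset of the page's
   * struct page within vmemmap, derived from the linear-map offset. */
  static uint64_t virt_to_pgoff(uint64_t vaddr)
  {
          return ((vaddr - PAGE_OFFSET) >> PAGE_SHIFT) * STRUCT_PAGE_SZ;
  }

  static uint64_t virt_to_page(uint64_t vaddr)
  {
          /* The fix: strip the tag before the arithmetic. */
          return virt_to_pgoff(tag_reset(vaddr)) + VMEMMAP_START;
  }

  int main(void)
  {
          uint64_t plain  = 0xffff000007580000ULL;
          uint64_t tagged = (plain & 0x00ffffffffffffffULL) | (0xb4ULL << 56);

          /* Both aliases of the page resolve to the same struct page. */
          assert(virt_to_page(plain) == virt_to_page(tagged));
          printf("struct page at %#llx\n",
                 (unsigned long long)virt_to_page(plain));
          return 0;
  }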