From 9f3419315f3cdc41a7318e4d50ba18a592b30c8c Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 27 Nov 2019 10:00:27 +0000
Subject: arm64: mte: Add PROT_MTE support to mmap() and mprotect()

To enable tagging on a memory range, the user must explicitly opt in via a
new PROT_MTE flag passed to mmap() or mprotect(). Since this is a new memory
type in the AttrIndx field of a pte, simplify the or'ing of these bits over
the protection_map[] attributes by making MT_NORMAL index 0.

There are two conditions for arch_vm_get_page_prot() to return the
MT_NORMAL_TAGGED memory type: (1) the user requested it via PROT_MTE,
registered as VM_MTE in the vm_flags, and (2) the vma supports MTE, decided
during the mmap() call (only) and registered as VM_MTE_ALLOWED.

arch_calc_vm_prot_bits() is responsible for registering the user request as
VM_MTE. The newly introduced arch_calc_vm_flag_bits() sets VM_MTE_ALLOWED if
the mapping is MAP_ANONYMOUS. An MTE-capable filesystem (RAM-based) may be
able to set VM_MTE_ALLOWED during its mmap() file ops call.

In addition, update VM_DATA_DEFAULT_FLAGS to allow mprotect(PROT_MTE) on
stack or brk area.

The Linux mmap() syscall currently ignores unknown PROT_* flags. In the
presence of MTE, an mmap(PROT_MTE) on a file which does not support MTE will
not report an error and the memory will not be mapped as Normal Tagged. For
consistency, mprotect(PROT_MTE) will not report an error either if the
memory range does not support MTE. Two subsequent patches in the series will
propose tightening of this behaviour.

Co-developed-by: Vincenzo Frascino
Signed-off-by: Vincenzo Frascino
Signed-off-by: Catalin Marinas
Cc: Will Deacon
---
 arch/arm64/include/asm/mman.h | 44 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 41 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/include/asm/mman.h')

diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index 081ec8de9ea6..b01051be7750 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -9,16 +9,51 @@
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 	unsigned long pkey __always_unused)
 {
+	unsigned long ret = 0;
+
 	if (system_supports_bti() && (prot & PROT_BTI))
-		return VM_ARM64_BTI;
+		ret |= VM_ARM64_BTI;
 
-	return 0;
+	if (system_supports_mte() && (prot & PROT_MTE))
+		ret |= VM_MTE;
+
+	return ret;
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
+static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+{
+	/*
+	 * Only allow MTE on anonymous mappings as these are guaranteed to be
+	 * backed by tags-capable memory. The vm_flags may be overridden by a
+	 * filesystem supporting MTE (RAM-based).
+	 */
+	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+		return VM_MTE_ALLOWED;
+
+	return 0;
+}
+#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+
 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 {
-	return (vm_flags & VM_ARM64_BTI) ? __pgprot(PTE_GP) : __pgprot(0);
+	pteval_t prot = 0;
+
+	if (vm_flags & VM_ARM64_BTI)
+		prot |= PTE_GP;
+
+	/*
+	 * There are two conditions required for returning a Normal Tagged
+	 * memory type: (1) the user requested it via PROT_MTE passed to
+	 * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
+	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
+	 * mmap() call since mprotect() does not accept MAP_* flags.
+	 */
+	if ((vm_flags & VM_MTE) && (vm_flags & VM_MTE_ALLOWED))
+		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+	return __pgprot(prot);
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
 
@@ -30,6 +65,9 @@ static inline bool arch_validate_prot(unsigned long prot,
 	if (system_supports_bti())
 		supported |= PROT_BTI;
 
+	if (system_supports_mte())
+		supported |= PROT_MTE;
+
 	return (prot & ~supported) == 0;
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
-- 
cgit v1.2.3-59-g8ed1b


From 0042090548740921951f31fc0c20dcdb96638cb0 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Fri, 9 Aug 2019 15:48:46 +0100
Subject: arm64: mte: Validate the PROT_MTE request via arch_validate_flags()

Make use of the newly introduced arch_validate_flags() hook to sanity-check
the PROT_MTE request passed to mmap() and mprotect(). If the mapping does
not support MTE, these syscalls will return -EINVAL.

Signed-off-by: Catalin Marinas
Cc: Will Deacon
---
 arch/arm64/include/asm/mman.h | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

(limited to 'arch/arm64/include/asm/mman.h')

diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index b01051be7750..e3e28f7daf62 100644
--- a/arch/arm64/include/asm/mman.h
+++ b/arch/arm64/include/asm/mman.h
@@ -49,8 +49,10 @@ static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 	 * register (1) as VM_MTE in the vma->vm_flags and (2) as
 	 * VM_MTE_ALLOWED. Note that the latter can only be set during the
 	 * mmap() call since mprotect() does not accept MAP_* flags.
+	 * Checking for VM_MTE only is sufficient since arch_validate_flags()
+	 * does not permit (VM_MTE & !VM_MTE_ALLOWED).
 	 */
-	if ((vm_flags & VM_MTE) && (vm_flags & VM_MTE_ALLOWED))
+	if (vm_flags & VM_MTE)
 		prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
 
 	return __pgprot(prot);
@@ -72,4 +74,14 @@ static inline bool arch_validate_prot(unsigned long prot,
 }
 #define arch_validate_prot(prot, addr) arch_validate_prot(prot, addr)
 
+static inline bool arch_validate_flags(unsigned long vm_flags)
+{
+	if (!system_supports_mte())
+		return true;
+
+	/* only allow VM_MTE if VM_MTE_ALLOWED has been set previously */
+	return !(vm_flags & VM_MTE) || (vm_flags & VM_MTE_ALLOWED);
+}
+#define arch_validate_flags(vm_flags) arch_validate_flags(vm_flags)
+
 #endif /* ! __ASM_MMAN_H__ */
-- 
cgit v1.2.3-59-g8ed1b
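
Taken together, the two patches give userspace the behaviour described in the
commit messages: PROT_MTE is honoured on anonymous mappings (which get
VM_MTE_ALLOWED), and once arch_validate_flags() is wired up, mmap() and
mprotect() return -EINVAL when the mapping cannot support MTE. The sketch
below is not part of the series; it is a minimal userspace illustration of
that behaviour, assuming an arm64 kernel built with MTE support and tagging
enabled. The PROT_MTE fallback definition and the choice of /proc/self/exe as
a non-MTE file mapping are illustrative assumptions, not taken from the
patches.

/*
 * Minimal userspace sketch (not part of the patches above) of the PROT_MTE
 * opt-in behaviour. Assumes an arm64 kernel with MTE enabled; the PROT_MTE
 * fallback value below matches the arm64 uapi <asm/mman.h>.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE	0x20	/* arm64-specific protection flag */
#endif

int main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);

	/*
	 * Anonymous mappings are tag-capable, so arch_calc_vm_flag_bits()
	 * sets VM_MTE_ALLOWED and the PROT_MTE request is honoured.
	 */
	void *anon = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_MTE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (anon == MAP_FAILED)
		perror("mmap(MAP_ANONYMOUS, PROT_MTE)");
	else
		printf("tagged anonymous mapping at %p\n", anon);

	/*
	 * A regular file mapping never gets VM_MTE_ALLOWED, so once
	 * arch_validate_flags() is in place, mprotect(PROT_MTE) on it is
	 * rejected with -EINVAL rather than silently ignored.
	 */
	int fd = open("/proc/self/exe", O_RDONLY);
	void *file = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (file != MAP_FAILED &&
	    mprotect(file, len, PROT_READ | PROT_MTE) != 0)
		printf("mprotect(PROT_MTE) on file mapping: %s\n",
		       strerror(errno));

	return 0;
}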