about summary refs log tree commit diff stats
path: root/arch/arm64/mm
diff options
context:
space:
mode:
author: Catalin Marinas <catalin.marinas@arm.com> 2017-06-26 14:27:36 +0100
committer: Catalin Marinas <catalin.marinas@arm.com> 2017-08-21 11:12:29 +0100
commit: 3bbf7157ac66a88d94b291d4d5e2b2a9319a0f90 (patch)
tree: 9b06c2107a0c7ee8f5496fcd6bfc06f94441c46e /arch/arm64/mm
parent: Merge branch 'for-next/kernel-mode-neon' into for-next/core (diff)
download: linux-dev-3bbf7157ac66a88d94b291d4d5e2b2a9319a0f90.tar.xz
linux-dev-3bbf7157ac66a88d94b291d4d5e2b2a9319a0f90.zip
arm64: Convert pte handling from inline asm to using (cmp)xchg
With the support for hardware updates of the access and dirty states, the following pte handling functions had to be implemented using exclusives: __ptep_test_and_clear_young(), ptep_get_and_clear(), ptep_set_wrprotect() and ptep_set_access_flags(). To take advantage of the LSE atomic instructions and also make the code cleaner, convert these pte functions to use the more generic cmpxchg()/xchg(). Reviewed-by: Will Deacon <will.deacon@arm.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Steve Capper <steve.capper@arm.com> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--  arch/arm64/mm/fault.c  24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 52ee273afeec..430eaf82da49 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -34,6 +34,7 @@
#include <linux/hugetlb.h>
#include <asm/bug.h>
+#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
@@ -197,8 +198,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty)
{
- pteval_t old_pteval;
- unsigned int tmp;
+ pteval_t old_pteval, pteval;
if (pte_same(*ptep, entry))
return 0;
@@ -208,7 +208,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
/* set PTE_RDONLY if actual read-only or clean PTE */
if (!pte_write(entry) || !pte_sw_dirty(entry))
- pte_val(entry) |= PTE_RDONLY;
+ entry = pte_set_rdonly(entry);
/*
* Setting the flags must be done atomically to avoid racing with the
@@ -217,16 +217,14 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
* (calculated as: a & b == ~(~a | ~b)).
*/
pte_val(entry) ^= PTE_RDONLY;
- asm volatile("// ptep_set_access_flags\n"
- " prfm pstl1strm, %2\n"
- "1: ldxr %0, %2\n"
- " eor %0, %0, %3 // negate PTE_RDONLY in *ptep\n"
- " orr %0, %0, %4 // set flags\n"
- " eor %0, %0, %3 // negate final PTE_RDONLY\n"
- " stxr %w1, %0, %2\n"
- " cbnz %w1, 1b\n"
- : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
- : "L" (PTE_RDONLY), "r" (pte_val(entry)));
+ pteval = READ_ONCE(pte_val(*ptep));
+ do {
+ old_pteval = pteval;
+ pteval ^= PTE_RDONLY;
+ pteval |= pte_val(entry);
+ pteval ^= PTE_RDONLY;
+ pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
+ } while (pteval != old_pteval);
flush_tlb_fix_spurious_fault(vma, address);
return 1;