author    Will Deacon <will.deacon@arm.com>          2018-08-22 21:40:30 +0100
committer Catalin Marinas <catalin.marinas@arm.com>  2018-09-11 16:49:10 +0100
commit    45a284bc5ee3d629b6da1498c2273cb22361416e (patch)
tree      bc6dd47bd4a105654b6fd8821e513cf22b7e698e
parent    arm64: tlb: Use last-level invalidation in flush_tlb_kernel_range() (diff)
arm64: tlb: Add DSB ISHST prior to TLBI in __flush_tlb_[kernel_]pgtable()
__flush_tlb_[kernel_]pgtable() rely on set_pXd() having a DSB after
writing the new table entry and therefore avoid the barrier prior to the
TLBI instruction.

In preparation for delaying our walk-cache invalidation on the unmap()
path, move the DSB into the TLB invalidation routines.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r-- arch/arm64/include/asm/tlbflush.h | 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 7e2a35424ca4..e257f8655b84 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -213,6 +213,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 {
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
 
+	dsb(ishst);
 	__tlbi(vae1is, addr);
 	__tlbi_user(vae1is, addr);
 	dsb(ish);
@@ -222,6 +223,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 {
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);
 
+	dsb(ishst);
 	__tlbi(vaae1is, addr);
 	dsb(ish);
 }
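
For reference, this is roughly how the two helpers read once the hunks
above are applied. The lines outside the hunks, the second argument of
__flush_tlb_pgtable() (inferred from the use of uaddr in the body) and
the comments are my own annotations, not part of the commit:

static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	/*
	 * Make the updated table entry visible to the page-table walkers
	 * of all CPUs in the Inner Shareable domain before invalidating.
	 * Callers previously relied on the DSB in set_pXd() for this.
	 */
	dsb(ishst);
	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	dsb(ish);
}

static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);		/* order the table update before the TLBI */
	__tlbi(vaae1is, addr);
	dsb(ish);		/* complete the invalidation */
}

With the dsb(ishst) inside the flush routines, the ordering between the
page-table write and the TLBI no longer depends on the barrier at the
end of set_pXd(), which is what allows the walk-cache invalidation on
the unmap() path to be deferred in later patches.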