Diffstat (limited to 'arch/arm64/include/asm/tlb.h')
 arch/arm64/include/asm/tlb.h | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index bd00017d529a..b078fdec10d5 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -34,20 +34,21 @@ static void tlb_flush(struct mmu_gather *tlb);
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
 	struct vm_area_struct vma = TLB_FLUSH_VMA(tlb->mm, 0);
+	bool last_level = !tlb->freed_tables;
+	unsigned long stride = tlb_get_unmap_size(tlb);
 
 	/*
-	 * The ASID allocator will either invalidate the ASID or mark
-	 * it as used.
+	 * If we're tearing down the address space then we only care about
+	 * invalidating the walk-cache, since the ASID allocator won't
+	 * reallocate our ASID without invalidating the entire TLB.
 	 */
-	if (tlb->fullmm)
+	if (tlb->fullmm) {
+		if (!last_level)
+			flush_tlb_mm(tlb->mm);
 		return;
+	}
 
-	/*
-	 * The intermediate page table levels are already handled by
-	 * the __(pte|pmd|pud)_free_tlb() functions, so last level
-	 * TLBI is sufficient here.
-	 */
-	__flush_tlb_range(&vma, tlb->start, tlb->end, PAGE_SIZE, true);
+	__flush_tlb_range(&vma, tlb->start, tlb->end, stride, last_level);
 }
 
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
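
For illustration only, not part of the commit: the stand-alone user-space sketch below mirrors the decision logic the new tlb_flush() implements. The struct fields follow mmu_gather's fullmm/freed_tables, while mock_gather, mock_tlb_flush and unmap_granule are hypothetical names (unmap_granule stands in for tlb_get_unmap_size()); the printed messages mark where the kernel would issue flush_tlb_mm() or a ranged __flush_tlb_range().

	/*
	 * Sketch of the tlb_flush() decision logic, with mocked-up types.
	 * 'unmap_granule' is a hypothetical stand-in for tlb_get_unmap_size().
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	struct mock_gather {
		bool fullmm;			/* tearing down the whole mm? */
		bool freed_tables;		/* page-table pages were freed */
		unsigned long start, end;	/* range being unmapped */
		unsigned long unmap_granule;	/* smallest granule unmapped */
	};

	static void mock_tlb_flush(const struct mock_gather *tlb)
	{
		/* Leaf-only TLBI is safe iff no table pages were freed. */
		bool last_level = !tlb->freed_tables;
		unsigned long stride = tlb->unmap_granule;

		if (tlb->fullmm) {
			/*
			 * Address-space teardown: the ASID allocator flushes
			 * the whole TLB before the ASID can be reused, so only
			 * a stale walk-cache needs invalidating here.
			 */
			if (!last_level)
				puts("flush_tlb_mm(): invalidate walk-cache");
			return;
		}

		printf("TLBI range [%#lx, %#lx), stride %#lx, last_level=%d\n",
		       tlb->start, tlb->end, stride, last_level);
	}

	int main(void)
	{
		struct mock_gather unmap = {
			.start = 0x400000, .end = 0x600000,
			.unmap_granule = PAGE_SIZE,
		};
		struct mock_gather teardown = {
			.fullmm = true, .freed_tables = true,
		};

		mock_tlb_flush(&unmap);		/* ranged, leaf-only TLBI */
		mock_tlb_flush(&teardown);	/* walk-cache flush only */
		return 0;
	}

The point of the change, as the diff shows: a ranged invalidation can use the unmap granule as its stride (fewer TLBI operations when huge mappings are torn down) and can be restricted to last-level entries whenever no intermediate page-table pages were freed, while the fullmm path skips the ranged flush entirely.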