path: root/include/asm-generic/tlb.h
author		Nicholas Piggin <npiggin@gmail.com>	2018-07-13 16:59:03 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-07-14 11:11:09 -0700
commit		a90744bac57c3c07d0d4422af62f3e44549ade30 (patch)
tree		40d96c0932f80c381b86277e31ac089585da2b33 /include/asm-generic/tlb.h
parent		autofs: fix slab out of bounds read in getname_kernel() (diff)
mm: allow arch to supply p??_free_tlb functions
The mmu_gather APIs keep track of the invalidated address range, including
the span covered by invalidated page table pages. Ranges covered by page
tables but not ptes (and therefore no TLBs) still need to be invalidated
because some architectures (x86) can cache intermediate page table entries,
and invalidate those with normal TLB invalidation instructions to be
almost-backward-compatible.

Architectures which don't cache intermediate page table entries, or which
invalidate these caches separately from TLB invalidation, do not require
the TLB invalidation range to be expanded over page tables.

Allow architectures to supply their own p??_free_tlb functions, which can
avoid the __tlb_adjust_range.

Link: http://lkml.kernel.org/r/20180703013131.2807-1-npiggin@gmail.com
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: "Aneesh Kumar K. V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Nadav Amit <nadav.amit@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
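The range expansion the changelog refers to is done by __tlb_adjust_range(),
defined earlier in this same header. For reference, a minimal sketch of that
helper as it looks around this kernel version (reproduced from memory, so
treat the exact field names of struct mmu_gather as an assumption rather
than a quote of this tree):

/* Sketch: grow the tracked flush window so the eventual tlb_flush()
 * also covers [address, address + range_size). */
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

Because the default p??_free_tlb macros pass PAGE_SIZE here for every page
table page they free, the flush range is widened over page tables as well;
the point of this patch is to let an architecture skip that widening when
it is not needed.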
Diffstat (limited to '')
-rw-r--r--	include/asm-generic/tlb.h	8
1 files changed, 8 insertions, 0 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..3063125197ad 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */
+#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
+#endif
+#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
+#endif
#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
+#endif
#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
+#endif
#define tlb_migrate_finish(mm) do {} while (0)
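With the new #ifndef guards, an architecture that does not cache
intermediate page table entries (or that invalidates its page-walk cache
separately from the TLB) can define its own macro ahead of the generic one
and skip the range expansion entirely. A hypothetical sketch; the
architecture name, file path and comment are invented for illustration and
are not part of this patch:

/* arch/foo/include/asm/tlb.h -- hypothetical architecture "foo" */

/*
 * "foo" does not cache intermediate page table entries, so freeing a
 * page table page does not need to widen the TLB flush range.
 * Defining pte_free_tlb() before including the generic header makes
 * the #ifndef fall-back above step aside.
 */
#define pte_free_tlb(tlb, ptep, address)		\
	do {						\
		__pte_free_tlb(tlb, ptep, address);	\
	} while (0)

#include <asm-generic/tlb.h>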