author    Nicholas Piggin <npiggin@gmail.com>  2020-12-17 23:47:31 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2021-02-09 01:09:45 +1100
commit    3cb1aa7aa39402f4f2cb847b1f16ade3bce43a97 (patch)
tree      e080be0f37477a4b24d7e0a836fc4a27117b40e2 /arch/powerpc/include/asm/book3s
parent    powerpc/64s/radix: serialize_against_pte_lookup IPIs trim mm_cpumask (diff)
powerpc/64s: Implement ptep_clear_flush_young that does not flush TLBs
Similarly to the x86 commit b13b1d2d8692 ("x86/mm: In the PTE swapout page
reclaim case clear the accessed bit instead of flushing the TLB"), implement
ptep_clear_flush_young that does not actually flush the TLB in the case the
referenced bit is cleared.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20201217134731.488135-8-npiggin@gmail.com
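For context, the generic fallback that this override suppresses clears the
accessed bit and then flushes the TLB entry for the page whenever the bit was
set. The sketch below is a simplified paraphrase of that generic behaviour
(roughly what mm/pgtable-generic.c did at the time), not part of this patch;
with __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH defined, Book3S 64 instead maps
ptep_clear_flush_young straight to ptep_test_and_clear_young and skips the
flush_tlb_page() step entirely.

/*
 * Simplified sketch of the generic ptep_clear_flush_young() fallback
 * (paraphrased, not the exact kernel source) that the new
 * __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH define disables on Book3S 64.
 */
#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	/* Clear the accessed (referenced) bit and remember its old value. */
	young = ptep_test_and_clear_young(vma, address, ptep);
	/* The generic code pays for a TLB flush whenever the bit was set... */
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif
/*
 * ...whereas with this patch powerpc Book3S 64 simply does
 *
 *   #define ptep_clear_flush_young ptep_test_and_clear_young
 *
 * and relies on a later context switch or TLB flush to catch up, as the
 * comment added in the diff below explains.
 */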
Diffstat (limited to 'arch/powerpc/include/asm/book3s')
-rw-r--r--  arch/powerpc/include/asm/book3s/64/pgtable.h  |  23
1 file changed, 20 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index a39886681629..058601efbc8a 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -388,11 +388,28 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
({ \
- int __r; \
- __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
- __r; \
+ __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
})
+/*
+ * On Book3S CPUs, clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young ptep_test_and_clear_young
+
+#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
+#define pmdp_clear_flush_young pmdp_test_and_clear_young
+
static inline int __pte_write(pte_t pte)
{
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE));