author:    Ryan Roberts <ryan.roberts@arm.com>	2024-02-15 10:32:02 +0000
committer: Andrew Morton <akpm@linux-foundation.org>	2024-02-22 15:27:18 -0800
commit:    c6ec76a2ebc5829e5826b218d2e1475ec11b333e (patch)
tree:      0bfaa6763fc8dd7c1d71e0d0f12c16c1f3fc035f /mm/memory.c
parent:    arm64/mm: implement new [get_and_]clear_full_ptes() batch APIs (diff)
mm: add pte_batch_hint() to reduce scanning in folio_pte_batch()
Some architectures (e.g. arm64) can tell from looking at a pte whether
some follow-on ptes also map contiguous physical memory with the same
pgprot (for arm64, these are contpte mappings). Take advantage of this
knowledge to optimize folio_pte_batch() so that it can skip these ptes
when scanning to create a batch. By default, if an arch does not opt
in, pte_batch_hint() returns a compile-time 1, so the changes are
optimized out and folio_pte_batch() behaves as before.

arm64 will opt in to providing this hint in the next patch, which will
greatly reduce the cost of ptep_get() when scanning a range of
contptes.

Link: https://lkml.kernel.org/r/20240215103205.2607016-16-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Tested-by: John Hubbard <jhubbard@nvidia.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Barry Song <21cnbao@gmail.com>
Cc: Borislav Petkov (AMD) <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will@kernel.org>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
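For context, a minimal sketch of the generic pte_batch_hint() fallback
this commit relies on. The definition lives outside the mm/memory.c
hunks shown on this page (in include/linux/pgtable.h); it is
reproduced here, lightly simplified, for illustration only:

	#ifndef pte_batch_hint
	/*
	 * Without an arch opt-in the hint is a compile-time constant 1,
	 * so folio_pte_batch() degenerates to the previous
	 * one-pte-at-a-time scan and the batching logic optimizes away.
	 */
	static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
	{
		return 1;
	}
	#endif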
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 465ada39c2b7..642b4f2be523 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -988,16 +988,20 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 {
 	unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio);
 	const pte_t *end_ptep = start_ptep + max_nr;
-	pte_t expected_pte = __pte_batch_clear_ignored(pte_next_pfn(pte), flags);
-	pte_t *ptep = start_ptep + 1;
+	pte_t expected_pte, *ptep;
 	bool writable;
+	int nr;
 
 	if (any_writable)
 		*any_writable = false;
 
 	VM_WARN_ON_FOLIO(!pte_present(pte), folio);
 
-	while (ptep != end_ptep) {
+	nr = pte_batch_hint(start_ptep, pte);
+	expected_pte = __pte_batch_clear_ignored(pte_advance_pfn(pte, nr), flags);
+	ptep = start_ptep + nr;
+
+	while (ptep < end_ptep) {
 		pte = ptep_get(ptep);
 		if (any_writable)
 			writable = !!pte_write(pte);
@@ -1011,17 +1015,18 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr,
 		 * corner cases the next PFN might fall into a different
 		 * folio.
 		 */
-		if (pte_pfn(pte) == folio_end_pfn)
+		if (pte_pfn(pte) >= folio_end_pfn)
 			break;
 
 		if (any_writable)
 			*any_writable |= writable;
 
-		expected_pte = pte_next_pfn(expected_pte);
-		ptep++;
+		nr = pte_batch_hint(ptep, pte);
+		expected_pte = pte_advance_pfn(expected_pte, nr);
+		ptep += nr;
 	}
 
-	return ptep - start_ptep;
+	return min(ptep - start_ptep, max_nr);
 }
 
 /*
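Note the new min() on return: because pte_batch_hint() can step ptep
past end_ptep when a hint covers more entries than remain in the scan
window, the batch length has to be clamped to max_nr.

For illustration, a sketch of what an arch opt-in can look like, based
on the follow-up arm64 contpte patch in this series (simplified; see
that patch for the real definition):

	#define pte_batch_hint pte_batch_hint
	static inline unsigned int pte_batch_hint(pte_t *ptep, pte_t pte)
	{
		/* Non-contpte mappings carry no hint: scan one pte at a time. */
		if (!pte_valid_cont(pte))
			return 1;

		/*
		 * Entries remaining in the current contpte block (CONT_PTES
		 * entries, 16 with 4K pages), including this one; ptep >> 3
		 * converts the 8-byte pte pointer to a pte index.
		 */
		return CONT_PTES - (((unsigned long)ptep >> 3) & (CONT_PTES - 1));
	}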