author	Liam R. Howlett <Liam.Howlett@Oracle.com>	2022-05-31 09:20:51 -0400
committer	Andrew Morton <akpm@linux-foundation.org>	2022-10-20 21:27:23 -0700
commit	df48a5f7a3bbac6a700026b554922943ecee1fb0 (patch)
tree	d210bb01c9e8cdf4070ca9d3b61db8830c966349 /mm
parent	mm: /proc/pid/smaps_rollup: fix maple tree search (diff)
mm/page_alloc: reduce potential fragmentation in make_alloc_exact()
Try to avoid using the left over split page on the next request for a page by
calling __free_pages_ok() with FPI_TO_TAIL.  This increases the potential of
defragmenting memory when it's used for a short period of time.

Link: https://lkml.kernel.org/r/20220531185626.yvlmymbxyoe5vags@revolver
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Suggested-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
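For context (not part of this commit): make_alloc_exact() is the internal helper
behind the alloc_pages_exact()/free_pages_exact() API. The sketch below is
illustrative only, assuming a caller that needs a non-power-of-two number of
pages; the function name example_exact_alloc() is hypothetical.

/* Illustrative sketch only, not part of this patch.  Requesting five pages
 * makes the buddy allocator hand back an order-3 block (eight pages);
 * make_alloc_exact() splits it and, with this change, frees the three unused
 * tail pages with FPI_TO_TAIL so the next allocation is less likely to pick
 * up the leftover fragment immediately.
 */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int example_exact_alloc(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use the 5-page buffer ... */

	free_pages_exact(buf, 5 * PAGE_SIZE);
	return 0;
}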
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e20ade858e71..b5a6c815ae28 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5784,14 +5784,18 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order,
 		size_t size)
 {
 	if (addr) {
-		unsigned long alloc_end = addr + (PAGE_SIZE << order);
-		unsigned long used = addr + PAGE_ALIGN(size);
-
-		split_page(virt_to_page((void *)addr), order);
-		while (used < alloc_end) {
-			free_page(used);
-			used += PAGE_SIZE;
-		}
+		unsigned long nr = DIV_ROUND_UP(size, PAGE_SIZE);
+		struct page *page = virt_to_page((void *)addr);
+		struct page *last = page + nr;
+
+		split_page_owner(page, 1 << order);
+		split_page_memcg(page, 1 << order);
+		while (page < --last)
+			set_page_refcounted(last);
+
+		last = page + (1UL << order);
+		for (page += nr; page < last; page++)
+			__free_pages_ok(page, 0, FPI_TO_TAIL);
 	}
 	return (void *)addr;
 }