author     gaoxiang17 <gaoxiang17@xiaomi.com>  2024-09-20 20:20:30 +0800
committer  Andrew Morton <akpm@linux-foundation.org>  2025-01-13 22:40:42 -0800
commit     6025ea5abbe5d813d6a41c78e6ea14259fb503f4 (patch)
tree       c76126484bee25051c14548b1463383608fece21
parent     mm:kasan: fix sparse warnings: Should it be static? (diff)
mm/page_alloc: add some detailed comments in can_steal_fallback
[akpm@linux-foundation.org: tweak grammar, fit to 80 cols]
Link: https://lkml.kernel.org/r/20240920122030.159751-1-gxxa03070307@gmail.com
Signed-off-by: gaoxiang17 <gaoxiang17@xiaomi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  mm/page_alloc.c  8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a2853b7967d..a887ba2cc91d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1855,6 +1855,14 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
if (order >= pageblock_order)
return true;
+ /*
+ * Movable pages won't cause permanent fragmentation, so when you alloc
+ * small pages, you just need to temporarily steal unmovable or
+ * reclaimable pages that are closest to the request size. After a
+ * while, memory compaction may occur to form large contiguous pages,
+ * and the next movable allocation may not need to steal. Unmovable and
+ * reclaimable allocations need to actually steal pages.
+ */
if (order >= pageblock_order / 2 ||
start_mt == MIGRATE_RECLAIMABLE ||
start_mt == MIGRATE_UNMOVABLE ||
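
To make the heuristic described in the new comment concrete, below is a small, self-contained userspace sketch (not kernel code) that mirrors only the decision logic visible in the hunk above. The pageblock_order value of 9 (a 2 MiB pageblock with 4 KiB base pages) and the migratetype enum are stand-ins for the kernel's definitions, and any conditions beyond the truncated end of the hunk are omitted.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel definitions; an order of 9 for a pageblock is an
 * assumption (2 MiB pageblocks with 4 KiB base pages). */
#define PAGEBLOCK_ORDER 9
enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

/* Mirrors the decision logic shown in the hunk: whole-pageblock requests
 * always steal; otherwise only sufficiently large requests or
 * unmovable/reclaimable requests claim the block, while small movable
 * requests merely borrow pages temporarily. */
static bool can_steal_fallback(unsigned int order, enum migratetype start_mt)
{
	if (order >= PAGEBLOCK_ORDER)
		return true;

	if (order >= PAGEBLOCK_ORDER / 2 ||
	    start_mt == MIGRATE_RECLAIMABLE ||
	    start_mt == MIGRATE_UNMOVABLE)
		return true;

	return false;
}

int main(void)
{
	/* A small movable allocation does not need to steal the pageblock... */
	printf("order-2 movable:   %d\n", can_steal_fallback(2, MIGRATE_MOVABLE));
	/* ...but an unmovable allocation of the same size does. */
	printf("order-2 unmovable: %d\n", can_steal_fallback(2, MIGRATE_UNMOVABLE));
	/* Large requests steal regardless of migratetype. */
	printf("order-9 movable:   %d\n", can_steal_fallback(9, MIGRATE_MOVABLE));
	return 0;
}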