aboutsummaryrefslogtreecommitdiffstatshomepage
diff options
context:
space:
mode:
authorZhaoyang Huang <zhaoyang.huang@unisoc.com>2024-09-26 13:06:47 +0800
committerAndrew Morton <akpm@linux-foundation.org>2024-11-05 16:56:29 -0800
commit473c371254d2c9906c286c939eaa99d0fac13e38 (patch)
tree737db41c247be9c82c6976bf01e13cd3e7a8d8b9
parentmm: pgtable: remove pte_offset_map_nolock() (diff)
downloadwireguard-linux-473c371254d2c9906c286c939eaa99d0fac13e38.tar.xz
wireguard-linux-473c371254d2c9906c286c939eaa99d0fac13e38.zip
mm: migrate LRU_REFS_MASK bits in folio_migrate_flags
Bits of LRU_REFS_MASK are not inherited during migration, which leads to the new folio starting from tier0 when MGLRU is enabled. Try to bring over as many bits of folio->flags as possible, since compaction and alloc_contig_range, which introduce migration, do happen at times. Link: https://lkml.kernel.org/r/20240926050647.5653-1-zhaoyang.huang@unisoc.com Signed-off-by: Zhaoyang Huang <zhaoyang.huang@unisoc.com> Suggested-by: Yu Zhao <yuzhao@google.com> Acked-by: David Hildenbrand <david@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Yu Zhao <yuzhao@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--include/linux/mm_inline.h10
-rw-r--r--mm/migrate.c1
2 files changed, 11 insertions, 0 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index f4fe593c1400..6f801c7b36e2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -291,6 +291,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return true;
}
+/*
+ * Carry the LRU_REFS_MASK bits (the MGLRU access-tier reference counter
+ * kept in folio->flags) over from the old folio to the new one during
+ * migration, so the migrated folio does not restart at tier 0.
+ * READ_ONCE() takes a single consistent snapshot of old->flags;
+ * set_mask_bits() atomically replaces only the LRU_REFS_MASK field of
+ * new->flags, leaving all other flag bits untouched.
+ */
+static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+{
+ unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK;
+
+ set_mask_bits(&new->flags, LRU_REFS_MASK, refs);
+}
#else /* !CONFIG_LRU_GEN */
static inline bool lru_gen_enabled(void)
@@ -313,6 +319,10 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio,
return false;
}
+/*
+ * No-op stub for !CONFIG_LRU_GEN: without MGLRU there are no
+ * LRU_REFS_MASK bits to migrate, but callers in mm/migrate.c can use
+ * the same call unconditionally.
+ */
+static inline void folio_migrate_refs(struct folio *new, struct folio *old)
+{
+
+}
#endif /* CONFIG_LRU_GEN */
static __always_inline
diff --git a/mm/migrate.c b/mm/migrate.c
index dfa24e41e8f9..72c6657f4f72 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -695,6 +695,7 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
if (folio_test_idle(folio))
folio_set_idle(newfolio);
+ folio_migrate_refs(newfolio, folio);
/*
* Copy NUMA information to the new page, to prevent over-eager
* future migrations of this same page.