author    Vlastimil Babka <vbabka@suse.cz>  2025-06-02 13:02:13 +0200
committer Vlastimil Babka <vbabka@suse.cz>  2025-06-17 11:57:36 +0200
commit    5660ee54e7982f9097ddc684e90f15bdcc7fef4b (patch)
tree      1e3ad4a37179175052ac30459f1c033f82265684
parent    mm, slab: restore NUMA policy support for large kmalloc (diff)
mm, slab: use frozen pages for large kmalloc
Since slab pages are now frozen, it makes sense to have large kmalloc() objects behave the same as small kmalloc(), as the choice between the two is an implementation detail depending on allocation size.

Notably, increasing the refcount on a slab page containing a kmalloc() object is no longer possible, so the behavior should be consistent for large kmalloc pages. Therefore, change large kmalloc to use the frozen pages API.

Because of some unexpected fallout in the slab pages case (see commit b9c0e49abfca ("mm: decline to manipulate the refcount on a slab page")), implement the same kind of checks and warnings as part of this change.

Notably, networking code uses sendpage_ok() to determine whether a page's refcount can be manipulated in the network stack. Before this change, the function returns true for large kmalloc pages and the refcount can be manipulated; after this change, it will return false, so that code should continue behaving correctly.

Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Harry Yoo <harry.yoo@oracle.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
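For illustration only (not part of this patch), a minimal sketch of the caller pattern the last paragraph refers to; the helper name try_pin_buffer_page() is hypothetical:

#include <linux/mm.h>	/* get_page() */
#include <linux/net.h>	/* sendpage_ok() */

/*
 * Hypothetical helper: take a page reference only when sendpage_ok()
 * allows it. sendpage_ok() already rejects slab pages and pages with a
 * zero refcount; since large kmalloc pages are now frozen (refcount 0),
 * it rejects them too, and callers fall back to copying the data
 * instead of pinning the page.
 */
static bool try_pin_buffer_page(struct page *page)
{
	if (!sendpage_ok(page))
		return false;

	get_page(page);	/* a normally refcounted page, safe to pin */
	return true;
}

A caller that skipped such a check and bumped the refcount on a large kmalloc page directly would now trip the WARN_ON_ONCE() added to get_page() in the hunk below.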
-rw-r--r--  include/linux/mm.h  4
-rw-r--r--  mm/slub.c           6
2 files changed, 6 insertions, 4 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ef2ba0c667a..a35d5958603f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1325,6 +1325,8 @@ static inline void get_page(struct page *page)
struct folio *folio = page_folio(page);
if (WARN_ON_ONCE(folio_test_slab(folio)))
return;
+ if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+ return;
folio_get(folio);
}
@@ -1419,7 +1421,7 @@ static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
- if (folio_test_slab(folio))
+ if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
folio_put(folio);
diff --git a/mm/slub.c b/mm/slub.c
index 06d64a5fb1bf..823042efbfc9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4271,9 +4271,9 @@ static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
flags |= __GFP_COMP;
if (node == NUMA_NO_NODE)
- folio = (struct folio *)alloc_pages_noprof(flags, order);
+ folio = (struct folio *)alloc_frozen_pages_noprof(flags, order);
else
- folio = (struct folio *)__alloc_pages_noprof(flags, order, node, NULL);
+ folio = (struct folio *)__alloc_frozen_pages_noprof(flags, order, node, NULL);
if (folio) {
ptr = folio_address(folio);
@@ -4770,7 +4770,7 @@ static void free_large_kmalloc(struct folio *folio, void *object)
lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
-(PAGE_SIZE << order));
__folio_clear_large_kmalloc(folio);
- folio_put(folio);
+ free_frozen_pages(&folio->page, order);
}
/*