author     Matthew Wilcox (Oracle) <willy@infradead.org>  2021-10-04 14:45:52 +0100
committer  Vlastimil Babka <vbabka@suse.cz>  2022-01-06 12:25:40 +0100
commit     b918653b4f32881afb383b7b5853a7edfe0937a6 (patch)
tree       9b833a44d786b29faa3398ad7faf487e85d5a974 /mm
parent     mm: Split slab into its own type (diff)
mm: Convert [un]account_slab_page() to struct slab
Convert the parameter of these functions to struct slab instead of struct page and drop _page from the names. For now their callers just convert page to slab.

[ vbabka@suse.cz: replace existing functions instead of calling them ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
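For orientation, a minimal sketch of the interface change, paraphrased from the hunks below (page_slab() is the struct page to struct slab conversion helper from the parent patch "mm: Split slab into its own type"):

    /* Before: the accounting helpers took the struct page backing the slab. */
    static __always_inline void account_slab_page(struct page *page, int order,
                                                  struct kmem_cache *s, gfp_t gfp);

    /* After: they take struct slab and drop the _page suffix. */
    static __always_inline void account_slab(struct slab *slab, int order,
                                             struct kmem_cache *s, gfp_t gfp);
    static __always_inline void unaccount_slab(struct slab *slab, int order,
                                               struct kmem_cache *s);

    /* Callers in SLAB and SLUB still work on struct page for now, so they
     * convert at the call site, e.g.: */
    account_slab(page_slab(page), cachep->gfporder, cachep, flags);  /* mm/slab.c */
    unaccount_slab(page_slab(page), order, s);                       /* mm/slub.c */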
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  4
-rw-r--r--  mm/slab.h  17
-rw-r--r--  mm/slub.c  4
3 files changed, 12 insertions, 13 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 381875e23277..7f147805d0ab 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1380,7 +1380,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                 return NULL;
         }
 
-        account_slab_page(page, cachep->gfporder, cachep, flags);
+        account_slab(page_slab(page), cachep->gfporder, cachep, flags);
         __SetPageSlab(page);
         /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
         if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1405,7 +1405,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += 1 << order;
-        unaccount_slab_page(page, order, cachep);
+        unaccount_slab(page_slab(page), order, cachep);
         __free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index 0e67a8cb7f80..dd3f72fddff6 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -583,24 +583,23 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
         return page->slab_cache;
 }
 
-static __always_inline void account_slab_page(struct page *page, int order,
-                                              struct kmem_cache *s,
-                                              gfp_t gfp)
+static __always_inline void account_slab(struct slab *slab, int order,
+                                         struct kmem_cache *s, gfp_t gfp)
 {
         if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
-                memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+                memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);
 
-        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                             PAGE_SIZE << order);
 }
 
-static __always_inline void unaccount_slab_page(struct page *page, int order,
-                                                struct kmem_cache *s)
+static __always_inline void unaccount_slab(struct slab *slab, int order,
+                                           struct kmem_cache *s)
 {
         if (memcg_kmem_enabled())
-                memcg_free_page_obj_cgroups(page);
+                memcg_free_page_obj_cgroups(slab_page(slab));
 
-        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+        mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                             -(PAGE_SIZE << order));
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index a211d96011ba..c94fb4b4d655 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1943,7 +1943,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
         page->objects = oo_objects(oo);
 
-        account_slab_page(page, oo_order(oo), s, flags);
+        account_slab(page_slab(page), oo_order(oo), s, flags);
 
         page->slab_cache = s;
         __SetPageSlab(page);
@@ -2014,7 +2014,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
         page->slab_cache = NULL;
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += pages;
-        unaccount_slab_page(page, order, s);
+        unaccount_slab(page_slab(page), order, s);
         __free_pages(page, order);
 }