author     Vlastimil Babka <vbabka@suse.cz>  2021-10-29 12:18:24 +0200
committer  Vlastimil Babka <vbabka@suse.cz>  2022-01-06 12:26:01 +0100
commit     4020b4a22604f832bacd8e3d8577ee8b15b829e2 (patch)
tree       6ba08b366bd60e9fa3196111a52e51bd71047642 /mm
parent     mm/slub: Convert alloc_slab_page() to return a struct slab (diff)
download   linux-dev-4020b4a22604f832bacd8e3d8577ee8b15b829e2.tar.xz
           linux-dev-4020b4a22604f832bacd8e3d8577ee8b15b829e2.zip
mm/slub: Convert __free_slab() to use struct slab
__free_slab() is on the boundary of distinguishing struct slab and struct page,
so start with struct slab, but convert to folio for working with flags, and use
folio_page() to call functions that require struct page.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
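[Context note] struct slab, introduced earlier in this series, is a typed overlay
of the slab's head struct page, and struct folio overlays the same memory, so the
conversions in this patch are type changes rather than runtime work. A minimal
sketch of the helpers the patch relies on follows; slab_folio(), page_slab() and
folio_page() are real names from this series, but the simplified cast-based
definitions below are illustrative only, since the actual macros in mm/slab.h use
_Generic() to preserve const-ness:

/*
 * Illustrative sketch only: struct slab aliases the memory of the
 * slab's head struct page (and hence of struct folio), so converting
 * between the three views is a pointer cast.  The real helpers in
 * mm/slab.h use _Generic() to also accept const pointers.
 */
static inline struct folio *slab_folio(struct slab *slab)
{
	return (struct folio *)slab;	/* same underlying memory */
}

static inline struct slab *page_slab(struct page *page)
{
	return (struct slab *)page;	/* 'page' must be the head page */
}

folio_page(folio, 0) goes the other way: it resolves to the folio's first (head)
struct page, which is what the not-yet-converted APIs such as slab_pad_check(),
check_object() and __free_pages() still take.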
Diffstat (limited to 'mm')
-rw-r--r--  mm/slub.c | 27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 65cbdeae7edb..4d64c5b42629 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2005,35 +2005,34 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 			flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
 }
 
-static void __free_slab(struct kmem_cache *s, struct page *page)
+static void __free_slab(struct kmem_cache *s, struct slab *slab)
 {
-	int order = compound_order(page);
+	struct folio *folio = slab_folio(slab);
+	int order = folio_order(folio);
 	int pages = 1 << order;
 
 	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
 		void *p;
 
-		slab_pad_check(s, page);
-		for_each_object(p, s, page_address(page),
-						page->objects)
-			check_object(s, page, p, SLUB_RED_INACTIVE);
+		slab_pad_check(s, folio_page(folio, 0));
+		for_each_object(p, s, slab_address(slab), slab->objects)
+			check_object(s, folio_page(folio, 0), p, SLUB_RED_INACTIVE);
 	}
 
-	__ClearPageSlabPfmemalloc(page);
-	__ClearPageSlab(page);
-	/* In union with page->mapping where page allocator expects NULL */
-	page->slab_cache = NULL;
+	__slab_clear_pfmemalloc(slab);
+	__folio_clear_slab(folio);
+	folio->mapping = NULL;
 	if (current->reclaim_state)
 		current->reclaim_state->reclaimed_slab += pages;
-	unaccount_slab(page_slab(page), order, s);
-	__free_pages(page, order);
+	unaccount_slab(slab, order, s);
+	__free_pages(folio_page(folio, 0), order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
 
-	__free_slab(page->slab_cache, page);
+	__free_slab(page->slab_cache, page_slab(page));
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
@@ -2041,7 +2040,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
 	} else
-		__free_slab(s, page);
+		__free_slab(s, page_slab(page));
 }
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
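
[Context note] rcu_free_slab() and free_slab() still traffic in struct page here
and convert at the call boundary with page_slab(); later patches in this series
push struct slab into those callers as well. A hedged sketch of the aliasing
invariant that makes these boundary conversions free at runtime (the helper name
and assertion are illustrative, not part of the patch):

/*
 * Sketch: all three views name the same head page, so the patch
 * changes types, not behaviour.
 */
static inline void assert_slab_aliasing(struct slab *slab)
{
	struct folio *folio = slab_folio(slab);
	struct page *head = folio_page(folio, 0);

	VM_BUG_ON(head != (struct page *)slab);	/* identical address */
}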