path: root/mm/slub.c
author	Hyeonggon Yoo <42.hyeyoo@gmail.com>	2022-03-07 07:40:55 +0000
committer	Vlastimil Babka <vbabka@suse.cz>	2022-03-09 12:25:18 +0100
commit	5182f3c9180397b16d15981b385ecfad9249e527 (patch)
tree	b774fdc78d5cdb3a25dd85c1e7aa402d9e6aa2c6 /mm/slub.c
parent	Linux 5.17-rc7 (diff)
mm/slub: limit number of node partial slabs only in cache creation
SLUB sets the minimum number of partial slabs kept per node (min_partial) using set_min_partial(). SLUB holds at least min_partial slabs, even if they are empty, to avoid excessive use of the page allocator.

set_min_partial() clamps the value of min_partial between MIN_PARTIAL and MAX_PARTIAL. Since set_min_partial() can also be called from min_partial_store(), apply the limit only in kmem_cache_open(), so that a user can change min_partial to any value they want.

[ rientjes@google.com: Fold set_min_partial() into its callers ]

Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Link: https://lore.kernel.org/r/20220307074057.902222-2-42.hyeyoo@gmail.com
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	14
1 file changed, 3 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 261474092e43..1ce09b0347ad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4000,15 +4000,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
return 1;
}
-static void set_min_partial(struct kmem_cache *s, unsigned long min)
-{
- if (min < MIN_PARTIAL)
- min = MIN_PARTIAL;
- else if (min > MAX_PARTIAL)
- min = MAX_PARTIAL;
- s->min_partial = min;
-}
-
static void set_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -4215,7 +4206,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
* The larger the object size is, the more slabs we want on the partial
* list to avoid pounding the page allocator excessively.
*/
- set_min_partial(s, ilog2(s->size) / 2);
+ s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+ s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
set_cpu_partial(s);
@@ -5396,7 +5388,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
if (err)
return err;
- set_min_partial(s, min);
+ s->min_partial = min;
return length;
}
SLAB_ATTR(min_partial);
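
To make the new behavior concrete, here is a standalone sketch of the clamping that kmem_cache_open() now performs inline. It is illustrative only: MIN_PARTIAL and MAX_PARTIAL mirror their mm/slub.c values at the time of this commit (5 and 10), and ilog2_approx() is a hypothetical portable stand-in for the kernel's ilog2().

/* Standalone sketch of the min_partial clamping now inline in kmem_cache_open(). */
#include <stdio.h>

#define MIN_PARTIAL	5	/* value in mm/slub.c at the time of this commit */
#define MAX_PARTIAL	10	/* value in mm/slub.c at the time of this commit */

/* Hypothetical portable stand-in for the kernel's ilog2(): floor(log2(x)) */
static unsigned long ilog2_approx(unsigned long x)
{
	unsigned long log = 0;

	while (x >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned long sizes[] = { 8, 64, 4096, 1UL << 20 };

	for (int i = 0; i < 4; i++) {
		unsigned long min = ilog2_approx(sizes[i]) / 2;

		/* Same effect as the min_t()/max_t() pair added by the patch */
		if (min < MIN_PARTIAL)
			min = MIN_PARTIAL;
		else if (min > MAX_PARTIAL)
			min = MAX_PARTIAL;

		printf("object size %8lu -> min_partial %lu\n", sizes[i], min);
	}
	return 0;
}

Note the last hunk: because min_partial_store() now assigns the parsed value directly instead of going through set_min_partial(), a write to the cache's min_partial sysfs attribute is no longer clamped to the [MIN_PARTIAL, MAX_PARTIAL] range, which is exactly the point of the patch.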