aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2021-11-10 14:12:45 +0100
committerVlastimil Babka <vbabka@suse.cz>2022-01-06 12:26:53 +0100
commit9c01e9af171f13cf6573f404ecaf96dfa48233ab (patch)
tree1af8f4560e4f707a0587cd9febe0ac34605f3abd /mm
parentmm/slub: Simplify struct slab slabs field definition (diff)
downloadlinux-dev-9c01e9af171f13cf6573f404ecaf96dfa48233ab.tar.xz
linux-dev-9c01e9af171f13cf6573f404ecaf96dfa48233ab.zip
mm/slub: Define struct slab fields for CONFIG_SLUB_CPU_PARTIAL only when enabled
The fields 'next' and 'slabs' are only used when CONFIG_SLUB_CPU_PARTIAL is enabled. We can put their definition to #ifdef to prevent accidental use when disabled. Currently show_slab_objects() and slabs_cpu_partial_show() contain code accessing the slabs field that's effectively dead with CONFIG_SLUB_CPU_PARTIAL=n through the wrappers slub_percpu_partial() and slub_percpu_partial_read_once(), but to prevent a compile error, we need to hide all this code behind #ifdef. Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Reviewed-by: Roman Gushchin <guro@fb.com> Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slub.c8
2 files changed, 8 insertions, 2 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 495008f89bf6..f14e723b9e3c 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -25,10 +25,12 @@ struct slab {
union {
struct list_head slab_list;
struct rcu_head rcu_head;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
struct {
struct slab *next;
int slabs; /* Nr of slabs left */
};
+#endif
};
struct kmem_cache *slab_cache;
/* Double-word boundary */
diff --git a/mm/slub.c b/mm/slub.c
index d08ba1025aae..261474092e43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5258,6 +5258,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
slab = slub_percpu_partial_read_once(c);
if (slab) {
node = slab_nid(slab);
@@ -5270,6 +5271,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
total += x;
nodes[node] += x;
}
+#endif
}
}
@@ -5469,9 +5471,10 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
{
int objects = 0;
int slabs = 0;
- int cpu;
+ int cpu __maybe_unused;
int len = 0;
+#ifdef CONFIG_SLUB_CPU_PARTIAL
for_each_online_cpu(cpu) {
struct slab *slab;
@@ -5480,12 +5483,13 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
if (slab)
slabs += slab->slabs;
}
+#endif
/* Approximate half-full slabs, see slub_set_cpu_partial() */
objects = (slabs * oo_objects(s->oo)) / 2;
len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SLUB_CPU_PARTIAL) && defined(CONFIG_SMP)
for_each_online_cpu(cpu) {
struct slab *slab;