author     Vlastimil Babka <vbabka@suse.cz>   2021-11-04 11:30:58 +0100
committer  Vlastimil Babka <vbabka@suse.cz>   2022-01-06 12:26:29 +0100
commit     401fb12c68c257b9c9116b1475c0ac26b646fcc0 (patch)
tree       c9f7b475ecc49532ddf201a01631752af9caab19 /mm
parent     mm/kfence: Convert kfence_guarded_alloc() to struct slab (diff)
mm/sl*b: Differentiate struct slab fields by sl*b implementations
With a struct slab definition separate from struct page, we can go
further and define only fields that the chosen sl*b implementation
uses. This means everything between the __page_flags and
__page_refcount placeholders now depends on the chosen CONFIG_SL*B.
Some fields exist in all implementations (slab_list) but can be part
of a union in some, so it's simpler to repeat them than to complicate
the definition with even more ifdefs.

The patch doesn't change the physical offsets of the fields, although
that could be done later - for example, it's now clear that tighter
packing in SLOB could be possible.

This should also prevent accidental use of fields that don't exist in
a given implementation. Before this patch, virt_to_cache() and
cache_from_obj() were visible for SLOB (albeit not used), although
they rely on the slab_cache field that isn't set by SLOB. With this
patch that is now a compile error, so these functions are hidden
behind an #ifndef CONFIG_SLOB.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
Tested-by: Marco Elver <elver@google.com> # kfence
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Tested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: <kasan-dev@googlegroups.com>
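To make the gating concrete, here is a minimal standalone sketch - not
kernel code; struct widget and the MYCONFIG_* symbols are hypothetical
stand-ins for struct slab and the CONFIG_SL*B options - showing how
config-gated struct fields turn use of a field from the wrong
implementation into a compile error:

/*
 * Minimal sketch of config-gated struct fields.
 * Build with e.g. cc -DMYCONFIG_A sketch.c (or -DMYCONFIG_B).
 */
#include <stdio.h>

struct widget {
	unsigned long common_head;	/* shared, like __page_flags */
#if defined(MYCONFIG_A)
	int a_only;			/* exists only in configuration A */
#elif defined(MYCONFIG_B)
	int b_only;			/* exists only in configuration B */
#else
#error "Unexpected configuration"
#endif
	int common_tail;		/* shared, like __page_refcount */
};

int main(void)
{
	struct widget w = { .common_head = 0, .common_tail = 0 };

#if defined(MYCONFIG_A)
	w.a_only = 1;	/* referencing w.b_only here would not compile */
	printf("A: %d\n", w.a_only);
#elif defined(MYCONFIG_B)
	w.b_only = 2;	/* referencing w.a_only here would not compile */
	printf("B: %d\n", w.b_only);
#endif
	return 0;
}

Building with -DMYCONFIG_B and adding a reference to w.a_only fails to
compile, which is the same guarantee the gated struct slab now gives
against, e.g., SLOB code touching slab_cache.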
Diffstat (limited to 'mm')
-rw-r--r--   mm/kfence/core.c    9
-rw-r--r--   mm/slab.h          48
2 files changed, 43 insertions(+), 14 deletions(-)
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4eb60cf5ff8b..267dfde43b91 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -427,10 +427,11 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	/* Set required slab fields. */
 	slab = virt_to_slab((void *)meta->addr);
 	slab->slab_cache = cache;
-	if (IS_ENABLED(CONFIG_SLUB))
-		slab->objects = 1;
-	if (IS_ENABLED(CONFIG_SLAB))
-		slab->s_mem = addr;
+#if defined(CONFIG_SLUB)
+	slab->objects = 1;
+#elif defined(CONFIG_SLAB)
+	slab->s_mem = addr;
+#endif
 
 	/* Memory initialization. */
 	for_each_canary(meta, set_canary_byte);
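A note on why this hunk moves from IS_ENABLED() to preprocessor
conditionals: IS_ENABLED(CONFIG_FOO) expands to a constant 0 or 1, so
the dead branch is optimized away, but it must still parse and
type-check - and once objects and s_mem exist only under their
respective configs, slab->s_mem would no longer compile under SLUB.
A minimal sketch of the distinction (CONFIG_FOO and struct thing are
hypothetical):

/* Sketch only: CONFIG_FOO and struct thing are hypothetical. */
struct thing {
#ifdef CONFIG_FOO
	int foo_field;		/* exists only when CONFIG_FOO is set */
#endif
	int common;
};

void set_fields(struct thing *t)
{
#ifdef CONFIG_FOO
	/* The preprocessor drops this line when CONFIG_FOO is off, so
	 * it never has to compile against the missing field. */
	t->foo_field = 1;
#endif
	/*
	 * By contrast, `if (IS_ENABLED(CONFIG_FOO)) t->foo_field = 1;`
	 * must still type-check with CONFIG_FOO off, and fails because
	 * foo_field does not exist in that configuration.
	 */
	t->common = 0;
}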
diff --git a/mm/slab.h b/mm/slab.h
index b8d7ed42cca8..ad15f624a6b5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -8,9 +8,24 @@
 /* Reuses the bits in struct page */
 struct slab {
 	unsigned long __page_flags;
+
+#if defined(CONFIG_SLAB)
+
 	union {
 		struct list_head slab_list;
-		struct {	/* Partial pages */
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache;
+	void *freelist;	/* array of free object indexes */
+	void *s_mem;	/* first object */
+	unsigned int active;
+
+#elif defined(CONFIG_SLUB)
+
+	union {
+		struct list_head slab_list;
+		struct rcu_head rcu_head;
+		struct {
 			struct slab *next;
 #ifdef CONFIG_64BIT
 			int slabs;	/* Nr of slabs left */
@@ -18,25 +33,32 @@ struct slab {
 			short int slabs;
 #endif
 		};
-		struct rcu_head rcu_head;
 	};
-	struct kmem_cache *slab_cache; /* not slob */
+	struct kmem_cache *slab_cache;
 	/* Double-word boundary */
 	void *freelist;		/* first free object */
 	union {
-		void *s_mem;	/* slab: first object */
-		unsigned long counters;		/* SLUB */
-		struct {			/* SLUB */
+		unsigned long counters;
+		struct {
 			unsigned inuse:16;
 			unsigned objects:15;
 			unsigned frozen:1;
 		};
 	};
+	unsigned int __unused;
+
+#elif defined(CONFIG_SLOB)
+
+	struct list_head slab_list;
+	void *__unused_1;
+	void *freelist;		/* first free block */
+	void *__unused_2;
+	int units;
+
+#else
+#error "Unexpected slab allocator configured"
+#endif
 
-	union {
-		unsigned int active;	/* SLAB */
-		int units;		/* SLOB */
-	};
 	atomic_t __page_refcount;
 #ifdef CONFIG_MEMCG
 	unsigned long memcg_data;
@@ -48,10 +70,14 @@ struct slab {
 SLAB_MATCH(flags, __page_flags);
 SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
 SLAB_MATCH(slab_list, slab_list);
+#ifndef CONFIG_SLOB
 SLAB_MATCH(rcu_head, rcu_head);
 SLAB_MATCH(slab_cache, slab_cache);
+#endif
+#ifdef CONFIG_SLAB
 SLAB_MATCH(s_mem, s_mem);
 SLAB_MATCH(active, active);
+#endif
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
 SLAB_MATCH(memcg_data, memcg_data);
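The SLAB_MATCH() assertions above pin each struct slab field to the
offset of its struct page counterpart, so the now config-dependent
layouts cannot silently drift; in mm/slab.h the macro is built from
static_assert() and offsetof(). A standalone sketch of the same
pattern, with hypothetical struct old_layout/struct new_layout in
place of struct page/struct slab:

/* Sketch of offset-matching assertions; both structs are hypothetical. */
#include <assert.h>
#include <stddef.h>

struct old_layout {
	unsigned long flags;
	void *payload;
};

struct new_layout {
	unsigned long __old_flags;	/* must overlay old_layout.flags */
	void *data;			/* must overlay old_layout.payload */
};

#define LAYOUT_MATCH(old, new)						\
	static_assert(offsetof(struct old_layout, old) ==		\
		      offsetof(struct new_layout, new),			\
		      "field offsets diverged")

LAYOUT_MATCH(flags, __old_flags);
LAYOUT_MATCH(payload, data);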
@@ -599,6 +625,7 @@ static inline void memcg_slab_free_hook(struct kmem_cache *s,
 }
 #endif /* CONFIG_MEMCG_KMEM */
 
+#ifndef CONFIG_SLOB
 static inline struct kmem_cache *virt_to_cache(const void *obj)
 {
 	struct slab *slab;
@@ -645,6 +672,7 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
 	print_tracking(cachep, x);
 	return cachep;
 }
+#endif /* CONFIG_SLOB */
 
 static inline size_t slab_ksize(const struct kmem_cache *s)
 {