path: root/include/linux/slab.h
author    Vlastimil Babka <vbabka@suse.cz>  2024-02-23 19:27:18 +0100
committer Vlastimil Babka <vbabka@suse.cz>  2024-02-26 10:10:07 +0100
commit    cc61eb851c9ae38546d7df6076fd883d3dbc322d (patch)
tree      a2103acc92def90e1de4f23de700f201e92a0bbb /include/linux/slab.h
parent    mm, slab: deprecate SLAB_MEM_SPREAD flag (diff)
mm, slab: use an enum to define SLAB_ cache creation flags
The values of SLAB_ cache creation flags are defined by hand, which is tedious and error-prone. Use an enum to assign the bit number and a __SLAB_FLAG_BIT() macro to #define the final flags.

This renumbers the flag values, which is OK as they are only used internally.

Also define a __SLAB_FLAG_UNUSED macro to assign a value to flags disabled by their respective config options, in a unified and sparse-friendly way.

Reviewed-and-tested-by: Xiongwei Song <xiongwei.song@windriver.com>
Reviewed-by: Chengming Zhou <chengming.zhou@linux.dev>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
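The pattern the patch introduces can be seen in miniature in the standalone user-space sketch below. The DEMO_* names are hypothetical stand-ins for the SLAB_ flags; the real kernel version additionally wraps each value in a (slab_flags_t __force) cast for sparse, which is omitted here so the sketch compiles on its own. It shows how the enum hands out bit numbers automatically and how a config-disabled flag collapses to 0 so that ORing it into a flags word is a no-op.

/* Minimal sketch of the enum + flag-bit macro pattern (hypothetical names). */
#include <stdio.h>

/* The enum assigns consecutive bit numbers; no values are written by hand. */
enum _demo_flag_bits {
	_DEMO_RED_ZONE,		/* bit 0 */
	_DEMO_POISON,		/* bit 1 */
#ifdef CONFIG_DEMO_EXTRA
	_DEMO_EXTRA,		/* occupies a bit only when configured in */
#endif
	_DEMO_FLAGS_LAST_BIT	/* number of bits currently in use */
};

#define __DEMO_FLAG_BIT(nr)	(1U << (nr))
#define __DEMO_FLAG_UNUSED	(0U)

#define DEMO_RED_ZONE	__DEMO_FLAG_BIT(_DEMO_RED_ZONE)
#define DEMO_POISON	__DEMO_FLAG_BIT(_DEMO_POISON)
#ifdef CONFIG_DEMO_EXTRA
#define DEMO_EXTRA	__DEMO_FLAG_BIT(_DEMO_EXTRA)
#else
#define DEMO_EXTRA	__DEMO_FLAG_UNUSED	/* ORing it in changes nothing */
#endif

int main(void)
{
	unsigned int flags = DEMO_RED_ZONE | DEMO_POISON | DEMO_EXTRA;

	/* Prints flags = 0x3 when CONFIG_DEMO_EXTRA is not defined. */
	printf("flags = %#x, bits in use = %d\n", flags, _DEMO_FLAGS_LAST_BIT);
	return 0;
}

Because nothing outside the allocator stores these values, renumbering the bits whenever a config option adds or removes an entry is harmless, which is exactly why the commit message calls the renumbering OK.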
Diffstat (limited to 'include/linux/slab.h')
-rw-r--r--  include/linux/slab.h | 94
1 file changed, 67 insertions(+), 27 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index b1675ff6b904..f6323763cd61 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -21,29 +21,69 @@
#include <linux/cleanup.h>
#include <linux/hash.h>
+enum _slab_flag_bits {
+ _SLAB_CONSISTENCY_CHECKS,
+ _SLAB_RED_ZONE,
+ _SLAB_POISON,
+ _SLAB_KMALLOC,
+ _SLAB_HWCACHE_ALIGN,
+ _SLAB_CACHE_DMA,
+ _SLAB_CACHE_DMA32,
+ _SLAB_STORE_USER,
+ _SLAB_PANIC,
+ _SLAB_TYPESAFE_BY_RCU,
+ _SLAB_TRACE,
+#ifdef CONFIG_DEBUG_OBJECTS
+ _SLAB_DEBUG_OBJECTS,
+#endif
+ _SLAB_NOLEAKTRACE,
+ _SLAB_NO_MERGE,
+#ifdef CONFIG_FAILSLAB
+ _SLAB_FAILSLAB,
+#endif
+#ifdef CONFIG_MEMCG_KMEM
+ _SLAB_ACCOUNT,
+#endif
+#ifdef CONFIG_KASAN_GENERIC
+ _SLAB_KASAN,
+#endif
+ _SLAB_NO_USER_FLAGS,
+#ifdef CONFIG_KFENCE
+ _SLAB_SKIP_KFENCE,
+#endif
+#ifndef CONFIG_SLUB_TINY
+ _SLAB_RECLAIM_ACCOUNT,
+#endif
+ _SLAB_OBJECT_POISON,
+ _SLAB_CMPXCHG_DOUBLE,
+ _SLAB_FLAGS_LAST_BIT
+};
+
+#define __SLAB_FLAG_BIT(nr) ((slab_flags_t __force)(1U << (nr)))
+#define __SLAB_FLAG_UNUSED ((slab_flags_t __force)(0U))
/*
* Flags to pass to kmem_cache_create().
* The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise are no-op
*/
/* DEBUG: Perform (expensive) checks on alloc/free */
-#define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100U)
+#define SLAB_CONSISTENCY_CHECKS __SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
-#define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400U)
+#define SLAB_RED_ZONE __SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
-#define SLAB_POISON ((slab_flags_t __force)0x00000800U)
+#define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
-#define SLAB_KMALLOC ((slab_flags_t __force)0x00001000U)
+#define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
-#define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
+#define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
-#define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
+#define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
-#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
+#define SLAB_CACHE_DMA32 __SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
-#define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
+#define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
-#define SLAB_PANIC ((slab_flags_t __force)0x00040000U)
+#define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC)
/*
* SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
@@ -95,19 +135,19 @@
* Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
/* Defer freeing slabs to RCU */
-#define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000U)
+#define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
/* Trace allocations and frees */
-#define SLAB_TRACE ((slab_flags_t __force)0x00200000U)
+#define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE)
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
-# define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000U)
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
-# define SLAB_DEBUG_OBJECTS 0
+# define SLAB_DEBUG_OBJECTS __SLAB_FLAG_UNUSED
#endif
/* Avoid kmemleak tracing */
-#define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000U)
+#define SLAB_NOLEAKTRACE __SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)
/*
* Prevent merging with compatible kmem caches. This flag should be used
@@ -119,25 +159,25 @@
* - performance critical caches, should be very rare and consulted with slab
* maintainers, and not used together with CONFIG_SLUB_TINY
*/
-#define SLAB_NO_MERGE ((slab_flags_t __force)0x01000000U)
+#define SLAB_NO_MERGE __SLAB_FLAG_BIT(_SLAB_NO_MERGE)
/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
-# define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000U)
+# define SLAB_FAILSLAB __SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
-# define SLAB_FAILSLAB 0
+# define SLAB_FAILSLAB __SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
-# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
+# define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
-# define SLAB_ACCOUNT 0
+# define SLAB_ACCOUNT __SLAB_FLAG_UNUSED
#endif
#ifdef CONFIG_KASAN_GENERIC
-#define SLAB_KASAN ((slab_flags_t __force)0x08000000U)
+#define SLAB_KASAN __SLAB_FLAG_BIT(_SLAB_KASAN)
#else
-#define SLAB_KASAN 0
+#define SLAB_KASAN __SLAB_FLAG_UNUSED
#endif
/*
@@ -145,25 +185,25 @@
* Intended for caches created for self-tests so they have only flags
* specified in the code and other flags are ignored.
*/
-#define SLAB_NO_USER_FLAGS ((slab_flags_t __force)0x10000000U)
+#define SLAB_NO_USER_FLAGS __SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)
#ifdef CONFIG_KFENCE
-#define SLAB_SKIP_KFENCE ((slab_flags_t __force)0x20000000U)
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
-#define SLAB_SKIP_KFENCE 0
+#define SLAB_SKIP_KFENCE __SLAB_FLAG_UNUSED
#endif
/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000U)
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
-#define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0)
+#define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
/* Obsolete unused flag, to be removed */
-#define SLAB_MEM_SPREAD ((slab_flags_t __force)0U)
+#define SLAB_MEM_SPREAD __SLAB_FLAG_UNUSED
/*
* ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.