author    Christoph Lameter <cl@linux.com>  2012-07-06 15:25:11 -0500
committer Pekka Enberg <penberg@kernel.org>  2012-07-09 12:13:35 +0300
commit    97d06609158e61f6bdf538c4a6788e2de492236f
tree      fa3f57ff3e2d3f4f866d84dd9d634ade43941be8 /mm
parent    mm, sl[aou]b: Extract common code for kmem_cache_create()
mm, sl[aou]b: Common definition for boot state of the slab allocators
All allocators have some sort of support for the bootstrap status. Set up a
common definition for the boot states and make all slab allocators use that
definition.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
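The allocators consume the shared state either through ordered comparisons or
through slab_is_available(), which this patch moves into common code. A minimal
sketch of a typical caller follows (illustrative only; boot_alloc and its
fallback are hypothetical and not part of this patch):

#include <linux/slab.h>

/* Illustrative only -- not part of this patch; boot_alloc is hypothetical. */
static void *boot_alloc(size_t size)
{
	/*
	 * slab_is_available() now reads the common slab_state and returns
	 * true once slab_state >= UP, i.e. after kmem_cache_init() has run
	 * for whichever allocator is built in.
	 */
	if (slab_is_available())
		return kmalloc(size, GFP_KERNEL);

	return NULL;	/* caller must fall back to a boot-time allocator */
}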
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c         45
-rw-r--r--  mm/slab.h         29
-rw-r--r--  mm/slab_common.c   9
-rw-r--r--  mm/slob.c         14
-rw-r--r--  mm/slub.c         21
5 files changed, 62 insertions, 56 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 10c821e492bf..59a466b85b0f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -87,6 +87,7 @@
*/
#include <linux/slab.h>
+#include "slab.h"
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
@@ -565,27 +566,6 @@ static struct kmem_cache cache_cache = {
#define BAD_ALIEN_MAGIC 0x01020304ul
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
- NONE,
- PARTIAL_AC,
- PARTIAL_L3,
- EARLY,
- LATE,
- FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
- return g_cpucache_up >= EARLY;
-}
-
#ifdef CONFIG_LOCKDEP
/*
@@ -651,7 +631,7 @@ static void init_node_lock_keys(int q)
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up < LATE)
+ if (slab_state < UP)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
@@ -1649,14 +1629,14 @@ void __init kmem_cache_init(void)
}
}
- g_cpucache_up = EARLY;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
struct kmem_cache *cachep;
- g_cpucache_up = LATE;
+ slab_state = UP;
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
@@ -1668,6 +1648,9 @@ void __init kmem_cache_init_late(void)
BUG();
mutex_unlock(&cache_chain_mutex);
+ /* Done! */
+ slab_state = FULL;
+
/*
* Register a cpu startup notifier callback that initializes
* cpu_cache_get for all new cpus
@@ -1699,7 +1682,7 @@ static int __init cpucache_init(void)
start_cpu_timer(cpu);
/* Done! */
- g_cpucache_up = FULL;
+ slab_state = FULL;
return 0;
}
__initcall(cpucache_init);
@@ -2167,10 +2150,10 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
- if (g_cpucache_up >= LATE)
+ if (slab_state >= FULL)
return enable_cpucache(cachep, gfp);
- if (g_cpucache_up == NONE) {
+ if (slab_state == DOWN) {
/*
* Note: the first kmem_cache_create must create the cache
* that's used by kmalloc(24), otherwise the creation of
@@ -2185,16 +2168,16 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
*/
set_up_list3s(cachep, SIZE_AC);
if (INDEX_AC == INDEX_L3)
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
else
- g_cpucache_up = PARTIAL_AC;
+ slab_state = PARTIAL_ARRAYCACHE;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init), gfp);
- if (g_cpucache_up == PARTIAL_AC) {
+ if (slab_state == PARTIAL_ARRAYCACHE) {
set_up_list3s(cachep, SIZE_L3);
- g_cpucache_up = PARTIAL_L3;
+ slab_state = PARTIAL_L3;
} else {
int node;
for_each_online_node(node) {
diff --git a/mm/slab.h b/mm/slab.h
new file mode 100644
index 000000000000..f9a9815cdc82
--- /dev/null
+++ b/mm/slab.h
@@ -0,0 +1,29 @@
+#ifndef MM_SLAB_H
+#define MM_SLAB_H
+/*
+ * Internal slab definitions
+ */
+
+/*
+ * State of the slab allocator.
+ *
+ * This is used to describe the states of the allocator during bootup.
+ * Allocators use this to gradually bootstrap themselves. Most allocators
+ * have the problem that the structures used for managing slab caches are
+ * allocated from slab caches themselves.
+ */
+enum slab_state {
+ DOWN, /* No slab functionality yet */
+ PARTIAL, /* SLUB: kmem_cache_node available */
+ PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */
+ PARTIAL_L3, /* SLAB: kmalloc size for l3 struct available */
+ UP, /* Slab caches usable but not all extras yet */
+ FULL /* Everything is working */
+};
+
+extern enum slab_state slab_state;
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+ size_t align, unsigned long flags, void (*ctor)(void *));
+
+#endif
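
The enumerators above are deliberately declared in bootstrap order, so the
allocators can gate features with ordered comparisons rather than exact
matches. A minimal sketch of that pattern (a standalone copy of the enum for
illustration; the real definition lives in mm/slab.h and the variable in
mm/slab_common.c):

/* Illustrative sketch; the kernel's definitions are in mm/slab.h and mm/slab_common.c. */
enum slab_state { DOWN, PARTIAL, PARTIAL_ARRAYCACHE, PARTIAL_L3, UP, FULL };

enum slab_state slab_state;	/* zero-initialized, i.e. starts at DOWN */

static int caches_usable(void)
{
	return slab_state >= UP;	/* true for UP and FULL */
}

static int sysfs_ready(void)
{
	return slab_state >= FULL;	/* SLUB's old SYSFS state maps to FULL */
}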
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 80412beb67cc..ca1aaf69a1f5 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -16,6 +16,10 @@
#include <asm/tlbflush.h>
#include <asm/page.h>
+#include "slab.h"
+
+enum slab_state slab_state;
+
/*
* kmem_cache_create - Create a cache.
* @name: A string which is used in /proc/slabinfo to identify this cache.
@@ -66,3 +70,8 @@ out:
return s;
}
EXPORT_SYMBOL(kmem_cache_create);
+
+int slab_is_available(void)
+{
+ return slab_state >= UP;
+}
diff --git a/mm/slob.c b/mm/slob.c
index d63923d549ec..0111e0dece93 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -59,6 +59,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+#include "slab.h"
+
#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
@@ -531,6 +533,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
c->align = align;
kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL);
+ c->refcount = 1;
}
return c;
}
@@ -616,19 +619,12 @@ int kmem_cache_shrink(struct kmem_cache *d)
}
EXPORT_SYMBOL(kmem_cache_shrink);
-static unsigned int slob_ready __read_mostly;
-
-int slab_is_available(void)
-{
- return slob_ready;
-}
-
void __init kmem_cache_init(void)
{
- slob_ready = 1;
+ slab_state = UP;
}
void __init kmem_cache_init_late(void)
{
- /* Nothing to do */
+ slab_state = FULL;
}
diff --git a/mm/slub.c b/mm/slub.c
index 6551cc9a51f8..4c385164d9f7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
+#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
@@ -182,13 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
static struct notifier_block slab_notifier;
#endif
-static enum {
- DOWN, /* No slab functionality available */
- PARTIAL, /* Kmem_cache_node works */
- UP, /* Everything works but does not show up in sysfs */
- SYSFS /* Sysfs up */
-} slab_state = DOWN;
-
/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);
@@ -237,11 +231,6 @@ static inline void stat(const struct kmem_cache *s, enum stat_item si)
* Core slab cache functions
*******************************************************************/
-int slab_is_available(void)
-{
- return slab_state >= UP;
-}
-
static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
return s->node[node];
@@ -5274,7 +5263,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
const char *name;
int unmergeable;
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/* Defer until later */
return 0;
@@ -5319,7 +5308,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
static void sysfs_slab_remove(struct kmem_cache *s)
{
- if (slab_state < SYSFS)
+ if (slab_state < FULL)
/*
* Sysfs has not been setup yet so no need to remove the
* cache from sysfs.
@@ -5347,7 +5336,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
struct saved_alias *al;
- if (slab_state == SYSFS) {
+ if (slab_state == FULL) {
/*
* If we have a leftover link then remove it.
*/
@@ -5380,7 +5369,7 @@ static int __init slab_sysfs_init(void)
return -ENOSYS;
}
- slab_state = SYSFS;
+ slab_state = FULL;
list_for_each_entry(s, &slab_caches, list) {
err = sysfs_slab_add(s);