Diffstat (limited to 'mm/slab.c')
-rw-r--r-- mm/slab.c | 813
1 files changed, 475 insertions, 338 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 6f8495e2185b..d66c2b0d9715 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -55,7 +55,7 @@
*
* SMP synchronization:
* constructors and destructors are called without any locking.
- * Several members in kmem_cache_t and struct slab never change, they
+ * Several members in struct kmem_cache and struct slab never change, they
* are accessed without any locking.
* The per-cpu arrays are never accessed from the wrong cpu, no locking,
* and local interrupts are disabled so slab code is preempt-safe.
@@ -244,7 +244,7 @@ struct slab {
*/
struct slab_rcu {
struct rcu_head head;
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
void *addr;
};
@@ -294,6 +294,7 @@ struct kmem_list3 {
unsigned long next_reap;
int free_touched;
unsigned int free_limit;
+ unsigned int colour_next; /* Per-node cache coloring */
spinlock_t list_lock;
struct array_cache *shared; /* shared per node */
struct array_cache **alien; /* on other nodes */
@@ -316,6 +317,8 @@ struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
*/
static __always_inline int index_of(const size_t size)
{
+ extern void __bad_size(void);
+
if (__builtin_constant_p(size)) {
int i = 0;
@@ -326,25 +329,23 @@ static __always_inline int index_of(const size_t size)
i++;
#include "linux/kmalloc_sizes.h"
#undef CACHE
- {
- extern void __bad_size(void);
- __bad_size();
- }
+ __bad_size();
} else
- BUG();
+ __bad_size();
return 0;
}
#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))
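Note on the hunk above: the runtime BUG() is replaced with a call to __bad_size(), a function that is declared but never defined anywhere. Because index_of() is __always_inline and every legal caller passes a compile-time-constant size, the optimizer proves the call dead and drops it; an unsupported size instead surfaces as a link-time error rather than a runtime crash. A minimal standalone model of the pattern (names hypothetical, requires compiling with optimization, e.g. gcc -O2):

	/* __size_unsupported() has no definition anywhere; if the
	 * compiler cannot prove the call dead, the build fails at
	 * link time instead of crashing at run time. */
	extern void __size_unsupported(void);

	static inline int idx_for(unsigned long size)
	{
		if (__builtin_constant_p(size)) {
			if (size <= 32)   return 0;
			if (size <= 64)   return 1;
			if (size <= 128)  return 2;
			__size_unsupported();	/* constant but too big */
		} else
			__size_unsupported();	/* non-constant size    */
		return -1;
	}

	int main(void)
	{
		return idx_for(64);	/* folds to 1; extern call elided */
	}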
-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
{
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
INIT_LIST_HEAD(&parent->slabs_free);
parent->shared = NULL;
parent->alien = NULL;
+ parent->colour_next = 0;
spin_lock_init(&parent->list_lock);
parent->free_objects = 0;
parent->free_touched = 0;
@@ -364,7 +365,7 @@ static inline void kmem_list3_init(struct kmem_list3 *parent)
} while (0)
/*
- * kmem_cache_t
+ * struct kmem_cache
*
* manages a cache.
*/
@@ -375,7 +376,7 @@ struct kmem_cache {
unsigned int batchcount;
unsigned int limit;
unsigned int shared;
- unsigned int objsize;
+ unsigned int buffer_size;
/* 2) touched by every alloc & free from the backend */
struct kmem_list3 *nodelists[MAX_NUMNODES];
unsigned int flags; /* constant flags */
@@ -391,16 +392,15 @@ struct kmem_cache {
size_t colour; /* cache colouring range */
unsigned int colour_off; /* colour offset */
- unsigned int colour_next; /* cache colouring */
- kmem_cache_t *slabp_cache;
+ struct kmem_cache *slabp_cache;
unsigned int slab_size;
unsigned int dflags; /* dynamic flags */
/* constructor func */
- void (*ctor) (void *, kmem_cache_t *, unsigned long);
+ void (*ctor) (void *, struct kmem_cache *, unsigned long);
/* de-constructor func */
- void (*dtor) (void *, kmem_cache_t *, unsigned long);
+ void (*dtor) (void *, struct kmem_cache *, unsigned long);
/* 4) cache creation/removal */
const char *name;
@@ -423,8 +423,14 @@ struct kmem_cache {
atomic_t freemiss;
#endif
#if DEBUG
- int dbghead;
- int reallen;
+ /*
+ * If debugging is enabled, then the allocator can add additional
+ * fields and/or padding to every object. buffer_size contains the total
+ * object size including these internal fields, the following two
+ * variables contain the offset to the user object and its size.
+ */
+ int obj_offset;
+ int obj_size;
#endif
};
@@ -495,50 +501,50 @@ struct kmem_cache {
/* memory layout of objects:
* 0 : objp
- * 0 .. cachep->dbghead - BYTES_PER_WORD - 1: padding. This ensures that
+ * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
* the end of an object is aligned with the end of the real
* allocation. Catches writes behind the end of the allocation.
- * cachep->dbghead - BYTES_PER_WORD .. cachep->dbghead - 1:
+ * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
* redzone word.
- * cachep->dbghead: The real object.
- * cachep->objsize - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
- * cachep->objsize - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
+ * cachep->obj_offset: The real object.
+ * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
+ * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address [BYTES_PER_WORD long]
*/
-static int obj_dbghead(kmem_cache_t *cachep)
+static int obj_offset(struct kmem_cache *cachep)
{
- return cachep->dbghead;
+ return cachep->obj_offset;
}
-static int obj_reallen(kmem_cache_t *cachep)
+static int obj_size(struct kmem_cache *cachep)
{
- return cachep->reallen;
+ return cachep->obj_size;
}
-static unsigned long *dbg_redzone1(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
- return (unsigned long*) (objp+obj_dbghead(cachep)-BYTES_PER_WORD);
+ return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
}
-static unsigned long *dbg_redzone2(kmem_cache_t *cachep, void *objp)
+static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
if (cachep->flags & SLAB_STORE_USER)
- return (unsigned long *)(objp + cachep->objsize -
+ return (unsigned long *)(objp + cachep->buffer_size -
2 * BYTES_PER_WORD);
- return (unsigned long *)(objp + cachep->objsize - BYTES_PER_WORD);
+ return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
-static void **dbg_userword(kmem_cache_t *cachep, void *objp)
+static void **dbg_userword(struct kmem_cache *cachep, void *objp)
{
BUG_ON(!(cachep->flags & SLAB_STORE_USER));
- return (void **)(objp + cachep->objsize - BYTES_PER_WORD);
+ return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
}
#else
-#define obj_dbghead(x) 0
-#define obj_reallen(cachep) (cachep->objsize)
+#define obj_offset(x) 0
+#define obj_size(cachep) (cachep->buffer_size)
#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
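The renamed fields make the layout comment above easy to check with concrete numbers. A worked example, assuming a 32-bit machine (BYTES_PER_WORD == 4) and a cache created with a 16-byte object and flags SLAB_RED_ZONE | SLAB_STORE_USER (kmem_cache_create() below adds one word of obj_offset and two words of size for the red zones, plus one word for the caller):

	obj_offset  == 4
	buffer_size == 4 + 16 + 4 + 4 == 28

	offset  0..3   redzone word 1  <- dbg_redzone1() == objp + 4 - 4
	offset  4..19  the real object <- objp + obj_offset()
	offset 20..23  redzone word 2  <- dbg_redzone2() == objp + 28 - 2*4
	offset 24..27  last caller     <- dbg_userword() == objp + 28 - 4

obj_offset/obj_size describe the user's view of the object; buffer_size is the allocator's view including the debug fields.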
@@ -591,6 +597,18 @@ static inline struct slab *page_get_slab(struct page *page)
return (struct slab *)page->lru.prev;
}
+static inline struct kmem_cache *virt_to_cache(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_cache(page);
+}
+
+static inline struct slab *virt_to_slab(const void *obj)
+{
+ struct page *page = virt_to_page(obj);
+ return page_get_slab(page);
+}
+
/* These are the default caches for kmalloc. Custom caches can have other sizes. */
struct cache_sizes malloc_sizes[] = {
#define CACHE(x) { .cs_size = (x) },
@@ -619,16 +637,16 @@ static struct arraycache_init initarray_generic =
{ {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
/* internal cache of cache description objs */
-static kmem_cache_t cache_cache = {
+static struct kmem_cache cache_cache = {
.batchcount = 1,
.limit = BOOT_CPUCACHE_ENTRIES,
.shared = 1,
- .objsize = sizeof(kmem_cache_t),
+ .buffer_size = sizeof(struct kmem_cache),
.flags = SLAB_NO_REAP,
.spinlock = SPIN_LOCK_UNLOCKED,
.name = "kmem_cache",
#if DEBUG
- .reallen = sizeof(kmem_cache_t),
+ .obj_size = sizeof(struct kmem_cache),
#endif
};
@@ -657,17 +675,17 @@ static enum {
static DEFINE_PER_CPU(struct work_struct, reap_work);
-static void free_block(kmem_cache_t *cachep, void **objpp, int len, int node);
-static void enable_cpucache(kmem_cache_t *cachep);
+static void free_block(struct kmem_cache *cachep, void **objpp, int len, int node);
+static void enable_cpucache(struct kmem_cache *cachep);
static void cache_reap(void *unused);
-static int __node_shrink(kmem_cache_t *cachep, int node);
+static int __node_shrink(struct kmem_cache *cachep, int node);
-static inline struct array_cache *ac_data(kmem_cache_t *cachep)
+static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
return cachep->array[smp_processor_id()];
}
-static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
+static inline struct kmem_cache *__find_general_cachep(size_t size, gfp_t gfpflags)
{
struct cache_sizes *csizep = malloc_sizes;
@@ -691,43 +709,80 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, gfp_t gfpflags)
return csizep->cs_cachep;
}
-kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
+struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
{
return __find_general_cachep(size, gfpflags);
}
EXPORT_SYMBOL(kmem_find_general_cachep);
-/* Cal the num objs, wastage, and bytes left over for a given slab size. */
-static void cache_estimate(unsigned long gfporder, size_t size, size_t align,
- int flags, size_t *left_over, unsigned int *num)
+static size_t slab_mgmt_size(size_t nr_objs, size_t align)
{
- int i;
- size_t wastage = PAGE_SIZE << gfporder;
- size_t extra = 0;
- size_t base = 0;
+ return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
+}
- if (!(flags & CFLGS_OFF_SLAB)) {
- base = sizeof(struct slab);
- extra = sizeof(kmem_bufctl_t);
- }
- i = 0;
- while (i * size + ALIGN(base + i * extra, align) <= wastage)
- i++;
- if (i > 0)
- i--;
+/* Calculate the number of objects and left-over bytes for a given
+ buffer size. */
+static void cache_estimate(unsigned long gfporder, size_t buffer_size,
+ size_t align, int flags, size_t *left_over,
+ unsigned int *num)
+{
+ int nr_objs;
+ size_t mgmt_size;
+ size_t slab_size = PAGE_SIZE << gfporder;
+
+ /*
+ * The slab management structure can be either off the slab or
+ * on it. For the latter case, the memory allocated for a
+ * slab is used for:
+ *
+ * - The struct slab
+ * - One kmem_bufctl_t for each object
+ * - Padding to respect alignment of @align
+ * - @buffer_size bytes for each object
+ *
+ * If the slab management structure is off the slab, then the
+ * alignment will already be calculated into the size. Because
+ * the slabs are all pages aligned, the objects will be at the
+ * correct alignment when allocated.
+ */
+ if (flags & CFLGS_OFF_SLAB) {
+ mgmt_size = 0;
+ nr_objs = slab_size / buffer_size;
+
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
+ } else {
+ /*
+ * Ignore padding for the initial guess. The padding
+ * is at most @align-1 bytes, and @buffer_size is at
+ * least @align. In the worst case, this result will
+ * be one greater than the number of objects that fit
+ * into the memory allocation when taking the padding
+ * into account.
+ */
+ nr_objs = (slab_size - sizeof(struct slab)) /
+ (buffer_size + sizeof(kmem_bufctl_t));
+
+ /*
+ * This calculated number will be either the right
+ * amount, or one greater than what we want.
+ */
+ if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
+ > slab_size)
+ nr_objs--;
- if (i > SLAB_LIMIT)
- i = SLAB_LIMIT;
+ if (nr_objs > SLAB_LIMIT)
+ nr_objs = SLAB_LIMIT;
- *num = i;
- wastage -= i * size;
- wastage -= ALIGN(base + i * extra, align);
- *left_over = wastage;
+ mgmt_size = slab_mgmt_size(nr_objs, align);
+ }
+ *num = nr_objs;
+ *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
}
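The rewritten cache_estimate() is now easy to verify by hand. A worked run as a standalone sketch (not the kernel function), assuming 32-bit sizes, sizeof(struct slab) == 28 and sizeof(kmem_bufctl_t) == 4, for an order-0 slab of 256-byte objects with on-slab management and 32-byte alignment:

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
	#define SLAB_HDR   28u	/* assumed sizeof(struct slab)   */
	#define BUFCTL_SZ   4u	/* assumed sizeof(kmem_bufctl_t) */

	int main(void)
	{
		unsigned slab_size = 4096, buffer_size = 256, align = 32;
		/* Initial guess, ignoring alignment padding. */
		unsigned nr = (slab_size - SLAB_HDR) /
			      (buffer_size + BUFCTL_SZ);	/* 15 */
		unsigned mgmt = ALIGN(SLAB_HDR + nr * BUFCTL_SZ, align);

		if (mgmt + nr * buffer_size > slab_size)
			nr--;	/* the guess can be one too high */
		mgmt = ALIGN(SLAB_HDR + nr * BUFCTL_SZ, align);
		printf("num=%u left_over=%u\n", nr,
		       slab_size - nr * buffer_size - mgmt);
		return 0;	/* prints: num=15 left_over=160 */
	}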
#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
-static void __slab_error(const char *function, kmem_cache_t *cachep, char *msg)
+static void __slab_error(const char *function, struct kmem_cache *cachep, char *msg)
{
printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
function, cachep->name, msg);
@@ -774,9 +829,9 @@ static struct array_cache *alloc_arraycache(int node, int entries,
}
#ifdef CONFIG_NUMA
-static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit)
{
struct array_cache **ac_ptr;
int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -803,7 +858,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
return ac_ptr;
}
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
{
int i;
@@ -816,8 +871,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
kfree(ac_ptr);
}
-static inline void __drain_alien_cache(kmem_cache_t *cachep,
- struct array_cache *ac, int node)
+static void __drain_alien_cache(struct kmem_cache *cachep,
+ struct array_cache *ac, int node)
{
struct kmem_list3 *rl3 = cachep->nodelists[node];
@@ -829,14 +884,14 @@ static inline void __drain_alien_cache(kmem_cache_t *cachep,
}
}
-static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
+static void drain_alien_cache(struct kmem_cache *cachep, struct array_cache **alien)
{
int i = 0;
struct array_cache *ac;
unsigned long flags;
for_each_online_node(i) {
- ac = l3->alien[i];
+ ac = alien[i];
if (ac) {
spin_lock_irqsave(&ac->lock, flags);
__drain_alien_cache(cachep, ac, i);
@@ -845,16 +900,25 @@ static void drain_alien_cache(kmem_cache_t *cachep, struct kmem_list3 *l3)
}
}
#else
-#define alloc_alien_cache(node, limit) do { } while (0)
-#define free_alien_cache(ac_ptr) do { } while (0)
-#define drain_alien_cache(cachep, l3) do { } while (0)
+
+#define drain_alien_cache(cachep, alien) do { } while (0)
+
+static inline struct array_cache **alloc_alien_cache(int node, int limit)
+{
+ return (struct array_cache **) 0x01020304ul;
+}
+
+static inline void free_alien_cache(struct array_cache **ac_ptr)
+{
+}
+
#endif
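The !CONFIG_NUMA stubs above deserve a note: alloc_alien_cache() returns the constant 0x01020304 instead of NULL, so the common "if (!alien) goto bad;" error path in cpuup_callback() still succeeds on non-NUMA builds, while any accidental dereference of the alien pointer faults immediately. A minimal model of the poison-pointer pattern (names hypothetical):

	struct array_cache;

	/* Non-NULL poison: passes NULL checks, faults on dereference. */
	#define ALIEN_POISON ((struct array_cache **)0x01020304ul)

	static struct array_cache **alloc_alien_cache_stub(void)
	{
		return ALIEN_POISON;
	}

	static void free_alien_cache_stub(struct array_cache **ac_ptr)
	{
		/* must be a no-op: ac_ptr may be the poison value */
		(void)ac_ptr;
	}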
static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
int memsize = sizeof(struct kmem_list3);
@@ -881,6 +945,11 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+ /*
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
+ */
cachep->nodelists[node] = l3;
}
@@ -895,26 +964,46 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
& array cache's */
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
+ cachep->batchcount);
if (!nc)
goto bad;
+ shared = alloc_arraycache(node,
+ cachep->shared * cachep->batchcount,
+ 0xbaadf00d);
+ if (!shared)
+ goto bad;
+
+ alien = alloc_alien_cache(node, cachep->limit);
+ if (!alien)
+ goto bad;
cachep->array[cpu] = nc;
l3 = cachep->nodelists[node];
BUG_ON(!l3);
- if (!l3->shared) {
- if (!(nc = alloc_arraycache(node,
- cachep->shared *
- cachep->batchcount,
- 0xbaadf00d)))
- goto bad;
- /* we are serialised from CPU_DEAD or
- CPU_UP_CANCELLED by the cpucontrol lock */
- l3->shared = nc;
+ spin_lock_irq(&l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+ * CPU_UP_CANCELLED by the cpucontrol lock
+ */
+ l3->shared = shared;
+ shared = NULL;
+ }
+#ifdef CONFIG_NUMA
+ if (!l3->alien) {
+ l3->alien = alien;
+ alien = NULL;
}
+#endif
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ free_alien_cache(alien);
}
mutex_unlock(&cache_chain_mutex);
break;
@@ -923,25 +1012,34 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
+ /*
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between
+ * cpu_down, and a kmalloc allocation from another cpu for
+ * memory from the node of the cpu going down. The list3
+ * structure is usually allocated from kmem_cache_create() and
+ * gets destroyed at kmem_cache_destroy().
+ */
/* fall thru */
case CPU_UP_CANCELED:
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next) {
struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
cpumask_t mask;
mask = node_to_cpumask(node);
- spin_lock_irq(&cachep->spinlock);
/* cpu is dead; no one can alloc from it. */
nc = cachep->array[cpu];
cachep->array[cpu] = NULL;
l3 = cachep->nodelists[node];
if (!l3)
- goto unlock_cache;
+ goto free_array_cache;
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
/* Free limit for this kmem_list3 */
l3->free_limit -= cachep->batchcount;
@@ -949,34 +1047,44 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
free_block(cachep, nc->entry, nc->avail, node);
if (!cpus_empty(mask)) {
- spin_unlock(&l3->list_lock);
- goto unlock_cache;
+ spin_unlock_irq(&l3->list_lock);
+ goto free_array_cache;
}
- if (l3->shared) {
+ shared = l3->shared;
+ if (shared) {
free_block(cachep, l3->shared->entry,
l3->shared->avail, node);
- kfree(l3->shared);
l3->shared = NULL;
}
- if (l3->alien) {
- drain_alien_cache(cachep, l3);
- free_alien_cache(l3->alien);
- l3->alien = NULL;
- }
- /* free slabs belonging to this node */
- if (__node_shrink(cachep, node)) {
- cachep->nodelists[node] = NULL;
- spin_unlock(&l3->list_lock);
- kfree(l3);
- } else {
- spin_unlock(&l3->list_lock);
+ alien = l3->alien;
+ l3->alien = NULL;
+
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+ drain_alien_cache(cachep, alien);
+ free_alien_cache(alien);
}
- unlock_cache:
- spin_unlock_irq(&cachep->spinlock);
+free_array_cache:
kfree(nc);
}
+ /*
+ * In the previous loop, all the objects were freed to
+ * the respective cache's slabs, now we can go ahead and
+ * shrink each nodelist to its limit.
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+ spin_lock_irq(&l3->list_lock);
+ /* free slabs belonging to this node */
+ __node_shrink(cachep, node);
+ spin_unlock_irq(&l3->list_lock);
+ }
mutex_unlock(&cache_chain_mutex);
break;
#endif
@@ -992,7 +1100,7 @@ static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
/*
* swap the static kmem_list3 with kmalloced memory
*/
-static void init_list(kmem_cache_t *cachep, struct kmem_list3 *list, int nodeid)
+static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list, int nodeid)
{
struct kmem_list3 *ptr;
@@ -1032,14 +1140,14 @@ void __init kmem_cache_init(void)
/* Bootstrap is tricky, because several objects are allocated
* from caches that do not exist yet:
- * 1) initialize the cache_cache cache: it contains the kmem_cache_t
+ * 1) initialize the cache_cache cache: it contains the struct kmem_cache
* structures of all caches, except cache_cache itself: cache_cache
* is statically allocated.
* Initially an __init data area is used for the head array and the
* kmem_list3 structures, it's replaced with a kmalloc allocated
* array at the end of the bootstrap.
* 2) Create the first kmalloc cache.
- * The kmem_cache_t for the new cache is allocated normally.
+ * The struct kmem_cache for the new cache is allocated normally.
* An __init data area is used for the head array.
* 3) Create the remaining kmalloc caches, with minimally sized
* head arrays.
@@ -1057,15 +1165,14 @@ void __init kmem_cache_init(void)
cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
- cache_cache.objsize = ALIGN(cache_cache.objsize, cache_line_size());
+ cache_cache.buffer_size = ALIGN(cache_cache.buffer_size, cache_line_size());
- cache_estimate(0, cache_cache.objsize, cache_line_size(), 0,
+ cache_estimate(0, cache_cache.buffer_size, cache_line_size(), 0,
&left_over, &cache_cache.num);
if (!cache_cache.num)
BUG();
cache_cache.colour = left_over / cache_cache.colour_off;
- cache_cache.colour_next = 0;
cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
sizeof(struct slab), cache_line_size());
@@ -1132,8 +1239,8 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
local_irq_disable();
- BUG_ON(ac_data(&cache_cache) != &initarray_cache.cache);
- memcpy(ptr, ac_data(&cache_cache),
+ BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
+ memcpy(ptr, cpu_cache_get(&cache_cache),
sizeof(struct arraycache_init));
cache_cache.array[smp_processor_id()] = ptr;
local_irq_enable();
@@ -1141,9 +1248,9 @@ void __init kmem_cache_init(void)
ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
local_irq_disable();
- BUG_ON(ac_data(malloc_sizes[INDEX_AC].cs_cachep)
+ BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
!= &initarray_generic.cache);
- memcpy(ptr, ac_data(malloc_sizes[INDEX_AC].cs_cachep),
+ memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
sizeof(struct arraycache_init));
malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
ptr;
@@ -1170,7 +1277,7 @@ void __init kmem_cache_init(void)
/* 6) resize the head arrays to their final sizes */
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
enable_cpucache(cachep);
@@ -1181,7 +1288,7 @@ void __init kmem_cache_init(void)
g_cpucache_up = FULL;
/* Register a cpu startup notifier callback
- * that initializes ac_data for all new cpus
+ * that initializes cpu_cache_get for all new cpus
*/
register_cpu_notifier(&cpucache_notifier);
@@ -1213,7 +1320,7 @@ __initcall(cpucache_init);
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct page *page;
void *addr;
@@ -1239,7 +1346,7 @@ static void *kmem_getpages(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/*
* Interface to system's page release.
*/
-static void kmem_freepages(kmem_cache_t *cachep, void *addr)
+static void kmem_freepages(struct kmem_cache *cachep, void *addr)
{
unsigned long i = (1 << cachep->gfporder);
struct page *page = virt_to_page(addr);
@@ -1261,7 +1368,7 @@ static void kmem_freepages(kmem_cache_t *cachep, void *addr)
static void kmem_rcu_free(struct rcu_head *head)
{
struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
- kmem_cache_t *cachep = slab_rcu->cachep;
+ struct kmem_cache *cachep = slab_rcu->cachep;
kmem_freepages(cachep, slab_rcu->addr);
if (OFF_SLAB(cachep))
@@ -1271,12 +1378,12 @@ static void kmem_rcu_free(struct rcu_head *head)
#if DEBUG
#ifdef CONFIG_DEBUG_PAGEALLOC
-static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
+static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
unsigned long caller)
{
- int size = obj_reallen(cachep);
+ int size = obj_size(cachep);
- addr = (unsigned long *)&((char *)addr)[obj_dbghead(cachep)];
+ addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
if (size < 5 * sizeof(unsigned long))
return;
@@ -1304,10 +1411,10 @@ static void store_stackinfo(kmem_cache_t *cachep, unsigned long *addr,
}
#endif
-static void poison_obj(kmem_cache_t *cachep, void *addr, unsigned char val)
+static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
{
- int size = obj_reallen(cachep);
- addr = &((char *)addr)[obj_dbghead(cachep)];
+ int size = obj_size(cachep);
+ addr = &((char *)addr)[obj_offset(cachep)];
memset(addr, val, size);
*(unsigned char *)(addr + size - 1) = POISON_END;
@@ -1326,7 +1433,7 @@ static void dump_line(char *data, int offset, int limit)
#if DEBUG
-static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
+static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
{
int i, size;
char *realobj;
@@ -1344,8 +1451,8 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
(unsigned long)*dbg_userword(cachep, objp));
printk("\n");
}
- realobj = (char *)objp + obj_dbghead(cachep);
- size = obj_reallen(cachep);
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
for (i = 0; i < size && lines; i += 16, lines--) {
int limit;
limit = 16;
@@ -1355,14 +1462,14 @@ static void print_objinfo(kmem_cache_t *cachep, void *objp, int lines)
}
}
-static void check_poison_obj(kmem_cache_t *cachep, void *objp)
+static void check_poison_obj(struct kmem_cache *cachep, void *objp)
{
char *realobj;
int size, i;
int lines = 0;
- realobj = (char *)objp + obj_dbghead(cachep);
- size = obj_reallen(cachep);
+ realobj = (char *)objp + obj_offset(cachep);
+ size = obj_size(cachep);
for (i = 0; i < size; i++) {
char exp = POISON_FREE;
@@ -1395,20 +1502,20 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
/* Print some data about the neighboring objects, if they
* exist:
*/
- struct slab *slabp = page_get_slab(virt_to_page(objp));
+ struct slab *slabp = virt_to_slab(objp);
int objnr;
- objnr = (objp - slabp->s_mem) / cachep->objsize;
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
if (objnr) {
- objp = slabp->s_mem + (objnr - 1) * cachep->objsize;
- realobj = (char *)objp + obj_dbghead(cachep);
+ objp = slabp->s_mem + (objnr - 1) * cachep->buffer_size;
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
}
if (objnr + 1 < cachep->num) {
- objp = slabp->s_mem + (objnr + 1) * cachep->objsize;
- realobj = (char *)objp + obj_dbghead(cachep);
+ objp = slabp->s_mem + (objnr + 1) * cachep->buffer_size;
+ realobj = (char *)objp + obj_offset(cachep);
printk(KERN_ERR "Next obj: start=%p, len=%d\n",
realobj, size);
print_objinfo(cachep, objp, 2);
@@ -1417,25 +1524,23 @@ static void check_poison_obj(kmem_cache_t *cachep, void *objp)
}
#endif
-/* Destroy all the objs in a slab, and release the mem back to the system.
- * Before calling the slab must have been unlinked from the cache.
- * The cache-lock is not held/needed.
+#if DEBUG
+/**
+ * slab_destroy_objs - call the registered destructor for each object in
+ * a slab that is to be destroyed.
*/
-static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
{
- void *addr = slabp->s_mem - slabp->colouroff;
-
-#if DEBUG
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0
+ if ((cachep->buffer_size % PAGE_SIZE) == 0
&& OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE,
+ cachep->buffer_size / PAGE_SIZE,
1);
else
check_poison_obj(cachep, objp);
@@ -1452,18 +1557,32 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
"was overwritten");
}
if (cachep->dtor && !(cachep->flags & SLAB_POISON))
- (cachep->dtor) (objp + obj_dbghead(cachep), cachep, 0);
+ (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
}
+}
#else
+static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
+{
if (cachep->dtor) {
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
(cachep->dtor) (objp, cachep, 0);
}
}
+}
#endif
+/**
+ * Destroy all the objs in a slab, and release the mem back to the system.
+ * Before calling the slab must have been unlinked from the cache.
+ * The cache-lock is not held/needed.
+ */
+static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
+{
+ void *addr = slabp->s_mem - slabp->colouroff;
+
+ slab_destroy_objs(cachep, slabp);
if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
struct slab_rcu *slab_rcu;
@@ -1478,9 +1597,9 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
}
}
-/* For setting up all the kmem_list3s for cache whose objsize is same
+/* For setting up all the kmem_list3s for cache whose buffer_size is same
as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(struct kmem_cache *cachep, int index)
{
int node;
@@ -1493,15 +1612,20 @@ static inline void set_up_list3s(kmem_cache_t *cachep, int index)
}
/**
- * calculate_slab_order - calculate size (page order) of slabs and the number
- * of objects per slab.
+ * calculate_slab_order - calculate size (page order) of slabs
+ * @cachep: pointer to the cache that is being created
+ * @size: size of objects to be created in this cache.
+ * @align: required alignment for the objects.
+ * @flags: slab allocation flags
+ *
+ * Also calculates the number of objects per slab.
*
* This could be made much more intelligent. For now, try to avoid using
* high order pages for slabs. When the gfp() functions are more friendly
* towards high-order requests, this should be changed.
*/
-static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
- size_t align, gfp_t flags)
+static inline size_t calculate_slab_order(struct kmem_cache *cachep,
+ size_t size, size_t align, unsigned long flags)
{
size_t left_over = 0;
@@ -1572,13 +1696,13 @@ static inline size_t calculate_slab_order(kmem_cache_t *cachep, size_t size,
* cacheline. This can be beneficial if you're counting cycles as closely
* as davem.
*/
-kmem_cache_t *
+struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
- unsigned long flags, void (*ctor)(void*, kmem_cache_t *, unsigned long),
- void (*dtor)(void*, kmem_cache_t *, unsigned long))
+ unsigned long flags, void (*ctor)(void*, struct kmem_cache *, unsigned long),
+ void (*dtor)(void*, struct kmem_cache *, unsigned long))
{
size_t left_over, slab_size, ralign;
- kmem_cache_t *cachep = NULL;
+ struct kmem_cache *cachep = NULL;
struct list_head *p;
/*
@@ -1596,7 +1720,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
- kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
+ struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
mm_segment_t old_fs = get_fs();
char tmp;
int res;
@@ -1611,7 +1735,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
set_fs(old_fs);
if (res) {
printk("SLAB: cache with size %d has lost its name\n",
- pc->objsize);
+ pc->buffer_size);
continue;
}
@@ -1696,20 +1820,20 @@ kmem_cache_create (const char *name, size_t size, size_t align,
align = ralign;
/* Get cache's description obj. */
- cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
+ cachep = kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
goto oops;
- memset(cachep, 0, sizeof(kmem_cache_t));
+ memset(cachep, 0, sizeof(struct kmem_cache));
#if DEBUG
- cachep->reallen = size;
+ cachep->obj_size = size;
if (flags & SLAB_RED_ZONE) {
/* redzoning only works with word aligned caches */
align = BYTES_PER_WORD;
/* add space for red zone words */
- cachep->dbghead += BYTES_PER_WORD;
+ cachep->obj_offset += BYTES_PER_WORD;
size += 2 * BYTES_PER_WORD;
}
if (flags & SLAB_STORE_USER) {
@@ -1722,8 +1846,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
}
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
- && cachep->reallen > cache_line_size() && size < PAGE_SIZE) {
- cachep->dbghead += PAGE_SIZE - size;
+ && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
+ cachep->obj_offset += PAGE_SIZE - size;
size = PAGE_SIZE;
}
#endif
@@ -1786,7 +1910,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
spin_lock_init(&cachep->spinlock);
- cachep->objsize = size;
+ cachep->buffer_size = size;
if (flags & CFLGS_OFF_SLAB)
cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
@@ -1843,11 +1967,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep) % REAPTIMEOUT_LIST3;
- BUG_ON(!ac_data(cachep));
- ac_data(cachep)->avail = 0;
- ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
- ac_data(cachep)->batchcount = 1;
- ac_data(cachep)->touched = 0;
+ BUG_ON(!cpu_cache_get(cachep));
+ cpu_cache_get(cachep)->avail = 0;
+ cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
+ cpu_cache_get(cachep)->batchcount = 1;
+ cpu_cache_get(cachep)->touched = 0;
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
}
@@ -1875,7 +1999,7 @@ static void check_irq_on(void)
BUG_ON(irqs_disabled());
}
-static void check_spinlock_acquired(kmem_cache_t *cachep)
+static void check_spinlock_acquired(struct kmem_cache *cachep)
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -1883,7 +2007,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
#endif
}
-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -1916,45 +2040,43 @@ static void smp_call_function_all_cpus(void (*func)(void *arg), void *arg)
preempt_enable();
}
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node);
static void do_drain(void *arg)
{
- kmem_cache_t *cachep = (kmem_cache_t *) arg;
+ struct kmem_cache *cachep = (struct kmem_cache *) arg;
struct array_cache *ac;
int node = numa_node_id();
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
spin_lock(&cachep->nodelists[node]->list_lock);
free_block(cachep, ac->entry, ac->avail, node);
spin_unlock(&cachep->nodelists[node]->list_lock);
ac->avail = 0;
}
-static void drain_cpu_caches(kmem_cache_t *cachep)
+static void drain_cpu_caches(struct kmem_cache *cachep)
{
struct kmem_list3 *l3;
int node;
smp_call_function_all_cpus(do_drain, cachep);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
for_each_online_node(node) {
l3 = cachep->nodelists[node];
if (l3) {
- spin_lock(&l3->list_lock);
+ spin_lock_irq(&l3->list_lock);
drain_array_locked(cachep, l3->shared, 1, node);
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
if (l3->alien)
- drain_alien_cache(cachep, l3);
+ drain_alien_cache(cachep, l3->alien);
}
}
- spin_unlock_irq(&cachep->spinlock);
}
-static int __node_shrink(kmem_cache_t *cachep, int node)
+static int __node_shrink(struct kmem_cache *cachep, int node)
{
struct slab *slabp;
struct kmem_list3 *l3 = cachep->nodelists[node];
@@ -1983,7 +2105,7 @@ static int __node_shrink(kmem_cache_t *cachep, int node)
return ret;
}
-static int __cache_shrink(kmem_cache_t *cachep)
+static int __cache_shrink(struct kmem_cache *cachep)
{
int ret = 0, i = 0;
struct kmem_list3 *l3;
@@ -2009,7 +2131,7 @@ static int __cache_shrink(kmem_cache_t *cachep)
* Releases as many slabs as possible for a cache.
* To help debugging, a zero exit status indicates all slabs were released.
*/
-int kmem_cache_shrink(kmem_cache_t *cachep)
+int kmem_cache_shrink(struct kmem_cache *cachep)
{
if (!cachep || in_interrupt())
BUG();
@@ -2022,7 +2144,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* kmem_cache_destroy - delete a cache
* @cachep: the cache to destroy
*
- * Remove a kmem_cache_t object from the slab cache.
+ * Remove a struct kmem_cache object from the slab cache.
* Returns 0 on success.
*
* It is expected this function will be called by a module when it is
@@ -2035,7 +2157,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
* The caller must guarantee that noone will allocate memory from the cache
* during the kmem_cache_destroy().
*/
-int kmem_cache_destroy(kmem_cache_t *cachep)
+int kmem_cache_destroy(struct kmem_cache *cachep)
{
int i;
struct kmem_list3 *l3;
@@ -2086,7 +2208,7 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
EXPORT_SYMBOL(kmem_cache_destroy);
/* Get the memory for a slab management obj. */
-static struct slab *alloc_slabmgmt(kmem_cache_t *cachep, void *objp,
+static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
int colour_off, gfp_t local_flags)
{
struct slab *slabp;
@@ -2112,13 +2234,13 @@ static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
return (kmem_bufctl_t *) (slabp + 1);
}
-static void cache_init_objs(kmem_cache_t *cachep,
+static void cache_init_objs(struct kmem_cache *cachep,
struct slab *slabp, unsigned long ctor_flags)
{
int i;
for (i = 0; i < cachep->num; i++) {
- void *objp = slabp->s_mem + cachep->objsize * i;
+ void *objp = slabp->s_mem + cachep->buffer_size * i;
#if DEBUG
/* need to poison the objs? */
if (cachep->flags & SLAB_POISON)
@@ -2136,7 +2258,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
* Otherwise, deadlock. They must also be threaded.
*/
if (cachep->ctor && !(cachep->flags & SLAB_POISON))
- cachep->ctor(objp + obj_dbghead(cachep), cachep,
+ cachep->ctor(objp + obj_offset(cachep), cachep,
ctor_flags);
if (cachep->flags & SLAB_RED_ZONE) {
@@ -2147,10 +2269,10 @@ static void cache_init_objs(kmem_cache_t *cachep,
slab_error(cachep, "constructor overwrote the"
" start of an object");
}
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)
&& cachep->flags & SLAB_POISON)
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 0);
+ cachep->buffer_size / PAGE_SIZE, 0);
#else
if (cachep->ctor)
cachep->ctor(objp, cachep, ctor_flags);
@@ -2161,7 +2283,7 @@ static void cache_init_objs(kmem_cache_t *cachep,
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
+static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -2172,7 +2294,43 @@ static void kmem_flagcheck(kmem_cache_t *cachep, gfp_t flags)
}
}
-static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
+static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp, int nodeid)
+{
+ void *objp = slabp->s_mem + (slabp->free * cachep->buffer_size);
+ kmem_bufctl_t next;
+
+ slabp->inuse++;
+ next = slab_bufctl(slabp)[slabp->free];
+#if DEBUG
+ slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
+ WARN_ON(slabp->nodeid != nodeid);
+#endif
+ slabp->free = next;
+
+ return objp;
+}
+
+static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp, void *objp,
+ int nodeid)
+{
+ unsigned int objnr = (unsigned)(objp-slabp->s_mem) / cachep->buffer_size;
+
+#if DEBUG
+ /* Verify that the slab belongs to the intended node */
+ WARN_ON(slabp->nodeid != nodeid);
+
+ if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
+ printk(KERN_ERR "slab: double free detected in cache "
+ "'%s', objp %p\n", cachep->name, objp);
+ BUG();
+ }
+#endif
+ slab_bufctl(slabp)[objnr] = slabp->free;
+ slabp->free = objnr;
+ slabp->inuse--;
+}
+
+static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp, void *objp)
{
int i;
struct page *page;
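slab_get_obj()/slab_put_obj() factor out the kmem_bufctl_t freelist handling that was previously open-coded in cache_alloc_refill(), __cache_alloc_node() and free_block(). The bufctl array is an index-linked free list: entry i holds the index of the next free object after object i, and slabp->free is the head. A minimal standalone model of that free list (a sketch with the kernel types replaced by plain unsigned ints):

	#include <assert.h>
	#include <stdio.h>

	#define NR_OBJS    4
	#define BUFCTL_END ((unsigned)~0u)

	static unsigned bufctl[NR_OBJS];	/* bufctl[i]: next free after i */
	static unsigned free_head;		/* index of first free object   */

	static unsigned get_obj(void)		/* cf. slab_get_obj() */
	{
		unsigned i = free_head;
		free_head = bufctl[i];
		return i;
	}

	static void put_obj(unsigned i)		/* cf. slab_put_obj() */
	{
		bufctl[i] = free_head;
		free_head = i;
	}

	int main(void)
	{
		unsigned i;

		for (i = 0; i < NR_OBJS; i++)	/* cf. cache_init_objs() */
			bufctl[i] = i + 1;
		bufctl[NR_OBJS - 1] = BUFCTL_END;
		free_head = 0;

		assert(get_obj() == 0);
		assert(get_obj() == 1);
		put_obj(0);		/* 0 becomes the new head */
		assert(get_obj() == 0);
		printf("freelist model ok\n");
		return 0;
	}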
@@ -2191,7 +2349,7 @@ static void set_slab_attr(kmem_cache_t *cachep, struct slab *slabp, void *objp)
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct slab *slabp;
void *objp;
@@ -2217,20 +2375,20 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
*/
ctor_flags |= SLAB_CTOR_ATOMIC;
- /* About to mess with non-constant members - lock. */
+ /* Take the l3 list lock to change the colour_next on this node */
check_irq_off();
- spin_lock(&cachep->spinlock);
+ l3 = cachep->nodelists[nodeid];
+ spin_lock(&l3->list_lock);
/* Get colour for the slab, and cal the next value. */
- offset = cachep->colour_next;
- cachep->colour_next++;
- if (cachep->colour_next >= cachep->colour)
- cachep->colour_next = 0;
- offset *= cachep->colour_off;
+ offset = l3->colour_next;
+ l3->colour_next++;
+ if (l3->colour_next >= cachep->colour)
+ l3->colour_next = 0;
+ spin_unlock(&l3->list_lock);
- spin_unlock(&cachep->spinlock);
+ offset *= cachep->colour_off;
- check_irq_off();
if (local_flags & __GFP_WAIT)
local_irq_enable();
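Moving colour_next from the cache into the per-node kmem_list3 lets each node cycle through the colour range independently, and cache_grow() now only takes the node's list_lock briefly instead of the cache-wide spinlock. The cycle itself is unchanged: successive slabs are offset by colour_off bytes, wrapping after cachep->colour steps, so objects in consecutive slabs land in different cache lines. A standalone sketch, with hypothetical values colour == 4 and colour_off == 32:

	/* colour_next now lives in struct kmem_list3, one per node. */
	static unsigned colour_next;

	static unsigned next_slab_colour(unsigned colour, unsigned colour_off)
	{
		unsigned offset = colour_next++;

		if (colour_next >= colour)
			colour_next = 0;
		return offset * colour_off;	/* 0, 32, 64, 96, 0, ... */
	}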
@@ -2260,7 +2418,6 @@ static int cache_grow(kmem_cache_t *cachep, gfp_t flags, int nodeid)
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
- l3 = cachep->nodelists[nodeid];
spin_lock(&l3->list_lock);
/* Make slab active. */
@@ -2302,14 +2459,14 @@ static void kfree_debugcheck(const void *objp)
}
}
-static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
+static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
void *caller)
{
struct page *page;
unsigned int objnr;
struct slab *slabp;
- objp -= obj_dbghead(cachep);
+ objp -= obj_offset(cachep);
kfree_debugcheck(objp);
page = virt_to_page(objp);
@@ -2341,31 +2498,31 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
if (cachep->flags & SLAB_STORE_USER)
*dbg_userword(cachep, objp) = caller;
- objnr = (objp - slabp->s_mem) / cachep->objsize;
+ objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
BUG_ON(objnr >= cachep->num);
- BUG_ON(objp != slabp->s_mem + objnr * cachep->objsize);
+ BUG_ON(objp != slabp->s_mem + objnr * cachep->buffer_size);
if (cachep->flags & SLAB_DEBUG_INITIAL) {
/* Need to call the slab's constructor so the
* caller can perform a verify of its state (debugging).
* Called without the cache-lock held.
*/
- cachep->ctor(objp + obj_dbghead(cachep),
+ cachep->ctor(objp + obj_offset(cachep),
cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
}
if (cachep->flags & SLAB_POISON && cachep->dtor) {
/* we want to cache poison the object,
* call the destruction callback
*/
- cachep->dtor(objp + obj_dbghead(cachep), cachep, 0);
+ cachep->dtor(objp + obj_offset(cachep), cachep, 0);
}
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep)) {
store_stackinfo(cachep, objp, (unsigned long)caller);
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 0);
+ cachep->buffer_size / PAGE_SIZE, 0);
} else {
poison_obj(cachep, objp, POISON_FREE);
}
@@ -2376,7 +2533,7 @@ static void *cache_free_debugcheck(kmem_cache_t *cachep, void *objp,
return objp;
}
-static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
+static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
{
kmem_bufctl_t i;
int entries = 0;
@@ -2409,14 +2566,14 @@ static void check_slabp(kmem_cache_t *cachep, struct slab *slabp)
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
+static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
int batchcount;
struct kmem_list3 *l3;
struct array_cache *ac;
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
retry:
batchcount = ac->batchcount;
if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -2461,22 +2618,12 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
check_slabp(cachep, slabp);
check_spinlock_acquired(cachep);
while (slabp->inuse < cachep->num && batchcount--) {
- kmem_bufctl_t next;
STATS_INC_ALLOCED(cachep);
STATS_INC_ACTIVE(cachep);
STATS_SET_HIGH(cachep);
- /* get obj pointer */
- ac->entry[ac->avail++] = slabp->s_mem +
- slabp->free * cachep->objsize;
-
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
- WARN_ON(numa_node_id() != slabp->nodeid);
-#endif
- slabp->free = next;
+ ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
+ numa_node_id());
}
check_slabp(cachep, slabp);
@@ -2498,7 +2645,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
x = cache_grow(cachep, flags, numa_node_id());
// cache_grow can reenable interrupts, then ac could change.
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
if (!x && ac->avail == 0) // no objects in sight? abort
return NULL;
@@ -2510,7 +2657,7 @@ static void *cache_alloc_refill(kmem_cache_t *cachep, gfp_t flags)
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
+cache_alloc_debugcheck_before(struct kmem_cache *cachep, gfp_t flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
@@ -2519,16 +2666,16 @@ cache_alloc_debugcheck_before(kmem_cache_t *cachep, gfp_t flags)
}
#if DEBUG
-static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
+static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, gfp_t flags,
void *objp, void *caller)
{
if (!objp)
return objp;
if (cachep->flags & SLAB_POISON) {
#ifdef CONFIG_DEBUG_PAGEALLOC
- if ((cachep->objsize % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
+ if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
kernel_map_pages(virt_to_page(objp),
- cachep->objsize / PAGE_SIZE, 1);
+ cachep->buffer_size / PAGE_SIZE, 1);
else
check_poison_obj(cachep, objp);
#else
@@ -2553,7 +2700,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
*dbg_redzone1(cachep, objp) = RED_ACTIVE;
*dbg_redzone2(cachep, objp) = RED_ACTIVE;
}
- objp += obj_dbghead(cachep);
+ objp += obj_offset(cachep);
if (cachep->ctor && cachep->flags & SLAB_POISON) {
unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
@@ -2568,7 +2715,7 @@ static void *cache_alloc_debugcheck_after(kmem_cache_t *cachep, gfp_t flags,
#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
#endif
-static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
void *objp;
struct array_cache *ac;
@@ -2583,7 +2730,7 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
#endif
check_irq_off();
- ac = ac_data(cachep);
+ ac = cpu_cache_get(cachep);
if (likely(ac->avail)) {
STATS_INC_ALLOCHIT(cachep);
ac->touched = 1;
@@ -2595,7 +2742,8 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
return objp;
}
-static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+static __always_inline void *
+__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
unsigned long save_flags;
void *objp;
@@ -2606,7 +2754,7 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
objp = ____cache_alloc(cachep, flags);
local_irq_restore(save_flags);
objp = cache_alloc_debugcheck_after(cachep, flags, objp,
- __builtin_return_address(0));
+ caller);
prefetchw(objp);
return objp;
}
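__cache_alloc() now takes the caller address as a parameter instead of reading __builtin_return_address(0) itself; since it is __always_inline, the address recorded by SLAB_STORE_USER is the caller of the exported entry point (kmem_cache_alloc(), __kmalloc(), ...) rather than an internal frame. Underneath it sits the per-CPU array cache fast path: allocation is a LIFO pop from ac->entry[], and only an empty array falls back to cache_alloc_refill(). A minimal model of that fast path (a sketch, not the kernel code):

	#include <stdio.h>

	#define AC_LIMIT 8

	struct array_cache_model {
		unsigned avail;		/* number of cached pointers  */
		void *entry[AC_LIMIT];	/* most-recently-freed on top */
	};

	static void *ac_alloc(struct array_cache_model *ac)
	{
		if (ac->avail)		/* cf. "if (likely(ac->avail))" */
			return ac->entry[--ac->avail];
		return NULL;		/* kernel: cache_alloc_refill() */
	}

	static void ac_free(struct array_cache_model *ac, void *objp)
	{
		if (ac->avail < AC_LIMIT)	/* kernel: cache_flusharray() */
			ac->entry[ac->avail++] = objp;
	}

	int main(void)
	{
		struct array_cache_model ac = { 0 };
		int a, b;

		ac_free(&ac, &a);
		ac_free(&ac, &b);
		/* LIFO: the most recently freed object comes back first. */
		printf("%s\n", ac_alloc(&ac) == &b ? "cache-hot reuse" : "?");
		return 0;
	}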
@@ -2615,19 +2763,19 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, gfp_t flags)
/*
* A interface to enable slab creation on nodeid
*/
-static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
struct list_head *entry;
struct slab *slabp;
struct kmem_list3 *l3;
void *obj;
- kmem_bufctl_t next;
int x;
l3 = cachep->nodelists[nodeid];
BUG_ON(!l3);
retry:
+ check_irq_off();
spin_lock(&l3->list_lock);
entry = l3->slabs_partial.next;
if (entry == &l3->slabs_partial) {
@@ -2647,14 +2795,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
BUG_ON(slabp->inuse == cachep->num);
- /* get obj pointer */
- obj = slabp->s_mem + slabp->free * cachep->objsize;
- slabp->inuse++;
- next = slab_bufctl(slabp)[slabp->free];
-#if DEBUG
- slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
-#endif
- slabp->free = next;
+ obj = slab_get_obj(cachep, slabp, nodeid);
check_slabp(cachep, slabp);
l3->free_objects--;
/* move slabp to correct slabp list: */
@@ -2685,7 +2826,7 @@ static void *__cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
/*
* Caller needs to acquire correct kmem_list's list_lock
*/
-static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
+static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
int node)
{
int i;
@@ -2694,29 +2835,14 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
for (i = 0; i < nr_objects; i++) {
void *objp = objpp[i];
struct slab *slabp;
- unsigned int objnr;
- slabp = page_get_slab(virt_to_page(objp));
+ slabp = virt_to_slab(objp);
l3 = cachep->nodelists[node];
list_del(&slabp->list);
- objnr = (objp - slabp->s_mem) / cachep->objsize;
check_spinlock_acquired_node(cachep, node);
check_slabp(cachep, slabp);
-
-#if DEBUG
- /* Verify that the slab belongs to the intended node */
- WARN_ON(slabp->nodeid != node);
-
- if (slab_bufctl(slabp)[objnr] != BUFCTL_FREE) {
- printk(KERN_ERR "slab: double free detected in cache "
- "'%s', objp %p\n", cachep->name, objp);
- BUG();
- }
-#endif
- slab_bufctl(slabp)[objnr] = slabp->free;
- slabp->free = objnr;
+ slab_put_obj(cachep, slabp, objp, node);
STATS_DEC_ACTIVE(cachep);
- slabp->inuse--;
l3->free_objects++;
check_slabp(cachep, slabp);
@@ -2738,7 +2864,7 @@ static void free_block(kmem_cache_t *cachep, void **objpp, int nr_objects,
}
}
-static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
+static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
int batchcount;
struct kmem_list3 *l3;
@@ -2797,9 +2923,9 @@ static void cache_flusharray(kmem_cache_t *cachep, struct array_cache *ac)
*
* Called with disabled ints.
*/
-static inline void __cache_free(kmem_cache_t *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
- struct array_cache *ac = ac_data(cachep);
+ struct array_cache *ac = cpu_cache_get(cachep);
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
@@ -2810,7 +2936,7 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
#ifdef CONFIG_NUMA
{
struct slab *slabp;
- slabp = page_get_slab(virt_to_page(objp));
+ slabp = virt_to_slab(objp);
if (unlikely(slabp->nodeid != numa_node_id())) {
struct array_cache *alien = NULL;
int nodeid = slabp->nodeid;
@@ -2856,9 +2982,9 @@ static inline void __cache_free(kmem_cache_t *cachep, void *objp)
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags)
+void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);
@@ -2876,12 +3002,12 @@ EXPORT_SYMBOL(kmem_cache_alloc);
*
* Currently only used for dentry validation.
*/
-int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
+int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
{
unsigned long addr = (unsigned long)ptr;
unsigned long min_addr = PAGE_OFFSET;
unsigned long align_mask = BYTES_PER_WORD - 1;
- unsigned long size = cachep->objsize;
+ unsigned long size = cachep->buffer_size;
struct page *page;
if (unlikely(addr < min_addr))
@@ -2917,32 +3043,23 @@ int fastcall kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)
* New and improved: it will now make sure that the object gets
* put on the correct node list so that there is no false sharing.
*/
-void *kmem_cache_alloc_node(kmem_cache_t *cachep, gfp_t flags, int nodeid)
+void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
unsigned long save_flags;
void *ptr;
- if (nodeid == -1)
- return __cache_alloc(cachep, flags);
-
- if (unlikely(!cachep->nodelists[nodeid])) {
- /* Fall back to __cache_alloc if we run into trouble */
- printk(KERN_WARNING
- "slab: not allocating in inactive node %d for cache %s\n",
- nodeid, cachep->name);
- return __cache_alloc(cachep, flags);
- }
-
cache_alloc_debugcheck_before(cachep, flags);
local_irq_save(save_flags);
- if (nodeid == numa_node_id())
+
+ if (nodeid == -1 || nodeid == numa_node_id() ||
+ !cachep->nodelists[nodeid])
ptr = ____cache_alloc(cachep, flags);
else
ptr = __cache_alloc_node(cachep, flags, nodeid);
local_irq_restore(save_flags);
- ptr =
- cache_alloc_debugcheck_after(cachep, flags, ptr,
- __builtin_return_address(0));
+
+ ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
+ __builtin_return_address(0));
return ptr;
}
@@ -2950,7 +3067,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
void *kmalloc_node(size_t size, gfp_t flags, int node)
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
cachep = kmem_find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
@@ -2981,9 +3098,10 @@ EXPORT_SYMBOL(kmalloc_node);
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
+ void *caller)
{
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
/* If you want to save a few bytes .text space: replace
* __ with kmem_.
@@ -2993,10 +3111,27 @@ void *__kmalloc(size_t size, gfp_t flags)
cachep = __find_general_cachep(size, flags);
if (unlikely(cachep == NULL))
return NULL;
- return __cache_alloc(cachep, flags);
+ return __cache_alloc(cachep, flags, caller);
+}
+
+#ifndef CONFIG_DEBUG_SLAB
+
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ return __do_kmalloc(size, flags, NULL);
}
EXPORT_SYMBOL(__kmalloc);
+#else
+
+void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+{
+ return __do_kmalloc(size, flags, caller);
+}
+EXPORT_SYMBOL(__kmalloc_track_caller);
+
+#endif
+
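With this split, the caller address is only threaded through when CONFIG_DEBUG_SLAB is enabled; otherwise __do_kmalloc() inlines with a NULL caller and the bookkeeping compiles away. A consumer that wants useful SLAB_STORE_USER data would route through the _track_caller variant; a hypothetical wrapper (not the kernel's actual macro) might look like:

	#ifdef CONFIG_DEBUG_SLAB
	/* Record the user's return address, not the wrapper's frame. */
	#define my_kmalloc(size, flags) \
		__kmalloc_track_caller(size, flags, __builtin_return_address(0))
	#else
	#define my_kmalloc(size, flags) __kmalloc(size, flags)
	#endif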
#ifdef CONFIG_SMP
/**
* __alloc_percpu - allocate one copy of the object for every present
@@ -3054,7 +3189,7 @@ EXPORT_SYMBOL(__alloc_percpu);
* Free an object which was previously allocated from this
* cache.
*/
-void kmem_cache_free(kmem_cache_t *cachep, void *objp)
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
unsigned long flags;
@@ -3075,15 +3210,15 @@ EXPORT_SYMBOL(kmem_cache_free);
*/
void kfree(const void *objp)
{
- kmem_cache_t *c;
+ struct kmem_cache *c;
unsigned long flags;
if (unlikely(!objp))
return;
local_irq_save(flags);
kfree_debugcheck(objp);
- c = page_get_cache(virt_to_page(objp));
- mutex_debug_check_no_locks_freed(objp, obj_reallen(c));
+ c = virt_to_cache(objp);
+ mutex_debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
@@ -3112,13 +3247,13 @@ void free_percpu(const void *objp)
EXPORT_SYMBOL(free_percpu);
#endif
-unsigned int kmem_cache_size(kmem_cache_t *cachep)
+unsigned int kmem_cache_size(struct kmem_cache *cachep)
{
- return obj_reallen(cachep);
+ return obj_size(cachep);
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(kmem_cache_t *cachep)
+const char *kmem_cache_name(struct kmem_cache *cachep)
{
return cachep->name;
}
@@ -3127,7 +3262,7 @@ EXPORT_SYMBOL_GPL(kmem_cache_name);
/*
* This initializes kmem_list3 for all nodes.
*/
-static int alloc_kmemlist(kmem_cache_t *cachep)
+static int alloc_kmemlist(struct kmem_cache *cachep)
{
int node;
struct kmem_list3 *l3;
@@ -3183,7 +3318,7 @@ static int alloc_kmemlist(kmem_cache_t *cachep)
}
struct ccupdate_struct {
- kmem_cache_t *cachep;
+ struct kmem_cache *cachep;
struct array_cache *new[NR_CPUS];
};
@@ -3193,13 +3328,13 @@ static void do_ccupdate_local(void *info)
struct array_cache *old;
check_irq_off();
- old = ac_data(new->cachep);
+ old = cpu_cache_get(new->cachep);
new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
new->new[smp_processor_id()] = old;
}
-static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
+static int do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount,
int shared)
{
struct ccupdate_struct new;
@@ -3220,11 +3355,11 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
smp_call_function_all_cpus(do_ccupdate_local, (void *)&new);
check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
cachep->batchcount = batchcount;
cachep->limit = limit;
cachep->shared = shared;
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
for_each_online_cpu(i) {
struct array_cache *ccold = new.new[i];
@@ -3245,7 +3380,7 @@ static int do_tune_cpucache(kmem_cache_t *cachep, int limit, int batchcount,
return 0;
}
-static void enable_cpucache(kmem_cache_t *cachep)
+static void enable_cpucache(struct kmem_cache *cachep)
{
int err;
int limit, shared;
@@ -3258,13 +3393,13 @@ static void enable_cpucache(kmem_cache_t *cachep)
* The numbers are guessed, we should auto-tune as described by
* Bonwick.
*/
- if (cachep->objsize > 131072)
+ if (cachep->buffer_size > 131072)
limit = 1;
- else if (cachep->objsize > PAGE_SIZE)
+ else if (cachep->buffer_size > PAGE_SIZE)
limit = 8;
- else if (cachep->objsize > 1024)
+ else if (cachep->buffer_size > 1024)
limit = 24;
- else if (cachep->objsize > 256)
+ else if (cachep->buffer_size > 256)
limit = 54;
else
limit = 120;
@@ -3279,7 +3414,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
*/
shared = 0;
#ifdef CONFIG_SMP
- if (cachep->objsize <= PAGE_SIZE)
+ if (cachep->buffer_size <= PAGE_SIZE)
shared = 8;
#endif
@@ -3297,7 +3432,7 @@ static void enable_cpucache(kmem_cache_t *cachep)
cachep->name, -err);
}
-static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
+static void drain_array_locked(struct kmem_cache *cachep, struct array_cache *ac,
int force, int node)
{
int tofree;
@@ -3342,12 +3477,12 @@ static void cache_reap(void *unused)
}
list_for_each(walk, &cache_chain) {
- kmem_cache_t *searchp;
+ struct kmem_cache *searchp;
struct list_head *p;
int tofree;
struct slab *slabp;
- searchp = list_entry(walk, kmem_cache_t, next);
+ searchp = list_entry(walk, struct kmem_cache, next);
if (searchp->flags & SLAB_NO_REAP)
goto next;
@@ -3356,10 +3491,10 @@ static void cache_reap(void *unused)
l3 = searchp->nodelists[numa_node_id()];
if (l3->alien)
- drain_alien_cache(searchp, l3);
+ drain_alien_cache(searchp, l3->alien);
spin_lock_irq(&l3->list_lock);
- drain_array_locked(searchp, ac_data(searchp), 0,
+ drain_array_locked(searchp, cpu_cache_get(searchp), 0,
numa_node_id());
if (time_after(l3->next_reap, jiffies))
@@ -3450,15 +3585,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
if (p == &cache_chain)
return NULL;
}
- return list_entry(p, kmem_cache_t, next);
+ return list_entry(p, struct kmem_cache, next);
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
++*pos;
return cachep->next.next == &cache_chain ? NULL
- : list_entry(cachep->next.next, kmem_cache_t, next);
+ : list_entry(cachep->next.next, struct kmem_cache, next);
}
static void s_stop(struct seq_file *m, void *p)
@@ -3468,7 +3603,7 @@ static void s_stop(struct seq_file *m, void *p)
static int s_show(struct seq_file *m, void *p)
{
- kmem_cache_t *cachep = p;
+ struct kmem_cache *cachep = p;
struct list_head *q;
struct slab *slabp;
unsigned long active_objs;
@@ -3480,8 +3615,7 @@ static int s_show(struct seq_file *m, void *p)
int node;
struct kmem_list3 *l3;
- check_irq_on();
- spin_lock_irq(&cachep->spinlock);
+ spin_lock(&cachep->spinlock);
active_objs = 0;
num_slabs = 0;
for_each_online_node(node) {
@@ -3489,7 +3623,8 @@ static int s_show(struct seq_file *m, void *p)
if (!l3)
continue;
- spin_lock(&l3->list_lock);
+ check_irq_on();
+ spin_lock_irq(&l3->list_lock);
list_for_each(q, &l3->slabs_full) {
slabp = list_entry(q, struct slab, list);
@@ -3514,9 +3649,10 @@ static int s_show(struct seq_file *m, void *p)
num_slabs++;
}
free_objects += l3->free_objects;
- shared_avail += l3->shared->avail;
+ if (l3->shared)
+ shared_avail += l3->shared->avail;
- spin_unlock(&l3->list_lock);
+ spin_unlock_irq(&l3->list_lock);
}
num_slabs += active_slabs;
num_objs = num_slabs * cachep->num;
@@ -3528,7 +3664,7 @@ static int s_show(struct seq_file *m, void *p)
printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
- name, active_objs, num_objs, cachep->objsize,
+ name, active_objs, num_objs, cachep->buffer_size,
cachep->num, (1 << cachep->gfporder));
seq_printf(m, " : tunables %4u %4u %4u",
cachep->limit, cachep->batchcount, cachep->shared);
@@ -3560,7 +3696,7 @@ static int s_show(struct seq_file *m, void *p)
}
#endif
seq_putc(m, '\n');
- spin_unlock_irq(&cachep->spinlock);
+ spin_unlock(&cachep->spinlock);
return 0;
}
@@ -3618,7 +3754,8 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
mutex_lock(&cache_chain_mutex);
res = -EINVAL;
list_for_each(p, &cache_chain) {
- kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
+ struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
+ next);
if (!strcmp(cachep->name, kbuf)) {
if (limit < 1 ||
@@ -3656,5 +3793,5 @@ unsigned int ksize(const void *objp)
if (unlikely(objp == NULL))
return 0;
- return obj_reallen(page_get_cache(virt_to_page(objp)));
+ return obj_size(virt_to_cache(objp));
}