path: root/include/linux/slub_def.h
author    Christoph Lameter <cl@linux.com>      2013-01-10 19:14:19 +0000
committer Pekka Enberg <penberg@kernel.org>     2013-02-01 12:32:08 +0200
commit    2c59dd6544212faa5ce761920d2251f4152f408d (patch)
tree      c2547eb50205b72368e0b4758fc7c9a0111238a5 /include/linux/slub_def.h
parent    stat: Use size_t for sizes instead of unsigned (diff)
download  linux-dev-2c59dd6544212faa5ce761920d2251f4152f408d.tar.xz
          linux-dev-2c59dd6544212faa5ce761920d2251f4152f408d.zip
slab: Common Kmalloc cache determination
Extract the optimized lookup functions from slub and put them into slab_common.c. Then make slab use these functions as well.

Joonsoo notes that this fixes some issues with constant folding which also reduces the code size for slub.

https://lkml.org/lkml/2012/10/20/82

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
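The constant-folding benefit Joonsoo points out comes from kmalloc_index() being __always_inline: when the requested size is a compile-time constant, the size-to-index if-chain collapses to a constant and the cache lookup becomes a single load from kmalloc_caches[]. A minimal userspace sketch of that technique follows (illustrative only, not kernel code; the names, bucket boundaries, and cache table are simplified assumptions):

#include <stdio.h>
#include <stddef.h>

/* Stand-ins for kmalloc_caches[]; in the kernel these are struct kmem_cache *. */
static const char * const example_caches[] = {
	"ZERO_SIZE", "unused", "unused",
	"kmalloc-8", "kmalloc-16", "kmalloc-32", "kmalloc-64",
};

/* Simplified stand-in for kmalloc_index(): with a constant argument an
 * optimizing compiler folds the whole chain down to a single constant. */
static inline __attribute__((always_inline)) int example_index(size_t size)
{
	if (!size)
		return 0;
	if (size <= 8)
		return 3;
	if (size <= 16)
		return 4;
	if (size <= 32)
		return 5;
	if (size <= 64)
		return 6;
	return -1;	/* the real helper continues up to the largest cache */
}

int main(void)
{
	/* Constant size: example_index(64) folds to 6 at compile time, so the
	 * lookup is effectively example_caches[6] with no runtime branching. */
	printf("%s\n", example_caches[example_index(64)]);
	return 0;
}

Built with optimization, the if-chain vanishes and only the constant index reaches the array access, which is the effect the inline kmalloc()/kmalloc_node() paths in the diff below rely on.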
Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--   include/linux/slub_def.h   41
1 file changed, 10 insertions(+), 31 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 3701896f7f8a..16341e5316de 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -115,29 +115,6 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-#ifdef CONFIG_ZONE_DMA
-#define SLUB_DMA __GFP_DMA
-#else
-/* Disable DMA functionality */
-#define SLUB_DMA (__force gfp_t)0
-#endif
-
-/*
- * Find the slab cache for a given combination of allocation flags and size.
- *
- * This ought to end up with a global pointer to the right cache
- * in kmalloc_caches.
- */
-static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
-{
-	int index = kmalloc_index(size);
-
-	if (index == 0)
-		return NULL;
-
-	return kmalloc_caches[index];
-}
-
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
@@ -195,13 +172,14 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (size > KMALLOC_MAX_CACHE_SIZE)
 			return kmalloc_large(size, flags);
 
-		if (!(flags & SLUB_DMA)) {
-			struct kmem_cache *s = kmalloc_slab(size);
+		if (!(flags & GFP_DMA)) {
+			int index = kmalloc_index(size);
 
-			if (!s)
+			if (!index)
 				return ZERO_SIZE_PTR;
 
-			return kmem_cache_alloc_trace(s, flags, size);
+			return kmem_cache_alloc_trace(kmalloc_caches[index],
+					flags, size);
 		}
 	}
 	return __kmalloc(size, flags);
@@ -228,13 +206,14 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) &&
-		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & SLUB_DMA)) {
-		struct kmem_cache *s = kmalloc_slab(size);
+		size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) {
+		int index = kmalloc_index(size);
 
-		if (!s)
+		if (!index)
 			return ZERO_SIZE_PTR;
 
-		return kmem_cache_alloc_node_trace(s, flags, node, size);
+		return kmem_cache_alloc_node_trace(kmalloc_caches[index],
+					flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
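With the lookup inlined at the call site, a constant-size request such as kmalloc(64, GFP_KERNEL) should compile down to a direct kmem_cache_alloc_trace(kmalloc_caches[6], GFP_KERNEL, 64) (index 6 being an assumption based on the usual power-of-two buckets); non-constant sizes and GFP_DMA requests keep falling back to __kmalloc()/__kmalloc_node(), which per the commit message now go through the lookup code shared with slab in slab_common.c.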