Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------
 1 file changed, 55 insertions(+), 21 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 4907563ef7ff..ffe13fdf8144 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -29,6 +29,7 @@
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
+#include <linux/prefetch.h>
#include <trace/events/kmem.h>
@@ -269,6 +270,11 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
return *(void **)(object + s->offset);
}
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+ prefetch(object + s->offset);
+}
+
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
void *p;
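
The new prefetch_freepointer() helper asks the CPU to start pulling in the cache line that holds the next object's free pointer, so the following allocation is less likely to stall on a cold miss when it dereferences that pointer. A minimal userspace sketch of the same idea, assuming a toy singly linked freelist (the kernel's prefetch() expands to an architecture-specific hint, which __builtin_prefetch approximates here):

#include <stddef.h>

/* Toy freelist: each free object stores a pointer to the next one. */
struct object {
        struct object *next_free;
};

static struct object *freelist_pop(struct object **freelist)
{
        struct object *obj = *freelist;

        if (obj) {
                *freelist = obj->next_free;
                /*
                 * Warm the line holding the new head's link now, so the
                 * *next* pop does not miss. Prefetching NULL is harmless;
                 * the hint is simply dropped.
                 */
                __builtin_prefetch(*freelist);
        }
        return obj;
}
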
@@ -1560,6 +1566,7 @@ static void *get_partial_node(struct kmem_cache *s,
} else {
page->freelist = t;
available = put_cpu_partial(s, page, 0);
+ stat(s, CPU_PARTIAL_NODE);
}
if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
break;
@@ -1581,6 +1588,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
void *object;
+ unsigned int cpuset_mems_cookie;
/*
* The defrag ratio allows a configuration of the tradeoffs between
@@ -1604,23 +1612,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
- get_mems_allowed();
- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
- struct kmem_cache_node *n;
-
- n = get_node(s, zone_to_nid(zone));
-
- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > s->min_partial) {
- object = get_partial_node(s, n, c);
- if (object) {
- put_mems_allowed();
- return object;
+ do {
+ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ struct kmem_cache_node *n;
+
+ n = get_node(s, zone_to_nid(zone));
+
+ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+ n->nr_partial > s->min_partial) {
+ object = get_partial_node(s, n, c);
+ if (object) {
+ /*
+ * Return the object even if
+ * put_mems_allowed indicated that
+ * the cpuset mems_allowed was
+ * updated in parallel. It's a
+ * harmless race between the alloc
+ * and the cpuset update.
+ */
+ put_mems_allowed(cpuset_mems_cookie);
+ return object;
+ }
}
}
- }
- put_mems_allowed();
+ } while (!put_mems_allowed(cpuset_mems_cookie));
#endif
return NULL;
}
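
This hunk converts get_mems_allowed()/put_mems_allowed() from an unconditional critical section into a seqcount-style cookie protocol: sample a sequence number, do the zonelist walk, and retry the whole walk if a concurrent cpuset update invalidated it, except when an object was already obtained, where the stale mask is a harmless race (as the added comment says). A hedged sketch of the pattern with hypothetical names, using C11 atomics (the kernel uses a per-task seqcount rather than a bare counter):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static atomic_uint mems_seq;            /* bumped by every cpuset update */

static unsigned int get_mems_cookie(void)
{
        return atomic_load(&mems_seq);  /* sample the sequence */
}

static bool put_mems_cookie(unsigned int cookie)
{
        /* true => no cpuset update raced with the enclosed walk */
        return atomic_load(&mems_seq) == cookie;
}

static void *alloc_with_retry(void *(*try_alloc)(void))
{
        unsigned int cookie;
        void *obj;

        do {
                cookie = get_mems_cookie();
                obj = try_alloc();
                if (obj)
                        return obj;     /* keep it even if we raced */
        } while (!put_mems_cookie(cookie));

        return NULL;    /* consistent walk, genuinely nothing found */
}
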
@@ -1973,6 +1990,7 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
local_irq_restore(flags);
pobjects = 0;
pages = 0;
+ stat(s, CPU_PARTIAL_DRAIN);
}
}
@@ -1984,7 +2002,6 @@ int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
page->next = oldpage;
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
- stat(s, CPU_PARTIAL_FREE);
return pobjects;
}
@@ -2018,9 +2035,17 @@ static void flush_cpu_slab(void *d)
__flush_cpu_slab(s, smp_processor_id());
}
+static bool has_cpu_slab(int cpu, void *info)
+{
+ struct kmem_cache *s = info;
+ struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+
+ return !!(c->page);
+}
+
static void flush_all(struct kmem_cache *s)
{
- on_each_cpu(flush_cpu_slab, s, 1);
+ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
}
/*
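
flush_all() previously sent the flush IPI to every online CPU. With on_each_cpu_cond(), has_cpu_slab() is evaluated first and only CPUs that actually hold a per-cpu slab page get interrupted, which avoids needless wakeups on large or mostly idle machines. Roughly the following shape, as a simplified sketch rather than the kernel implementation (the real helper allocates a cpumask, hence the GFP_ATOMIC argument, and honors the wait flag):

#include <stdbool.h>

#define NR_CPUS 8

typedef bool (*cond_func_t)(int cpu, void *info);
typedef void (*work_func_t)(void *info);

static void on_each_cpu_cond_sketch(cond_func_t cond, work_func_t func,
                                    void *info)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (cond(cpu, info))    /* cheap check, no IPI */
                        func(info);     /* stands in for the cross-CPU call */
}
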
@@ -2309,6 +2334,8 @@ redo:
object = __slab_alloc(s, gfpflags, node, addr, c);
else {
+ void *next_object = get_freepointer_safe(s, object);
+
/*
* The cmpxchg will only match if there was no additional
* operation and if we are on the right processor.
@@ -2324,11 +2351,12 @@ redo:
if (unlikely(!this_cpu_cmpxchg_double(
s->cpu_slab->freelist, s->cpu_slab->tid,
object, tid,
- get_freepointer_safe(s, object), next_tid(tid)))) {
+ next_object, next_tid(tid)))) {
note_cmpxchg_failure("slab_alloc", s, tid);
goto redo;
}
+ prefetch_freepointer(s, next_object);
stat(s, ALLOC_FASTPATH);
}
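
The fast path now reads the next free pointer once into next_object, installs it as the new freelist head via the cmpxchg_double transaction, and prefetches it only after the transaction has succeeded; a prefetch issued before the cmpxchg would be wasted work whenever the transaction fails and the sequence is redone. A toy userspace analogue of the pop-then-prefetch ordering, assuming no concurrent frees of popped objects (the real fast path also pairs the pointer with a transaction id to guard against ABA and CPU migration):

#include <stdatomic.h>
#include <stddef.h>

struct object {
        struct object *next;
};

static struct object *freelist_pop(_Atomic(struct object *) *head)
{
        struct object *obj, *next;

        do {
                obj = atomic_load(head);
                if (!obj)
                        return NULL;
                next = obj->next;       /* read once, reused below */
        } while (!atomic_compare_exchange_weak(head, &obj, next));

        /* Only the winner prefetches; a loser's hint would be wasted. */
        __builtin_prefetch(next);
        return obj;
}
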
@@ -2465,9 +2493,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
* If we just froze the page then put it onto the
* per cpu partial list.
*/
- if (new.frozen && !was_frozen)
+ if (new.frozen && !was_frozen) {
put_cpu_partial(s, page, 1);
-
+ stat(s, CPU_PARTIAL_FREE);
+ }
/*
* The list lock was not taken therefore no list
* activity can be necessary.
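
Together with the put_cpu_partial() hunks above, this moves the CPU_PARTIAL_FREE accounting: it is no longer bumped on every put_cpu_partial() call, only when a free actually froze the page onto the per-cpu partial list, while draining that list is now counted separately as CPU_PARTIAL_DRAIN.
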
@@ -3929,13 +3958,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
if (kmem_cache_open(s, n,
size, align, flags, ctor)) {
list_add(&s->list, &slab_caches);
+ up_write(&slub_lock);
if (sysfs_slab_add(s)) {
+ down_write(&slub_lock);
list_del(&s->list);
kfree(n);
kfree(s);
goto err;
}
- up_write(&slub_lock);
return s;
}
kfree(n);
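
sysfs_slab_add() can sleep and takes sysfs-side locks, so calling it with slub_lock write-locked risked a lock inversion. The reordering publishes the cache on slab_caches, releases slub_lock before the sysfs call, and re-takes it only on the failure path to unlink the half-created cache. A generic sketch of that publish/drop-lock/undo shape, with hypothetical names and a pthread mutex standing in for the kernel rwsem:

#include <pthread.h>
#include <stddef.h>

struct cache {
        struct cache *next;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache *registry;

static int cache_register(struct cache *c, int (*hook)(struct cache *))
{
        struct cache **p;

        pthread_mutex_lock(&registry_lock);
        c->next = registry;
        registry = c;                   /* publish */
        pthread_mutex_unlock(&registry_lock);

        if (hook(c)) {                  /* may sleep, takes its own locks */
                pthread_mutex_lock(&registry_lock);
                for (p = &registry; *p && *p != c; p = &(*p)->next)
                        ;
                if (*p)
                        *p = c->next;   /* unpublish on failure */
                pthread_mutex_unlock(&registry_lock);
                return -1;
        }
        return 0;
}
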
@@ -5059,6 +5089,8 @@ STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
#endif
static struct attribute *slab_attrs[] = {
@@ -5124,6 +5156,8 @@ static struct attribute *slab_attrs[] = {
&cmpxchg_double_cpu_fail_attr.attr,
&cpu_partial_alloc_attr.attr,
&cpu_partial_free_attr.attr,
+ &cpu_partial_node_attr.attr,
+ &cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
&failslab_attr.attr,
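
Each STAT_ATTR() invocation generates a show() routine plus an attribute object, so the two new counters surface as per-cache sysfs files (cpu_partial_node: per-cpu partial list refilled from a node's partial list; cpu_partial_drain: per-cpu partial list drained back to the node). A simplified, self-contained sketch of the macro pattern:

#include <stdio.h>

enum stat_item {
        CPU_PARTIAL_NODE,       /* refills from the node partial list */
        CPU_PARTIAL_DRAIN,      /* drains back to the node partial list */
        NR_STATS,
};

static unsigned long stats[NR_STATS];

struct attribute {
        const char *name;
        int (*show)(char *buf);
};

#define STAT_ATTR(si, text)                                     \
static int text##_show(char *buf)                               \
{                                                               \
        return sprintf(buf, "%lu\n", stats[si]);                \
}                                                               \
static struct attribute text##_attr = { #text, text##_show }

STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);

/* The table a sysfs-like layer would walk, one file per attribute. */
static struct attribute *slab_attrs[] = {
        &cpu_partial_node_attr,
        &cpu_partial_drain_attr,
        NULL,
};

int main(void)
{
        char buf[32];
        struct attribute **a;

        stats[CPU_PARTIAL_NODE] = 3;    /* pretend some refills happened */
        for (a = slab_attrs; *a; a++) {
                (*a)->show(buf);
                printf("%s: %s", (*a)->name, buf);
        }
        return 0;
}
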