From e7b691b085fda913830e5280ae6f724b2a63c824 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Sat, 9 Jun 2012 02:40:03 -0700
Subject: slab/mempolicy: always use local policy from interrupt context

slab_node() could access current->mempolicy from interrupt context.
However there's a race condition during exit where the mempolicy
is first freed and then the pointer zeroed.

Using this from interrupts seems bogus anyway. The interrupt
will interrupt a random process and therefore get a random
mempolicy. Many times, this will be the idle task's, which no one
can change.

Just disable this here and always use the local node for slab
from interrupts. I also cleaned up the callers of slab_node a bit,
which always passed the same argument.

I believe the original mempolicy code did that in fact,
so it's likely a regression.

v2: send version with correct logic
v3: simplify. fix typo.
Reported-by: Arun Sharma
Cc: penberg@kernel.org
Cc: cl@linux.com
Signed-off-by: Andi Kleen
[tdmackey@twitter.com: rework control flow based on feedback from
 cl@linux.com, fix logic, and clean up current task_struct reference]
Acked-by: David Rientjes
Acked-by: Christoph Lameter
Acked-by: KOSAKI Motohiro
Signed-off-by: David Mackey
Signed-off-by: Pekka Enberg
---
 mm/mempolicy.c | 8 +++++++-
 mm/slab.c      | 4 ++--
 mm/slub.c      | 2 +-
 3 files changed, 10 insertions(+), 4 deletions(-)

(limited to 'mm')

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f15c1b24ca18..cb0b230aa3f2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1602,8 +1602,14 @@ static unsigned interleave_nodes(struct mempolicy *policy)
  * task can change it's policy. The system default policy requires no
  * such protection.
  */
-unsigned slab_node(struct mempolicy *policy)
+unsigned slab_node(void)
 {
+	struct mempolicy *policy;
+
+	if (in_interrupt())
+		return numa_node_id();
+
+	policy = current->mempolicy;
 	if (!policy || policy->flags & MPOL_F_LOCAL)
 		return numa_node_id();
 
diff --git a/mm/slab.c b/mm/slab.c
index fc4a77446700..dd607a8e6706 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3310,7 +3310,7 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
 		nid_alloc = cpuset_slab_spread_node();
 	else if (current->mempolicy)
-		nid_alloc = slab_node(current->mempolicy);
+		nid_alloc = slab_node();
 	if (nid_alloc != nid_here)
 		return ____cache_alloc_node(cachep, flags, nid_alloc);
 	return NULL;
@@ -3342,7 +3342,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 
 retry_cpuset:
 	cpuset_mems_cookie = get_mems_allowed();
-	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+	zonelist = node_zonelist(slab_node(), flags);
 
 retry:
 	/*
diff --git a/mm/slub.c b/mm/slub.c
index 797271f5afb8..348fed1643f4 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1617,7 +1617,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
 
 	do {
 		cpuset_mems_cookie = get_mems_allowed();
-		zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+		zonelist = node_zonelist(slab_node(), flags);
 
 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 			struct kmem_cache_node *n;
--
cgit v1.2.3-59-g8ed1b