author     Glauber Costa <glommer@parallels.com>  2013-02-22 16:34:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-02-23 17:50:18 -0800
commit  692e89abd154b04d212dce0c18a449bda15aac04 (patch)
tree    91073f62049a9ffe49686cd10d4fd5cb09b26ac7 /mm/memcontrol.c
parent  memcg: replace cgroup_lock with memcg specific memcg_lock (diff)
memcg: increment static branch right after limit set
We were deferring the kmemcg static branch increment to a later time, due to
a nasty dependency between the cpu_hotplug lock, taken by the jump label
update, and the cgroup_lock. Now that we no longer take the cgroup lock, we
can save ourselves the trouble.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
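The fix works because the jump-label increment and the per-memcg flag are now
ordered back to back. A minimal C sketch of that ordering follows (not the
kernel source: enable_kmem_accounting() is a hypothetical wrapper, while
static_key_slow_inc(), memcg_kmem_set_active() and memcg_kmem_enabled_key are
the names the patch actually uses):

#include <linux/jump_label.h>

struct static_key memcg_kmem_enabled_key = STATIC_KEY_INIT_FALSE;

/* Hypothetical wrapper sketching the ordering this patch adopts. */
static void enable_kmem_accounting(struct mem_cgroup *memcg)
{
	/*
	 * Patch all call sites guarded by the key. The jump label update
	 * takes the cpu_hotplug lock internally, which is why it could not
	 * be done under cgroup_lock before this series.
	 */
	static_key_slow_inc(&memcg_kmem_enabled_key);

	/*
	 * Publish the per-memcg active bit only after every call site is
	 * live, so no task starts accounting against a half-patched kernel.
	 */
	memcg_kmem_set_active(memcg);
}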
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 31 +++++++------------------------
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 46cdaef78b01..f4f41c36e703 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4974,8 +4974,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 {
 	int ret = -EINVAL;
 #ifdef CONFIG_MEMCG_KMEM
-	bool must_inc_static_branch = false;
-
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 	/*
 	 * For simplicity, we won't allow this to be disabled. It also can't
@@ -5004,7 +5002,13 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 		res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
 		goto out;
 	}
-	must_inc_static_branch = true;
+	static_key_slow_inc(&memcg_kmem_enabled_key);
+	/*
+	 * setting the active bit after the inc will guarantee no one
+	 * starts accounting before all call sites are patched
+	 */
+	memcg_kmem_set_active(memcg);
+
 	/*
 	 * kmem charges can outlive the cgroup. In the case of slab
 	 * pages, for instance, a page contain objects from various
@@ -5017,27 +5021,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 out:
 	mutex_unlock(&set_limit_mutex);
 	mutex_unlock(&memcg_create_mutex);
-
-	/*
-	 * We are by now familiar with the fact that we can't inc the static
-	 * branch inside cgroup_lock. See disarm functions for details. A
-	 * worker here is overkill, but also wrong: After the limit is set, we
-	 * must start accounting right away. Since this operation can't fail,
-	 * we can safely defer it to here - no rollback will be needed.
-	 *
-	 * The boolean used to control this is also safe, because
-	 * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
-	 * able to set it to true;
-	 */
-	if (must_inc_static_branch) {
-		static_key_slow_inc(&memcg_kmem_enabled_key);
-		/*
-		 * setting the active bit after the inc will guarantee no one
-		 * starts accounting before all call sites are patched
-		 */
-		memcg_kmem_set_active(memcg);
-	}
-
 #endif
 	return ret;
 }
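For completeness, the consumer side of the key shows why the ordering above
matters (a sketch against the 3.8-era API: should_account_kmem() is a
hypothetical caller, while memcg_kmem_enabled() and memcg_kmem_is_active()
exist in the tree at this commit). The static branch keeps the accounting-off
fast path to a single nop, and the per-memcg bit is only consulted behind it:

/* memcg_kmem_enabled() as defined in include/linux/memcontrol.h. */
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/* Hypothetical caller illustrating the two-level check. */
static bool should_account_kmem(struct mem_cgroup *memcg)
{
	if (!memcg_kmem_enabled())	/* nop until the key is patched */
		return false;
	return memcg_kmem_is_active(memcg);	/* per-memcg active bit */
}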