author     Roman Gushchin <guro@fb.com>	2019-07-11 20:56:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2019-07-12 11:05:44 -0700
commit     fb2f2b0adb98bbbbbb51c5a5327f3f90f5dc417e (patch)
tree       028755fe0ca55d38534e9b2701caf910f93b8053 /mm/slab.h
parent     mm: memcg/slab: stop setting page->mem_cgroup pointer for slab pages (diff)
mm: memcg/slab: reparent memcg kmem_caches on cgroup removal
Let's reparent non-root kmem_caches on memcg offlining. This allows us to release the memory cgroup without waiting for the last outstanding kernel object (e.g. a dentry used by another application).

Since the parent cgroup is already charged, all we need to do is splice the list of kmem_caches onto the parent's kmem_caches list, swap the memcg pointer, drop the css refcount for each kmem_cache and adjust the parent's css refcount.

Please note that kmem_cache->memcg_params.memcg is no longer a stable pointer. It is safe to read it under rcu_read_lock(), with cgroup_mutex held, or in any other way that protects the memory cgroup from being released.

We can race with the slab allocation and deallocation paths. That is not a big problem: the parent's charge and the global slab stats are always correct, and we no longer care about the child's usage and global stats, since the child cgroup is already offline and is not used or shown anywhere.

Local slab stats (NR_SLAB_RECLAIMABLE and NR_SLAB_UNRECLAIMABLE) aren't used anywhere except count_shadow_nodes(). Even there nothing breaks: after reparenting, "nodes" will be 0 at the child level (because we already reparent shrinker lists), and at the parent level the page stats were always 0, which this patch does not change.

[guro@fb.com: properly handle kmem_caches reparented to root_mem_cgroup]
Link: http://lkml.kernel.org/r/20190620213427.1691847-1-guro@fb.com
Link: http://lkml.kernel.org/r/20190611231813.3148843-11-guro@fb.com
Signed-off-by: Roman Gushchin <guro@fb.com>
Acked-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Waiman Long <longman@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Andrei Vagin <avagin@gmail.com>
Cc: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
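The reparenting itself lands in mm/slab_common.c, which is outside this diffstat. The following is only a rough, illustrative sketch of the operation described above (the helper name is hypothetical and locking such as slab_mutex is elided; field names like memcg->kmem_caches and memcg_params.kmem_caches_node are assumed from the surrounding kernel code of this era, not taken from this diff):

/*
 * Illustrative sketch, not the patch's actual code: splice the offline
 * child's kmem_caches onto the parent's list, swap the memcg pointer,
 * and move the css references from the child to the parent.
 */
static void reparent_memcg_kmem_caches(struct mem_cgroup *memcg,
				       struct mem_cgroup *parent)
{
	struct kmem_cache *s;
	unsigned int nr = 0;

	list_for_each_entry(s, &memcg->kmem_caches,
			    memcg_params.kmem_caches_node) {
		/* readers under rcu_read_lock() see either the old or the new memcg */
		WRITE_ONCE(s->memcg_params.memcg, parent);
		css_put(&memcg->css);		/* drop the child's css reference */
		nr++;
	}
	list_splice_init(&memcg->kmem_caches, &parent->kmem_caches);
	if (nr)
		css_get_many(&parent->css, nr);	/* adjust the parent's css refcount */
}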
Diffstat (limited to 'mm/slab.h')
-rw-r--r--	mm/slab.h	41
1 file changed, 32 insertions, 9 deletions
diff --git a/mm/slab.h b/mm/slab.h
index 7ead47cb9338..a62372d0f271 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -261,6 +261,9 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
* which do not have slab_cache pointer set.
* So this function assumes that the page can pass PageHead() and PageSlab()
* checks.
+ *
+ * The kmem_cache can be reparented asynchronously. The caller must ensure
+ * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
*/
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
@@ -268,7 +271,7 @@ static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
s = READ_ONCE(page->slab_cache);
if (s && !is_root_cache(s))
- return s->memcg_params.memcg;
+ return READ_ONCE(s->memcg_params.memcg);
return NULL;
}
@@ -285,10 +288,22 @@ static __always_inline int memcg_charge_slab(struct page *page,
struct lruvec *lruvec;
int ret;
- memcg = s->memcg_params.memcg;
+ rcu_read_lock();
+ memcg = READ_ONCE(s->memcg_params.memcg);
+ while (memcg && !css_tryget_online(&memcg->css))
+ memcg = parent_mem_cgroup(memcg);
+ rcu_read_unlock();
+
+ if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ (1 << order));
+ percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
+ return 0;
+ }
+
ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
if (ret)
- return ret;
+ goto out;
lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
@@ -296,8 +311,9 @@ static __always_inline int memcg_charge_slab(struct page *page,
/* transfer try_charge() page references to kmem_cache */
percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
css_put_many(&memcg->css, 1 << order);
-
- return 0;
+out:
+ css_put(&memcg->css);
+ return ret;
}
/*
@@ -310,10 +326,17 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
struct mem_cgroup *memcg;
struct lruvec *lruvec;
- memcg = s->memcg_params.memcg;
- lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
- mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
- memcg_kmem_uncharge_memcg(page, order, memcg);
+ rcu_read_lock();
+ memcg = READ_ONCE(s->memcg_params.memcg);
+ if (likely(!mem_cgroup_is_root(memcg))) {
+ lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
+ mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
+ memcg_kmem_uncharge_memcg(page, order, memcg);
+ } else {
+ mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+ -(1 << order));
+ }
+ rcu_read_unlock();
percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}
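Because the memcg pointer can now change under a reader, the comment added to memcg_from_slab_page() in the first hunk implies a caller-side pattern along these lines. This is a hedged usage sketch with a hypothetical helper name, not code from the patch:

/* Hypothetical caller-side helper: pin the (possibly reparented) memcg. */
static struct mem_cgroup *get_slab_page_memcg(struct page *page)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	memcg = memcg_from_slab_page(page);
	/* the cache may have been reparented; only hand back an online, pinned memcg */
	if (memcg && !css_tryget_online(&memcg->css))
		memcg = NULL;
	rcu_read_unlock();

	return memcg;	/* caller drops the reference with css_put(&memcg->css) */
}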