| author | 2021-02-24 12:03:15 -0800 |
|---|---|
| committer | 2021-02-24 13:38:29 -0800 |
| commit | f3344adf38bdb3107d40483dd9501215ad40edce |
| tree | afd77361c5a91919d9c953a3d6fc906b7da6ee54 |
| parent | mm: memcg/slab: pre-allocate obj_cgroups for slab caches with SLAB_ACCOUNT |
mm: memcontrol: optimize per-lruvec stats counter memory usage
The vmstat threshold is 32 (MEMCG_CHARGE_BATCH), but for byte-sized vmstat
items the threshold is scaled by the page size, so the per-cpu cached value
can be as big as MEMCG_CHARGE_BATCH * PAGE_SIZE. That still fits into an
s32, so introduce struct batched_lruvec_stat, which uses s32 counters, to
optimize memory usage.
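(With 4 KiB pages, that worst case is 32 * 4096 = 131072 bytes of pending
delta, comfortably below S32_MAX = 2147483647.)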
The size of struct lruvec_stat is 304 bytes on 64-bit systems, and it is
allocated per-cpu, so this patch saves 304 / 2 * ncpu bytes per memcg per
node, where ncpu is the number of possible CPUs. With c memory cgroups in
the system (including dying cgroups) and n NUMA nodes, the total saving is
152 * ncpu * c * n bytes.
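To make the arithmetic concrete, here is a minimal userspace sketch (not
kernel code). It assumes an LP64 target (8-byte long, 4-byte s32) and
NR_VM_NODE_STAT_ITEMS = 38, which the 304-byte figure above implies; the
machine parameters in main() are made-up examples:

```c
#include <stdio.h>
#include <stdint.h>

#define NR_VM_NODE_STAT_ITEMS 38	/* implied by 304 / sizeof(long) */

struct lruvec_stat         { long    count[NR_VM_NODE_STAT_ITEMS]; };
struct batched_lruvec_stat { int32_t count[NR_VM_NODE_STAT_ITEMS]; };

int main(void)
{
	/* Hypothetical machine: 64 possible CPUs, 2 NUMA nodes, 1000 memcgs. */
	unsigned long ncpu = 64, nnode = 2, nmemcg = 1000;
	unsigned long saved = (sizeof(struct lruvec_stat) -
			       sizeof(struct batched_lruvec_stat)) *
			      ncpu * nnode * nmemcg;

	printf("per-cpu struct: %zu -> %zu bytes\n",
	       sizeof(struct lruvec_stat),
	       sizeof(struct batched_lruvec_stat));
	printf("total saved: %lu bytes (~%.1f MiB)\n",
	       saved, saved / (1024.0 * 1024.0));
	return 0;
}
```

On such a machine this prints 304 -> 152 bytes and roughly 18.6 MiB of
total savings.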
[akpm@linux-foundation.org: fix typo in comment]
Link: https://lkml.kernel.org/r/20201210042121.39665-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Chris Down <chris@chrisdown.name>
Cc: Yafang Shao <laoar.shao@gmail.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
| -rw-r--r-- | include/linux/memcontrol.h | 14 |
|---|---|---|
| -rw-r--r-- | mm/memcontrol.c | 10 |

2 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7a4dd1cb19fe..41bbf71edd9f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -92,6 +92,10 @@ struct lruvec_stat {
 	long count[NR_VM_NODE_STAT_ITEMS];
 };
 
+struct batched_lruvec_stat {
+	s32 count[NR_VM_NODE_STAT_ITEMS];
+};
+
 /*
  * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
  * which have elements charged to this memcg.
@@ -107,11 +111,17 @@ struct memcg_shrinker_map {
 struct mem_cgroup_per_node {
 	struct lruvec		lruvec;
 
-	/* Legacy local VM stats */
+	/*
+	 * Legacy local VM stats. This should be struct lruvec_stat and
+	 * cannot be optimized to struct batched_lruvec_stat. Because
+	 * the threshold of the lruvec_stat_cpu can be as big as
+	 * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this
+	 * field has no upper limit.
+	 */
 	struct lruvec_stat __percpu *lruvec_stat_local;
 
 	/* Subtree VM stats (batched updates) */
-	struct lruvec_stat __percpu *lruvec_stat_cpu;
+	struct batched_lruvec_stat __percpu *lruvec_stat_cpu;
 	atomic_long_t		lruvec_stat[NR_VM_NODE_STAT_ITEMS];
 	unsigned long		lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 60ce452e42e6..b259e7d8ce41 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5208,7 +5208,7 @@ static int alloc_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
 		return 1;
 	}
 
-	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct lruvec_stat,
+	pn->lruvec_stat_cpu = alloc_percpu_gfp(struct batched_lruvec_stat,
 					       GFP_KERNEL_ACCOUNT);
 	if (!pn->lruvec_stat_cpu) {
 		free_percpu(pn->lruvec_stat_local);
@@ -7093,6 +7093,14 @@ static int __init mem_cgroup_init(void)
 {
 	int cpu, node;
 
+	/*
+	 * Currently s32 type (can refer to struct batched_lruvec_stat) is
+	 * used for per-memcg-per-cpu caching of per-node statistics. In order
+	 * to work fine, we should make sure that the overfill threshold
+	 * can't exceed S32_MAX / PAGE_SIZE.
+	 */
+	BUILD_BUG_ON(MEMCG_CHARGE_BATCH > S32_MAX / PAGE_SIZE);
+
 	cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
 				  memcg_hotplug_cpu_dead);
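For readers unfamiliar with the batching scheme the BUILD_BUG_ON guards,
here is a simplified, hypothetical userspace model; the kernel-side logic
lives in __mod_memcg_lruvec_state() at this point in history, and the names
and the 4 KiB page size below are assumptions. Per-cpu deltas accumulate in
an s32 slot and are folded into the shared atomic counter once they cross
the threshold, so the cached value never exceeds MEMCG_CHARGE_BATCH *
PAGE_SIZE:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

/* Worst-case threshold for byte-sized items:
 * MEMCG_CHARGE_BATCH * PAGE_SIZE, assuming 4 KiB pages. */
#define THRESHOLD (32L * 4096)

static _Atomic long shared_stat;	/* models pn->lruvec_stat[idx] */
static _Thread_local int32_t cached;	/* models the per-cpu s32 slot */

static void mod_stat(long val)
{
	long x = cached + val;

	if (labs(x) > THRESHOLD) {
		/* Batch overflowed: fold it into the shared counter. */
		atomic_fetch_add(&shared_stat, x);
		x = 0;
	}
	/* |x| <= THRESHOLD here, so the s32 cache cannot overflow. */
	cached = (int32_t)x;
}

int main(void)
{
	for (int i = 0; i < 100; i++)
		mod_stat(4096);	/* e.g. one page worth of slab bytes */
	return atomic_load(&shared_stat) >= 0 ? 0 : 1;
}
```

This is why the per-cpu type only has to hold values up to
MEMCG_CHARGE_BATCH * PAGE_SIZE, and why the BUILD_BUG_ON makes that bound a
compile-time guarantee rather than a silent truncation if the constants
ever grow.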
