author	Shakeel Butt <shakeel.butt@linux.dev>	2025-05-18 23:31:42 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2025-05-31 22:46:09 -0700
commit	3ac4638a734abbeb1d4a02b042e26a80a4205975 (patch)
tree	30fe50b8d0fc03b469ac7ef4ad1f9eca06eeabd7 /mm
parent	memcg: nmi-safe slab stats updates (diff)
memcg: make memcg_rstat_updated nmi safe
Currently the kernel maintains memory-related stats updates per-cgroup to optimize stats flushing. The stats_updates field is defined as atomic64_t, which is not nmi-safe on some architectures. We don't really need a 64-bit atomic here: the maximum value stats_updates can reach should be less than nr_cpus * MEMCG_CHARGE_BATCH, so a normal atomic_t suffices.

The function cgroup_rstat_updated() is also still not nmi-safe, but there is a parallel effort to make it so; until then, ignore it in nmi context.

Link: https://lkml.kernel.org/r/20250519063142.111219-6-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
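To make the sizing argument concrete, below is a minimal userspace sketch of the same batching pattern using C11 atomics. It is only an illustration under assumed toy values: BATCH, NR_CPUS, record_update() and needs_flush() are hypothetical stand-ins, not kernel code; the real path uses per-CPU variables and this_cpu_xchg() as shown in the diff below.

/*
 * Illustrative sketch only: per-CPU pending counts are folded into the
 * shared counter in batches, and the flusher drains it once it crosses
 * roughly NR_CPUS * BATCH, so the shared value never approaches the
 * limits of a 32-bit atomic (4096 * 64 = 262144 here).
 */
#include <stdatomic.h>
#include <stdio.h>

#define BATCH	64	/* stand-in for MEMCG_CHARGE_BATCH */
#define NR_CPUS	4096	/* generous upper bound for illustration */

static atomic_int stats_updates;	/* 32 bits are plenty, see bound above */
static int pending[NR_CPUS];		/* stand-in for per-CPU stats_updates */

static void record_update(int cpu, int val)
{
	pending[cpu] += val;
	if (pending[cpu] < BATCH)
		return;
	/* fold the batched per-CPU count into the shared counter */
	atomic_fetch_add(&stats_updates, pending[cpu]);
	pending[cpu] = 0;
}

static int needs_flush(void)
{
	return atomic_load(&stats_updates) > BATCH * NR_CPUS;
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		for (int i = 0; i < BATCH; i++)
			record_update(cpu, 1);

	/* peaks around NR_CPUS * BATCH = 262144, far below INT_MAX */
	printf("stats_updates=%d needs_flush=%d\n",
	       atomic_load(&stats_updates), needs_flush());
	return 0;
}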
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 74de4e523094..7e64dbf578d7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -531,7 +531,7 @@ struct memcg_vmstats {
 	unsigned long		events_pending[NR_MEMCG_EVENTS];
 
 	/* Stats updates since the last flush */
-	atomic64_t		stats_updates;
+	atomic_t		stats_updates;
 };
 
 /*
@@ -557,7 +557,7 @@ static u64 flush_last_time;
 
 static bool memcg_vmstats_needs_flush(struct memcg_vmstats *vmstats)
 {
-	return atomic64_read(&vmstats->stats_updates) >
+	return atomic_read(&vmstats->stats_updates) >
 		MEMCG_CHARGE_BATCH * num_online_cpus();
 }
 
@@ -571,7 +571,9 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
 	if (!val)
 		return;
 
-	cgroup_rstat_updated(memcg->css.cgroup, cpu);
+	/* TODO: add to cgroup update tree once it is nmi-safe. */
+	if (!in_nmi())
+		cgroup_rstat_updated(memcg->css.cgroup, cpu);
 	statc_pcpu = memcg->vmstats_percpu;
 	for (; statc_pcpu; statc_pcpu = statc->parent_pcpu) {
 		statc = this_cpu_ptr(statc_pcpu);
@@ -589,7 +591,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val,
 			continue;
 
 		stats_updates = this_cpu_xchg(statc_pcpu->stats_updates, 0);
-		atomic64_add(stats_updates, &statc->vmstats->stats_updates);
+		atomic_add(stats_updates, &statc->vmstats->stats_updates);
 	}
 }
 
@@ -597,7 +599,7 @@ static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
 {
 	bool needs_flush = memcg_vmstats_needs_flush(memcg->vmstats);
 
-	trace_memcg_flush_stats(memcg, atomic64_read(&memcg->vmstats->stats_updates),
+	trace_memcg_flush_stats(memcg, atomic_read(&memcg->vmstats->stats_updates),
 		force, needs_flush);
 
 	if (!force && !needs_flush)
@@ -4119,8 +4121,8 @@ static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 	}
 	WRITE_ONCE(statc->stats_updates, 0);
 	/* We are in a per-cpu loop here, only do the atomic write once */
-	if (atomic64_read(&memcg->vmstats->stats_updates))
-		atomic64_set(&memcg->vmstats->stats_updates, 0);
+	if (atomic_read(&memcg->vmstats->stats_updates))
+		atomic_set(&memcg->vmstats->stats_updates, 0);
 }
 
 static void mem_cgroup_fork(struct task_struct *task)