From 731bdd97466a280d6bdd8eceeb13d9fab6f26cbd Mon Sep 17 00:00:00 2001
From: JP Kobryn
Date: Wed, 21 May 2025 18:32:02 -0700
Subject: cgroup: avoid per-cpu allocation of size zero rstat cpu locks

Subsystem rstat locks are dynamically allocated per-cpu. It was
discovered that a panic can occur during this allocation when the lock
size is zero. This is the case on non-SMP systems, since
arch_spinlock_t is defined as an empty struct. Prevent this allocation
when !CONFIG_SMP by adding a preprocessor conditional around the
affected block.

Signed-off-by: JP Kobryn
Reported-by: Klara Modin
Fixes: 748922dcfabd ("cgroup: use subsystem-specific rstat locks to avoid contention")
Signed-off-by: Tejun Heo
---
 kernel/cgroup/rstat.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 7dd396ae3c68..ce4752ab9e09 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -510,11 +510,20 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
 	int cpu;
 
+#ifdef CONFIG_SMP
+	/*
+	 * On uniprocessor machines, arch_spinlock_t is defined as an empty
+	 * struct. Avoid allocating a size of zero by having this block
+	 * excluded in this case. It's acceptable to leave the subsystem
+	 * locks uninitialized since the associated lock functions are
+	 * no-ops in the non-SMP case.
+	 */
 	if (ss) {
 		ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
 		if (!ss->rstat_ss_cpu_lock)
 			return -ENOMEM;
 	}
+#endif
 
 	spin_lock_init(ss_rstat_lock(ss));
 	for_each_possible_cpu(cpu)
--
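
The hazard the patch guards against can be seen outside the kernel: in
GNU C (which the kernel uses), an empty struct has sizeof zero, so a
percpu-style allocator is asked for zero-byte slots on !CONFIG_SMP
builds. Below is a minimal userspace sketch of the same guard pattern,
assuming GNU C semantics for empty structs; fake_alloc_percpu() is a
hypothetical stand-in for alloc_percpu(), not the kernel's
implementation. Build with e.g. "gcc -DCONFIG_SMP demo.c" (or without
-DCONFIG_SMP to exercise the guarded path).

/*
 * Userspace sketch of the zero-size allocation hazard fixed above.
 * fake_alloc_percpu() is a hypothetical stand-in for the kernel's
 * alloc_percpu(); here it simply refuses a zero-size request, standing
 * in for an allocator that panics or misbehaves on size 0.
 */
#include <stdio.h>
#include <stdlib.h>

#ifdef CONFIG_SMP
typedef struct { int slock; } arch_spinlock_t;	/* real lock state */
#else
typedef struct { } arch_spinlock_t;	/* empty struct: sizeof == 0 in GNU C */
#endif

void *fake_alloc_percpu(size_t size)
{
	if (size == 0)
		return NULL;	/* zero-size requests are not serviceable */
	return calloc(1, size);
}

int main(void)
{
	void *locks = NULL;

#ifdef CONFIG_SMP
	/* Only allocate when the lock type actually occupies storage,
	 * mirroring the #ifdef CONFIG_SMP guard in the patch. */
	locks = fake_alloc_percpu(sizeof(arch_spinlock_t));
	if (!locks)
		return 1;
#endif
	printf("sizeof(arch_spinlock_t) = %zu, locks = %p\n",
	       sizeof(arch_spinlock_t), locks);
	free(locks);
	return 0;
}

With -DCONFIG_SMP the allocation succeeds and a non-NULL pointer is
printed; without it, the allocation is skipped entirely and the lock
pointer is simply left NULL, which is acceptable for the same reason
the patch gives: the lock operations are no-ops in the non-SMP case.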