path: root/kernel/sched/psi.c
author    Chengming Zhou <zhouchengming@bytedance.com>    2022-08-26 00:41:04 +0800
committer Peter Zijlstra <peterz@infradead.org>    2022-09-09 11:08:31 +0200
commit    e2ad8ab04c5cdfc8dc2f382c45d248ab01dee991 (patch)
tree      8584c4f3c3179254f60e1b9e12e05ac3af8ec9d3 /kernel/sched/psi.c
parent    sched/psi: Don't create cgroup PSI files when psi_disabled (diff)
download  linux-dev-e2ad8ab04c5cdfc8dc2f382c45d248ab01dee991.tar.xz
          linux-dev-e2ad8ab04c5cdfc8dc2f382c45d248ab01dee991.zip
sched/psi: Save percpu memory when !psi_cgroups_enabled
We won't use the cgroup psi_group when !psi_cgroups_enabled, so don't bother allocating and initializing percpu memory for it. We also don't need to migrate task PSI stats between cgroups in cgroup_move_task().

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Link: https://lore.kernel.org/r/20220825164111.29534-4-zhouchengming@bytedance.com
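For context on the mechanism the hunks below rely on: psi_cgroups_enabled is a static key that the call sites test with static_branch_likely(). A minimal sketch of that gating pattern follows; the key's declaration and the helper name example_cgroup_psi_setup() are illustrative assumptions here, not part of the diff shown on this page.

#include <linux/jump_label.h>

/*
 * Sketch only: a static key that defaults to "enabled". When it is
 * disabled at boot (as the psi_init() hunk below does when PSI itself
 * is off), the gated cgroup PSI work is skipped entirely.
 */
static DEFINE_STATIC_KEY_TRUE(psi_cgroups_enabled);

/* Hypothetical caller showing the same check the patch adds. */
static int example_cgroup_psi_setup(void)
{
        /*
         * The key is defined _TRUE, so static_branch_likely() keeps the
         * enabled case on the straight-line path; the negated test makes
         * the early return the cold branch.
         */
        if (!static_branch_likely(&psi_cgroups_enabled))
                return 0;

        /* ... allocate and initialize the per-cgroup psi_group here ... */
        return 0;
}

On architectures with jump-label support the test compiles down to a patched jump or no-op rather than a runtime load and compare, so the enabled path pays essentially nothing for the extra check.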
Diffstat (limited to '')
-rw-r--r--    kernel/sched/psi.c    7
1 file changed, 4 insertions, 3 deletions
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 39463dcc16bb..77d53c03a76f 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -201,6 +201,7 @@ void __init psi_init(void)
 {
         if (!psi_enable) {
                 static_branch_enable(&psi_disabled);
+                static_branch_disable(&psi_cgroups_enabled);
                 return;
         }
@@ -950,7 +951,7 @@ void psi_memstall_leave(unsigned long *flags)
 #ifdef CONFIG_CGROUPS
 int psi_cgroup_alloc(struct cgroup *cgroup)
 {
-        if (static_branch_likely(&psi_disabled))
+        if (!static_branch_likely(&psi_cgroups_enabled))
                 return 0;
 
         cgroup->psi = kzalloc(sizeof(struct psi_group), GFP_KERNEL);
@@ -968,7 +969,7 @@ int psi_cgroup_alloc(struct cgroup *cgroup)
 void psi_cgroup_free(struct cgroup *cgroup)
 {
-        if (static_branch_likely(&psi_disabled))
+        if (!static_branch_likely(&psi_cgroups_enabled))
                 return;
 
         cancel_delayed_work_sync(&cgroup->psi->avgs_work);
@@ -996,7 +997,7 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
         struct rq_flags rf;
         struct rq *rq;
 
-        if (static_branch_likely(&psi_disabled)) {
+        if (!static_branch_likely(&psi_cgroups_enabled)) {
                 /*
                  * Lame to do this here, but the scheduler cannot be locked
                  * from the outside, so we move cgroups from inside sched/.