author    Lin Shengwang <linshengwang1@huawei.com>  2022-10-08 10:27:09 +0800
committer Peter Zijlstra <peterz@infradead.org>     2022-10-17 16:41:24 +0200
commit    e705968dd687574b6ca3ebe772683d5642759132
tree      b00cd4431ae1833323ab15ac6cb66af29483cb14 /kernel/sched/sched.h
parent    Linux 6.1-rc1
sched/core: Fix comparison in sched_group_cookie_match()
In commit 97886d9dcd86 ("sched: Migration changes for core scheduling"),
sched_group_cookie_match() was added to help determine if a cookie matches
the core state.

However, while it iterates the SMT group, it fails to actually use the
runqueue of each iterated CPU; use cpu_rq(cpu) instead of rq to fix this.

Fixes: 97886d9dcd86 ("sched: Migration changes for core scheduling")
Signed-off-by: Lin Shengwang <linshengwang1@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20221008022709.642-1-linshengwang1@huawei.com
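For reference, here is how sched_group_cookie_match() reads with the fix
applied; a sketch reconstructed from the hunks below, with the opening lines
(the declaration of cpu and the sched_core_enabled() early-out) assumed from
surrounding kernel/sched/sched.h context rather than shown in this diff:

static inline bool sched_group_cookie_match(struct rq *rq,
					    struct task_struct *p,
					    struct sched_group *group)
{
	int cpu;

	/* Ignore cookie match if core-sched is not enabled. */
	if (!sched_core_enabled(rq))
		return true;

	/*
	 * Check each candidate CPU against its own runqueue; before this
	 * fix, the caller's rq was passed for every iteration.
	 */
	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
		if (sched_core_cookie_match(cpu_rq(cpu), p))
			return true;
	}
	return false;
}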
Diffstat (limited to '')
-rw-r--r--  kernel/sched/sched.h  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1644242ecd11..0d0851127369 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1182,6 +1182,14 @@ static inline bool is_migration_disabled(struct task_struct *p)
 #endif
 }
 
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
+#define this_rq()		this_cpu_ptr(&runqueues)
+#define task_rq(p)		cpu_rq(task_cpu(p))
+#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
+#define raw_rq()		raw_cpu_ptr(&runqueues)
+
 struct sched_group;
 #ifdef CONFIG_SCHED_CORE
 static inline struct cpumask *sched_group_span(struct sched_group *sg);
@@ -1269,7 +1277,7 @@ static inline bool sched_group_cookie_match(struct rq *rq,
 		return true;
 
 	for_each_cpu_and(cpu, sched_group_span(group), p->cpus_ptr) {
-		if (sched_core_cookie_match(rq, p))
+		if (sched_core_cookie_match(cpu_rq(cpu), p))
 			return true;
 	}
 	return false;
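The reason the rq argument matters: sched_core_cookie_match() (not touched
by this diff) tests the cookie against rq->core->core_cookie, so it must be
given the runqueue of the CPU actually under test, not the caller's. A
sketch of that helper, assumed here to match the version introduced by
commit 97886d9dcd86:

static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p)
{
	bool idle_core = true;
	int cpu;

	/* Ignore cookie match if core-sched is not enabled. */
	if (!sched_core_enabled(rq))
		return true;

	/* Scan the SMT siblings of this rq's CPU for an idle core. */
	for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) {
		if (!available_idle_cpu(cpu)) {
			idle_core = false;
			break;
		}
	}

	/* An idle core is always a cookie match for any task. */
	return idle_core || rq->core->core_cookie == p->core_cookie;
}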
@@ -1384,14 +1392,6 @@ static inline void update_idle_core(struct rq *rq)
 static inline void update_idle_core(struct rq *rq) { }
 #endif
 
-DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
-
-#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		this_cpu_ptr(&runqueues)
-#define task_rq(p)		cpu_rq(task_cpu(p))
-#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		raw_cpu_ptr(&runqueues)
-
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static inline struct task_struct *task_of(struct sched_entity *se)
 {