From efd984c481abb516fab8bafb25bf41fd9397a43c Mon Sep 17 00:00:00 2001
From: Valentin Schneider
Date: Mon, 23 Aug 2021 12:16:59 +0100
Subject: sched/fair: Add NOHZ balancer flag for nohz.next_balance updates

A following patch will trigger NOHZ idle balances as a means to update
nohz.next_balance. Vincent noted that blocked load updates can have
non-negligible overhead, which should be avoided if the intent is to only
update nohz.next_balance.

Add a new NOHZ balance kick flag, NOHZ_NEXT_KICK. Gate NOHZ blocked load
update by the presence of NOHZ_STATS_KICK - currently all NOHZ balance
kicks will have the NOHZ_STATS_KICK flag set, so no change in behaviour is
expected.

Suggested-by: Vincent Guittot
Signed-off-by: Valentin Schneider
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Link: https://lkml.kernel.org/r/20210823111700.2842997-2-valentin.schneider@arm.com
---
 kernel/sched/sched.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3d3e5793e117..1fec3132d57e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2709,12 +2709,18 @@ extern void cfs_bandwidth_usage_dec(void);
 #define NOHZ_BALANCE_KICK_BIT	0
 #define NOHZ_STATS_KICK_BIT	1
 #define NOHZ_NEWILB_KICK_BIT	2
+#define NOHZ_NEXT_KICK_BIT	3

+/* Run rebalance_domains() */
 #define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
+/* Update blocked load */
 #define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
+/* Update blocked load when entering idle */
 #define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
+/* Update nohz.next_balance */
+#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)

-#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
+#define NOHZ_KICK_MASK	(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
--
cgit v1.2.3-59-g8ed1b
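To make the gating concrete, here is a minimal userspace sketch of the kick-flag idea in the patch above. The flag names mirror the patch, but nohz_idle_balance_demo() and its printf stubs are invented for illustration; this is not kernel code.

/*
 * Sketch: a NOHZ_NEXT_KICK-only kick refreshes nohz.next_balance without
 * paying for the blocked-load update, which is gated on NOHZ_STATS_KICK.
 */
#include <stdio.h>

#define BIT(n)			(1UL << (n))

#define NOHZ_BALANCE_KICK_BIT	0
#define NOHZ_STATS_KICK_BIT	1
#define NOHZ_NEWILB_KICK_BIT	2
#define NOHZ_NEXT_KICK_BIT	3

#define NOHZ_BALANCE_KICK	BIT(NOHZ_BALANCE_KICK_BIT)
#define NOHZ_STATS_KICK		BIT(NOHZ_STATS_KICK_BIT)
#define NOHZ_NEWILB_KICK	BIT(NOHZ_NEWILB_KICK_BIT)
#define NOHZ_NEXT_KICK		BIT(NOHZ_NEXT_KICK_BIT)

#define NOHZ_KICK_MASK		(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK | NOHZ_NEXT_KICK)

static void nohz_idle_balance_demo(unsigned long flags)
{
	/* Cheap: always refresh nohz.next_balance. */
	printf("update nohz.next_balance\n");

	/* Expensive blocked-load update only when explicitly requested. */
	if (flags & NOHZ_STATS_KICK)
		printf("update blocked load\n");

	if (flags & NOHZ_BALANCE_KICK)
		printf("run rebalance_domains()\n");
}

int main(void)
{
	/* A pure next_balance refresh skips the blocked-load update. */
	nohz_idle_balance_demo(NOHZ_NEXT_KICK & NOHZ_KICK_MASK);
	/* A full kick still does everything. */
	nohz_idle_balance_demo(NOHZ_BALANCE_KICK | NOHZ_STATS_KICK);
	return 0;
}

Running it prints only the next_balance update for the NOHZ_NEXT_KICK-only case, which is the overhead reduction the patch is after.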
From a480addecc0d89c200ec0b41da62ae8ceddca8d7 Mon Sep 17 00:00:00 2001
From: Josh Don
Date: Thu, 19 Aug 2021 18:04:01 -0700
Subject: sched: Account number of SCHED_IDLE entities on each cfs_rq

Adds cfs_rq->idle_nr_running, which accounts the number of idle entities
directly enqueued on the cfs_rq.

Signed-off-by: Josh Don
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Link: https://lore.kernel.org/r/20210820010403.946838-3-joshdon@google.com
---
 kernel/sched/debug.c |  2 ++
 kernel/sched/fair.c  | 25 ++++++++++++++++++++++++-
 kernel/sched/sched.h |  1 +
 3 files changed, 27 insertions(+), 1 deletion(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 17a653b67006..2e5fdd98dacc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -614,6 +614,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->nr_spread_over);
 	SEQ_printf(m, "  .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running);
+	SEQ_printf(m, "  .%-30s: %d\n", "idle_nr_running",
+			cfs_rq->idle_nr_running);
 	SEQ_printf(m, "  .%-30s: %d\n", "idle_h_nr_running",
 			cfs_rq->idle_h_nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6cc958e0584d..9c78c16ef462 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2995,6 +2995,8 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	}
 #endif
 	cfs_rq->nr_running++;
+	if (se_is_idle(se))
+		cfs_rq->idle_nr_running++;
 }

 static void
@@ -3008,6 +3010,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	}
 #endif
 	cfs_rq->nr_running--;
+	if (se_is_idle(se))
+		cfs_rq->idle_nr_running--;
 }

 /*
@@ -5577,6 +5581,17 @@ static int sched_idle_rq(struct rq *rq)
 			rq->nr_running);
 }

+/*
+ * Returns true if cfs_rq only has SCHED_IDLE entities enqueued. Note the use
+ * of idle_nr_running, which does not consider idle descendants of normal
+ * entities.
+ */
+static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq)
+{
+	return cfs_rq->nr_running &&
+		cfs_rq->nr_running == cfs_rq->idle_nr_running;
+}
+
 #ifdef CONFIG_SMP
 static int sched_idle_cpu(int cpu)
 {
@@ -11575,7 +11590,7 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 	for_each_possible_cpu(i) {
 		struct rq *rq = cpu_rq(i);
 		struct sched_entity *se = tg->se[i];
-		struct cfs_rq *grp_cfs_rq = tg->cfs_rq[i];
+		struct cfs_rq *parent_cfs_rq, *grp_cfs_rq = tg->cfs_rq[i];
 		bool was_idle = cfs_rq_is_idle(grp_cfs_rq);
 		long idle_task_delta;
 		struct rq_flags rf;
@@ -11586,6 +11601,14 @@ int sched_group_set_idle(struct task_group *tg, long idle)
 		if (WARN_ON_ONCE(was_idle == cfs_rq_is_idle(grp_cfs_rq)))
 			goto next_cpu;

+		if (se->on_rq) {
+			parent_cfs_rq = cfs_rq_of(se);
+			if (cfs_rq_is_idle(grp_cfs_rq))
+				parent_cfs_rq->idle_nr_running++;
+			else
+				parent_cfs_rq->idle_nr_running--;
+		}
+
 		idle_task_delta = grp_cfs_rq->h_nr_running -
 				  grp_cfs_rq->idle_h_nr_running;
 		if (!cfs_rq_is_idle(grp_cfs_rq))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1fec3132d57e..f2965b558683 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -530,6 +530,7 @@ struct cfs_rq {
 	struct load_weight	load;
 	unsigned int		nr_running;
 	unsigned int		h_nr_running;      /* SCHED_{NORMAL,BATCH,IDLE} */
+	unsigned int		idle_nr_running;   /* SCHED_IDLE */
 	unsigned int		idle_h_nr_running; /* SCHED_IDLE */

 	u64			exec_clock;
--
cgit v1.2.3-59-g8ed1b
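The accounting above boils down to a pair of counters and an equality check. A self-contained userspace sketch follows; the struct and helper names loosely follow the patch, but the demo harness itself is invented.

/*
 * Sketch: count directly enqueued idle entities alongside nr_running,
 * then a runqueue is "all idle" when the two counters match.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_cfs_rq {
	unsigned int nr_running;
	unsigned int idle_nr_running;
};

static void account_enqueue(struct demo_cfs_rq *cfs_rq, bool se_is_idle)
{
	cfs_rq->nr_running++;
	if (se_is_idle)
		cfs_rq->idle_nr_running++;
}

static void account_dequeue(struct demo_cfs_rq *cfs_rq, bool se_is_idle)
{
	cfs_rq->nr_running--;
	if (se_is_idle)
		cfs_rq->idle_nr_running--;
}

/* True only when every directly enqueued entity is SCHED_IDLE. */
static bool sched_idle_cfs_rq(struct demo_cfs_rq *cfs_rq)
{
	return cfs_rq->nr_running &&
		cfs_rq->nr_running == cfs_rq->idle_nr_running;
}

int main(void)
{
	struct demo_cfs_rq rq = { 0, 0 };

	account_enqueue(&rq, true);	/* one idle entity */
	printf("all idle: %d\n", sched_idle_cfs_rq(&rq));	/* prints 1 */
	account_enqueue(&rq, false);	/* plus one normal entity */
	printf("all idle: %d\n", sched_idle_cfs_rq(&rq));	/* prints 0 */
	account_dequeue(&rq, false);
	return 0;
}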
From 51ce83ed523b00d58f2937ec014b12daaad55185 Mon Sep 17 00:00:00 2001
From: Josh Don
Date: Thu, 19 Aug 2021 18:04:02 -0700
Subject: sched: reduce sched slice for SCHED_IDLE entities

Use a small, non-scaled min granularity for SCHED_IDLE entities, when
competing with normal entities. This reduces the latency of getting a
normal entity back on cpu, at the expense of increased context switch
frequency of SCHED_IDLE entities.

The benefit of this change is to reduce the round-robin latency for
normal entities when competing with a SCHED_IDLE entity.

Example: on a machine with HZ=1000, we spawned two threads, one of which
is SCHED_IDLE, and affined them to one cpu. Without this patch, the
SCHED_IDLE thread runs for 4ms then waits for 1.4s. With this patch, it
runs for 1ms and waits 340ms (as it round-robins with the other thread).

Signed-off-by: Josh Don
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Vincent Guittot
Link: https://lore.kernel.org/r/20210820010403.946838-4-joshdon@google.com
---
 kernel/sched/debug.c |  2 ++
 kernel/sched/fair.c  | 29 ++++++++++++++++++++++++-----
 kernel/sched/sched.h |  1 +
 3 files changed, 27 insertions(+), 5 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2e5fdd98dacc..34913a7f775a 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -311,6 +311,7 @@ static __init int sched_init_debug(void)

 	debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
 	debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
+	debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
 	debugfs_create_u32("wakeup_granularity_ns", 0644, debugfs_sched, &sysctl_sched_wakeup_granularity);

 	debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
@@ -812,6 +813,7 @@ static void sched_debug_header(struct seq_file *m)
 	SEQ_printf(m, "  .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
 	PN(sysctl_sched_latency);
 	PN(sysctl_sched_min_granularity);
+	PN(sysctl_sched_idle_min_granularity);
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9c78c16ef462..d835061a9bd2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -59,6 +59,14 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
 unsigned int sysctl_sched_min_granularity			= 750000ULL;
 static unsigned int normalized_sysctl_sched_min_granularity	= 750000ULL;

+/*
+ * Minimal preemption granularity for CPU-bound SCHED_IDLE tasks.
+ * Applies only when SCHED_IDLE tasks compete with normal tasks.
+ *
+ * (default: 0.75 msec)
+ */
+unsigned int sysctl_sched_idle_min_granularity			= 750000ULL;
+
 /*
  * This value is kept at sysctl_sched_latency/sysctl_sched_min_granularity
  */
@@ -665,6 +673,8 @@ static u64 __sched_period(unsigned long nr_running)
 	return sysctl_sched_latency;
 }

+static bool sched_idle_cfs_rq(struct cfs_rq *cfs_rq);
+
 /*
  * We calculate the wall-time slice from the period by taking a part
  * proportional to the weight.
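@@ -674,6 +684,8 @@ static u64 __sched_period(unsigned long nr_running)
 static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	unsigned int nr_running = cfs_rq->nr_running;
+	struct sched_entity *init_se = se;
+	unsigned int min_gran;
 	u64 slice;

 	if (sched_feat(ALT_PERIOD))
@@ -684,12 +696,13 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	for_each_sched_entity(se) {
 		struct load_weight *load;
 		struct load_weight lw;
+		struct cfs_rq *qcfs_rq;

-		cfs_rq = cfs_rq_of(se);
-		load = &cfs_rq->load;
+		qcfs_rq = cfs_rq_of(se);
+		load = &qcfs_rq->load;

 		if (unlikely(!se->on_rq)) {
-			lw = cfs_rq->load;
+			lw = qcfs_rq->load;

 			update_load_add(&lw, se->load.weight);
 			load = &lw;
@@ -697,8 +710,14 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		slice = __calc_delta(slice, se->load.weight, load);
 	}

-	if (sched_feat(BASE_SLICE))
-		slice = max(slice, (u64)sysctl_sched_min_granularity);
+	if (sched_feat(BASE_SLICE)) {
+		if (se_is_idle(init_se) && !sched_idle_cfs_rq(cfs_rq))
+			min_gran = sysctl_sched_idle_min_granularity;
+		else
+			min_gran = sysctl_sched_min_granularity;
+
+		slice = max_t(u64, slice, min_gran);
+	}

 	return slice;
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f2965b558683..15a8895698b9 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2403,6 +2403,7 @@ extern const_debug unsigned int sysctl_sched_migration_cost;
 #ifdef CONFIG_SCHED_DEBUG
 extern unsigned int sysctl_sched_latency;
 extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_idle_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern int sysctl_resched_latency_warn_ms;
 extern int sysctl_resched_latency_warn_once;
--
cgit v1.2.3-59-g8ed1b

The new clamp in sched_slice() picks a minimum granularity based on whether the entity is idle and whether it is competing with non-idle entities. A standalone sketch of just that selection follows; the helper is invented, and sysctl_sched_min_granularity is hard-coded to 3 ms to stand in for the CPU-count scaling the kernel applies, while the idle minimum stays at the non-scaled 0.75 ms default.

/* Sketch: choose the slice floor for an idle vs. normal entity. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Values in nanoseconds; 3 ms stands in for a scaled min granularity. */
static const uint64_t sysctl_sched_min_granularity      = 3000000ULL;
static const uint64_t sysctl_sched_idle_min_granularity = 750000ULL;

static uint64_t clamp_slice(uint64_t slice, bool se_is_idle, bool rq_all_idle)
{
	uint64_t min_gran;

	/*
	 * An idle entity competing with normal entities gets the smaller,
	 * non-scaled idle minimum; otherwise the regular minimum applies.
	 */
	if (se_is_idle && !rq_all_idle)
		min_gran = sysctl_sched_idle_min_granularity;
	else
		min_gran = sysctl_sched_min_granularity;

	return slice > min_gran ? slice : min_gran;
}

int main(void)
{
	/* A tiny weight-derived slice is raised only to the idle floor... */
	printf("%llu\n", (unsigned long long)clamp_slice(100000ULL, true, false));
	/* ...while a normal entity keeps the larger floor. */
	printf("%llu\n", (unsigned long long)clamp_slice(100000ULL, false, false));
	return 0;
}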
From bcb1704a1ed2de580a46f28922e223a65f16e0f5 Mon Sep 17 00:00:00 2001
From: Huaixin Chang
Date: Mon, 30 Aug 2021 11:22:14 +0800
Subject: sched/fair: Add cfs bandwidth burst statistics

Two new statistics are introduced to show the internals of the burst
feature and to explain why burst helps or not:

nr_bursts:  number of periods in which a bandwidth burst occurs
burst_time: cumulative wall-time (in nanoseconds) that any CPUs have used
            above quota in their respective periods

Co-developed-by: Shanpei Chen
Signed-off-by: Shanpei Chen
Co-developed-by: Tianchen Ding
Signed-off-by: Tianchen Ding
Signed-off-by: Huaixin Chang
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Daniel Jordan
Acked-by: Tejun Heo
Link: https://lore.kernel.org/r/20210830032215.16302-2-changhuaixin@linux.alibaba.com
---
 kernel/sched/core.c  | 13 ++++++++++---
 kernel/sched/fair.c  |  9 +++++++++
 kernel/sched/sched.h |  3 +++
 3 files changed, 22 insertions(+), 3 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f963c8113375..ccb604a1bd22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10406,6 +10406,9 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v)
 		seq_printf(sf, "wait_sum %llu\n", ws);
 	}

+	seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst);
+	seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time);
+
 	return 0;
 }
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -10521,16 +10524,20 @@ static int cpu_extra_stat_show(struct seq_file *sf,
 	{
 		struct task_group *tg = css_tg(css);
 		struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
-		u64 throttled_usec;
+		u64 throttled_usec, burst_usec;

 		throttled_usec = cfs_b->throttled_time;
 		do_div(throttled_usec, NSEC_PER_USEC);
+		burst_usec = cfs_b->burst_time;
+		do_div(burst_usec, NSEC_PER_USEC);

 		seq_printf(sf, "nr_periods %d\n"
 			   "nr_throttled %d\n"
-			   "throttled_usec %llu\n",
+			   "throttled_usec %llu\n"
+			   "nr_bursts %d\n"
+			   "burst_usec %llu\n",
 			   cfs_b->nr_periods, cfs_b->nr_throttled,
-			   throttled_usec);
+			   throttled_usec, cfs_b->nr_burst, burst_usec);
 	}
 #endif
 	return 0;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5457c8001c2a..fd41abef1950 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4715,11 +4715,20 @@ static inline u64 sched_cfs_bandwidth_slice(void)
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
+	s64 runtime;
+
 	if (unlikely(cfs_b->quota == RUNTIME_INF))
 		return;

 	cfs_b->runtime += cfs_b->quota;
+	runtime = cfs_b->runtime_snap - cfs_b->runtime;
+	if (runtime > 0) {
+		cfs_b->burst_time += runtime;
+		cfs_b->nr_burst++;
+	}
+
 	cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst);
+	cfs_b->runtime_snap = cfs_b->runtime;
 }

 static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 15a8895698b9..8712fc431417 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -369,6 +369,7 @@ struct cfs_bandwidth {
 	u64			quota;
 	u64			runtime;
 	u64			burst;
+	u64			runtime_snap;
 	s64			hierarchical_quota;

 	u8			idle;
@@ -381,7 +382,9 @@ struct cfs_bandwidth {
 	/* Statistics: */
 	int			nr_periods;
 	int			nr_throttled;
+	int			nr_burst;
 	u64			throttled_time;
+	u64			burst_time;
 #endif
 };
--
cgit v1.2.3-59-g8ed1b
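The refill bookkeeping above can be read as: runtime_snap remembers how much runtime was left after the previous refill, so runtime_snap - runtime, computed right after adding the new quota, is exactly the amount consumed above quota, i.e. the burst. A standalone sketch with made-up numbers (the struct mirrors the patch, the demo values are in microseconds for readability):

/* Sketch of the per-period burst accounting done at quota refill time. */
#include <stdint.h>
#include <stdio.h>

struct demo_cfs_bandwidth {
	int64_t  quota;
	int64_t  runtime;
	int64_t  burst;
	int64_t  runtime_snap;
	int      nr_burst;
	uint64_t burst_time;
};

static void refill_runtime(struct demo_cfs_bandwidth *cfs_b)
{
	int64_t over;

	cfs_b->runtime += cfs_b->quota;

	/* Consumption above quota in the previous period counts as burst. */
	over = cfs_b->runtime_snap - cfs_b->runtime;
	if (over > 0) {
		cfs_b->burst_time += over;
		cfs_b->nr_burst++;
	}

	/* Unused runtime may accumulate, but never beyond quota + burst. */
	if (cfs_b->runtime > cfs_b->quota + cfs_b->burst)
		cfs_b->runtime = cfs_b->quota + cfs_b->burst;
	cfs_b->runtime_snap = cfs_b->runtime;
}

int main(void)
{
	/* quota 100ms, burst 50ms, fully topped up at the start. */
	struct demo_cfs_bandwidth b = {
		.quota = 100000, .runtime = 150000, .burst = 50000,
		.runtime_snap = 150000,
	};

	b.runtime -= 130000;	/* period 1: consume 130ms, 30ms above quota */
	refill_runtime(&b);
	printf("nr_burst=%d burst_time=%llu\n",
	       b.nr_burst, (unsigned long long)b.burst_time);
	return 0;
}

For this period the demo reports nr_burst=1 and burst_time=30000, i.e. the 30 ms consumed above the 100 ms quota.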
From 32ed980c3020b7a19e26dc488c10817807ba2a41 Mon Sep 17 00:00:00 2001
From: YueHaibing
Date: Tue, 14 Sep 2021 17:52:44 +0800
Subject: sched: Remove unused inline function __rq_clock_broken()

There is no caller in tree since commit 523e979d3164 ("sched/core: Use
PELT for scale_rt_capacity()").

Signed-off-by: YueHaibing
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20210914095244.52780-1-yuehaibing@huawei.com
---
 kernel/sched/sched.h | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8712fc431417..198058bb54d7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1426,11 +1426,6 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)

 extern void update_rq_clock(struct rq *rq);

-static inline u64 __rq_clock_broken(struct rq *rq)
-{
-	return READ_ONCE(rq->clock);
-}
-
 /*
  * rq::clock_update_flags bits
 *
--
cgit v1.2.3-59-g8ed1b
From 16d364ba6ef2aa59b409df70682770f3ed23f7c0 Mon Sep 17 00:00:00 2001
From: Ricardo Neri
Date: Fri, 10 Sep 2021 18:18:15 -0700
Subject: sched/topology: Introduce sched_group::flags

There exist situations in which the load balancer needs to know the
properties of the CPUs in a scheduling group. When using asymmetric
packing, for instance, the load balancer needs to know not only the state
of dst_cpu but also of its SMT siblings, if any.

Use the flags of the child scheduling domains to initialize scheduling
group flags. This will reflect the properties of the CPUs in the group.

A subsequent changeset will make use of these new flags. No functional
changes are introduced.

Originally-by: Peter Zijlstra (Intel)
Signed-off-by: Ricardo Neri
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Joel Fernandes (Google)
Reviewed-by: Len Brown
Reviewed-by: Vincent Guittot
Link: https://lkml.kernel.org/r/20210911011819.12184-3-ricardo.neri-calderon@linux.intel.com
---
 kernel/sched/sched.h    |  1 +
 kernel/sched/topology.c | 21 ++++++++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 198058bb54d7..e5c4d4ddd83a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1808,6 +1808,7 @@ struct sched_group {
 	unsigned int		group_weight;
 	struct sched_group_capacity *sgc;
 	int			asym_prefer_cpu;	/* CPU of highest priority in group */
+	int			flags;

 	/*
 	 * The CPUs this group covers.
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 4e8698e62f07..c56faae461d9 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -716,8 +716,20 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 		tmp = sd;
 		sd = sd->parent;
 		destroy_sched_domain(tmp);
-		if (sd)
+		if (sd) {
+			struct sched_group *sg = sd->groups;
+
+			/*
+			 * sched groups hold the flags of the child sched
+			 * domain for convenience. Clear such flags since
+			 * the child is being destroyed.
+			 */
+			do {
+				sg->flags = 0;
+			} while (sg != sd->groups);
+
 			sd->child = NULL;
+		}
 	}

 	for (tmp = sd; tmp; tmp = tmp->parent)
@@ -916,10 +928,12 @@ build_group_from_child_sched_domain(struct sched_domain *sd, int cpu)
 		return NULL;

 	sg_span = sched_group_span(sg);
-	if (sd->child)
+	if (sd->child) {
 		cpumask_copy(sg_span, sched_domain_span(sd->child));
-	else
+		sg->flags = sd->child->flags;
+	} else {
 		cpumask_copy(sg_span, sched_domain_span(sd));
+	}

 	atomic_inc(&sg->ref);
 	return sg;
@@ -1169,6 +1183,7 @@ static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 	if (child) {
 		cpumask_copy(sched_group_span(sg), sched_domain_span(child));
 		cpumask_copy(group_balance_mask(sg), sched_group_span(sg));
+		sg->flags = child->flags;
 	} else {
 		cpumask_set_cpu(cpu, sched_group_span(sg));
 		cpumask_set_cpu(cpu, group_balance_mask(sg));
--
cgit v1.2.3-59-g8ed1b
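The topology change above copies the child domain's flags into each group and clears them when the child goes away. A toy model of that propagation follows; the SD_* values and structs here are simplified stand-ins, not the kernel definitions.

/* Sketch: a group borrows the flags of the child domain it spans. */
#include <stdio.h>

#define SD_SHARE_CPUCAPACITY	0x0001	/* e.g. SMT siblings */
#define SD_ASYM_PACKING		0x0002

struct demo_sched_domain {
	int flags;
	struct demo_sched_domain *child;
};

struct demo_sched_group {
	int flags;
};

static void build_group(struct demo_sched_group *sg,
			const struct demo_sched_domain *sd)
{
	/* Groups hold the flags of the child domain for convenience. */
	sg->flags = sd->child ? sd->child->flags : 0;
}

int main(void)
{
	struct demo_sched_domain smt = { SD_SHARE_CPUCAPACITY, NULL };
	struct demo_sched_domain mc  = { SD_ASYM_PACKING, &smt };
	struct demo_sched_group sg;

	build_group(&sg, &mc);
	printf("group has SMT siblings: %d\n",
	       !!(sg.flags & SD_SHARE_CPUCAPACITY));

	/* If the child domain is destroyed, the borrowed flags are cleared. */
	mc.child = NULL;
	sg.flags = 0;
	return 0;
}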
From d07b2eee4501c393cbf5bfcad36143310cfd72f9 Mon Sep 17 00:00:00 2001
From: Shaokun Zhang
Date: Wed, 22 Sep 2021 16:57:35 +0800
Subject: sched: Make cookie functions static

Make cookie functions static as these are no longer invoked directly
by other code. No functional change intended.

Signed-off-by: Shaokun Zhang
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/20210922085735.52812-1-zhangshaokun@hisilicon.com
---
 kernel/sched/core_sched.c | 9 +++++----
 kernel/sched/sched.h      | 5 -----
 2 files changed, 5 insertions(+), 9 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 9a80e9a474c0..48ac72696012 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -11,7 +11,7 @@ struct sched_core_cookie {
 	refcount_t refcnt;
 };

-unsigned long sched_core_alloc_cookie(void)
+static unsigned long sched_core_alloc_cookie(void)
 {
 	struct sched_core_cookie *ck = kmalloc(sizeof(*ck), GFP_KERNEL);
 	if (!ck)
@@ -23,7 +23,7 @@ unsigned long sched_core_alloc_cookie(void)
 	return (unsigned long)ck;
 }

-void sched_core_put_cookie(unsigned long cookie)
+static void sched_core_put_cookie(unsigned long cookie)
 {
 	struct sched_core_cookie *ptr = (void *)cookie;

@@ -33,7 +33,7 @@ void sched_core_put_cookie(unsigned long cookie)
 	}
 }

-unsigned long sched_core_get_cookie(unsigned long cookie)
+static unsigned long sched_core_get_cookie(unsigned long cookie)
 {
 	struct sched_core_cookie *ptr = (void *)cookie;

@@ -53,7 +53,8 @@ unsigned long sched_core_get_cookie(unsigned long cookie)
 *
 * Returns: the old cookie
 */
-unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie)
+static unsigned long sched_core_update_cookie(struct task_struct *p,
+					      unsigned long cookie)
 {
 	unsigned long old_cookie;
 	struct rq_flags rf;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e5c4d4ddd83a..a00fc7057d97 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1258,11 +1258,6 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p);
 extern void sched_core_get(void);
 extern void sched_core_put(void);

-extern unsigned long sched_core_alloc_cookie(void);
-extern void sched_core_put_cookie(unsigned long cookie);
-extern unsigned long sched_core_get_cookie(unsigned long cookie);
-extern unsigned long sched_core_update_cookie(struct task_struct *p, unsigned long cookie);
-
 #else /* !CONFIG_SCHED_CORE */

 static inline bool sched_core_enabled(struct rq *rq)
--
cgit v1.2.3-59-g8ed1b

From eaed27d0d01a89a510736d87f10cea02042b4756 Mon Sep 17 00:00:00 2001
From: Peng Wang
Date: Tue, 19 Oct 2021 10:58:39 +0800
Subject: sched/core: Remove rq_relock()

After the removal of migrate_tasks(), there is no user of rq_relock()
left, so remove it.

Signed-off-by: Peng Wang
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lkml.kernel.org/r/449948fdf9be4764b3929c52572917dd25eef758.1634611953.git.rocking@linux.alibaba.com
---
 kernel/sched/sched.h | 8 --------
 1 file changed, 8 deletions(-)

(limited to 'kernel/sched/sched.h')

diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a00fc7057d97..f0b249ec581d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1615,14 +1615,6 @@ rq_lock(struct rq *rq, struct rq_flags *rf)
 	rq_pin_lock(rq, rf);
 }

-static inline void
-rq_relock(struct rq *rq, struct rq_flags *rf)
-	__acquires(rq->lock)
-{
-	raw_spin_rq_lock(rq);
-	rq_repin_lock(rq, rf);
-}
-
 static inline void
 rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
 	__releases(rq->lock)
--
cgit v1.2.3-59-g8ed1b
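For reference, the core_sched.c helpers made static two patches above follow a simple refcounted-cookie pattern: the cookie is the address of a refcounted allocation cast to an unsigned long. A userspace sketch with invented names follows; a plain int counter stands in for the kernel's refcount_t and no locking is shown.

/* Sketch of the alloc/get/put lifetime of a core-scheduling cookie. */
#include <stdlib.h>
#include <stdio.h>

struct demo_cookie {
	int refcnt;
};

static unsigned long cookie_alloc(void)
{
	struct demo_cookie *ck = malloc(sizeof(*ck));

	if (!ck)
		return 0;
	ck->refcnt = 1;
	return (unsigned long)ck;
}

static unsigned long cookie_get(unsigned long cookie)
{
	struct demo_cookie *ck = (void *)cookie;

	if (ck)
		ck->refcnt++;
	return cookie;
}

static void cookie_put(unsigned long cookie)
{
	struct demo_cookie *ck = (void *)cookie;

	if (ck && --ck->refcnt == 0)
		free(ck);
}

int main(void)
{
	unsigned long c = cookie_alloc();	/* task A creates a cookie */

	cookie_get(c);				/* task B shares it */
	cookie_put(c);				/* task B exits */
	cookie_put(c);				/* task A exits, cookie freed */
	printf("done\n");
	return 0;
}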