author     Peter Zijlstra <peterz@infradead.org>  2021-03-03 16:45:41 +0100
committer  Peter Zijlstra <peterz@infradead.org>  2021-05-12 11:43:27 +0200
commit     9ef7e7e33bcdb57be1afb28884053c28b5f05240 (patch)
tree       40e43fa4c6d82adf7cd39fbc1f5dfb868701165b /kernel/sched/sched.h
parent     sched: Core-wide rq->lock (diff)
download   linux-dev-9ef7e7e33bcdb57be1afb28884053c28b5f05240.tar.xz
           linux-dev-9ef7e7e33bcdb57be1afb28884053c28b5f05240.zip
sched: Optimize rq_lockp() usage
rq_lockp() includes a static_branch(), which is asm-goto, which is asm
volatile which defeats regular CSE. This means that:

	if (!static_branch(&foo))
		return simple;

	if (static_branch(&foo) && cond)
		return complex;

Doesn't fold and we get horrible code.

Introduce __rq_lockp() without the static_branch() on.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Don Hiatt <dhiatt@digitalocean.com>
Tested-by: Hongyu Ning <hongyu.ning@linux.intel.com>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lkml.kernel.org/r/20210422123308.316696988@infradead.org
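For contrast, a minimal hypothetical sketch (simplified userspace C, not part
of the patch): when the flag is an ordinary variable, as with the
rq->core_enabled read in __rq_lockp() in the hunks below, the compiler is free
to fold the two tests into a single load and branch:

	/*
	 * Illustration only: 'foo_enabled' is a plain bool standing in for
	 * rq->core_enabled; 'pick()' is a made-up caller, not kernel code.
	 */
	#include <stdbool.h>

	static bool foo_enabled;

	static int pick(int cond, int simple, int complex)
	{
		/*
		 * Both tests read an ordinary variable, so the compiler can
		 * CSE them into one load and merge the branches.  With
		 * static_branch(), each test expands to an independent
		 * asm goto (asm volatile) and has to be emitted separately.
		 */
		if (!foo_enabled)
			return simple;

		if (foo_enabled && cond)
			return complex;

		return simple;
	}

__rq_lockp() keeps the same logic as rq_lockp() but tests rq->core_enabled
directly, so its result is only stable while a relevant rq->__lock is held,
as the comment added in the hunk below warns.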
Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--  kernel/sched/sched.h  33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 29418b8c05dd..ca30af37b906 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1133,6 +1133,10 @@ static inline bool sched_core_disabled(void)
return !static_branch_unlikely(&__sched_core_enabled);
}
+/*
+ * Be careful with this function; not for general use. The return value isn't
+ * stable unless you actually hold a relevant rq->__lock.
+ */
static inline raw_spinlock_t *rq_lockp(struct rq *rq)
{
if (sched_core_enabled(rq))
@@ -1141,6 +1145,14 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
return &rq->__lock;
}
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+ if (rq->core_enabled)
+ return &rq->core->__lock;
+
+ return &rq->__lock;
+}
+
#else /* !CONFIG_SCHED_CORE */
static inline bool sched_core_enabled(struct rq *rq)
@@ -1158,11 +1170,16 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq)
return &rq->__lock;
}
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+ return &rq->__lock;
+}
+
#endif /* CONFIG_SCHED_CORE */
static inline void lockdep_assert_rq_held(struct rq *rq)
{
- lockdep_assert_held(rq_lockp(rq));
+ lockdep_assert_held(__rq_lockp(rq));
}
extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
@@ -1346,7 +1363,7 @@ extern struct callback_head balance_push_callback;
*/
static inline void rq_pin_lock(struct rq *rq, struct rq_flags *rf)
{
- rf->cookie = lockdep_pin_lock(rq_lockp(rq));
+ rf->cookie = lockdep_pin_lock(__rq_lockp(rq));
#ifdef CONFIG_SCHED_DEBUG
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
@@ -1364,12 +1381,12 @@ static inline void rq_unpin_lock(struct rq *rq, struct rq_flags *rf)
rf->clock_update_flags = RQCF_UPDATED;
#endif
- lockdep_unpin_lock(rq_lockp(rq), rf->cookie);
+ lockdep_unpin_lock(__rq_lockp(rq), rf->cookie);
}
static inline void rq_repin_lock(struct rq *rq, struct rq_flags *rf)
{
- lockdep_repin_lock(rq_lockp(rq), rf->cookie);
+ lockdep_repin_lock(__rq_lockp(rq), rf->cookie);
#ifdef CONFIG_SCHED_DEBUG
/*
@@ -2338,7 +2355,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
- if (rq_lockp(this_rq) == rq_lockp(busiest))
+ if (__rq_lockp(this_rq) == __rq_lockp(busiest))
return 0;
if (likely(raw_spin_rq_trylock(busiest)))
@@ -2370,9 +2387,9 @@ static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
- if (rq_lockp(this_rq) != rq_lockp(busiest))
+ if (__rq_lockp(this_rq) != __rq_lockp(busiest))
raw_spin_rq_unlock(busiest);
- lock_set_subclass(&rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
+ lock_set_subclass(&__rq_lockp(this_rq)->dep_map, 0, _RET_IP_);
}
static inline void double_lock(spinlock_t *l1, spinlock_t *l2)
@@ -2412,7 +2429,7 @@ static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
- if (rq_lockp(rq1) != rq_lockp(rq2))
+ if (__rq_lockp(rq1) != __rq_lockp(rq2))
raw_spin_rq_unlock(rq2);
else
__release(rq2->lock);