about summary refs log tree commit diff stats
path: root/kernel/locking
diff options
context:
space:
mode:
author    Peter Zijlstra <peterz@infradead.org>  2021-08-15 23:28:49 +0200
committer Ingo Molnar <mingo@kernel.org>         2021-08-17 19:04:57 +0200
commit    9934ccc75cec2bafac552c2130835630530c4f7e (patch)
tree      fa86960b6df2226a692c0fa07cd600fc53f68682 /kernel/locking
parent    locking/ww_mutex: Abstract out waiter enqueueing (diff)
download  linux-dev-9934ccc75cec2bafac552c2130835630530c4f7e.tar.xz
          linux-dev-9934ccc75cec2bafac552c2130835630530c4f7e.zip
locking/ww_mutex: Abstract out mutex accessors
Move the mutex-related accesses from various ww_mutex functions into
helper functions so they can be substituted for rtmutex-based ww_mutex
later.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211304.622477030@linutronix.de
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/ww_mutex.h | 16 ++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h
index f5aaf2f19370..842dbed0a8b2 100644
--- a/kernel/locking/ww_mutex.h
+++ b/kernel/locking/ww_mutex.h
@@ -53,6 +53,18 @@ __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_wa
__mutex_add_waiter(lock, waiter, p);
}
+static inline struct task_struct *
+__ww_mutex_owner(struct mutex *lock)
+{
+ return __mutex_owner(lock);
+}
+
+static inline bool
+__ww_mutex_has_waiters(struct mutex *lock)
+{
+ return atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS;
+}
+
/*
* Wait-Die:
* The newer transactions are killed when:
@@ -157,7 +169,7 @@ static bool __ww_mutex_wound(struct mutex *lock,
struct ww_acquire_ctx *ww_ctx,
struct ww_acquire_ctx *hold_ctx)
{
- struct task_struct *owner = __mutex_owner(lock);
+ struct task_struct *owner = __ww_mutex_owner(lock);
lockdep_assert_held(&lock->wait_lock);
@@ -253,7 +265,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
* __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
* and/or !empty list.
*/
- if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
+ if (likely(!__ww_mutex_has_waiters(&lock->base)))
return;
/*