Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/mutex.c	13
1 file changed, 4 insertions(+), 9 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 8bb2304bb78d..6c0d3040e4dc 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -587,10 +587,11 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 	bool first = false;
+	struct ww_mutex *ww;
 	int ret;
 
 	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+		ww = container_of(lock, struct ww_mutex, base);
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -602,12 +603,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx) {
-			struct ww_mutex *ww;
-			ww = container_of(lock, struct ww_mutex, base);
-
+		if (use_ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
-		}
 		preempt_enable();
 		return 0;
 	}
@@ -691,10 +688,8 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
 
-	if (use_ww_ctx) {
-		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
+	if (use_ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
-	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
 	preempt_enable();