path: root/kernel/locking/mcs_spinlock.h
author	Peter Zijlstra <peterz@infradead.org>	2014-01-29 12:51:42 +0100
committer	Ingo Molnar <mingo@kernel.org>	2014-03-11 12:14:56 +0100
commit	fb0527bd5ea99bfeb2dd91e3c1433ecf745d6b99 (patch)
tree	b3ab4c067c035688d4295fdcadf00170465db7df /kernel/locking/mcs_spinlock.h
parent	locking/mutexes: Unlock the mutex without the wait_lock (diff)
locking/mutexes: Introduce cancelable MCS lock for adaptive spinning
Since we want a task waiting for a mutex_lock() to go to sleep and
reschedule on need_resched(), we must be able to abort the
mcs_spin_lock() around the adaptive spin. Therefore, implement a
cancelable MCS lock.

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: chegu_vinod@hp.com
Cc: paulmck@linux.vnet.ibm.com
Cc: Waiman.Long@hp.com
Cc: torvalds@linux-foundation.org
Cc: tglx@linutronix.de
Cc: riel@redhat.com
Cc: akpm@linux-foundation.org
Cc: davidlohr@hp.com
Cc: hpa@zytor.com
Cc: andi@firstfloor.org
Cc: aswin@hp.com
Cc: scott.norton@hp.com
Cc: Jason Low <jason.low2@hp.com>
Link: http://lkml.kernel.org/n/tip-62hcl5wxydmjzd182zhvk89m@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
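To make the intended usage concrete, here is a rough sketch of the
adaptive-spin pattern this enables. It is illustrative only, not code
from this series: the mutex's ->osq field and the owner_running() /
try_acquire() helpers are hypothetical stand-ins for the real owner
tracking and acquisition fastpath.

/*
 * Hypothetical caller sketch (not part of this patch). Assumes the
 * mutex embeds a struct optimistic_spin_queue *osq; owner_running()
 * and try_acquire() are placeholder helpers.
 */
static bool mutex_optimistic_spin_sketch(struct mutex *lock)
{
	/*
	 * osq_lock() returns false when the spin was cancelled, e.g.
	 * because need_resched() fired while we sat in the queue.
	 */
	if (!osq_lock(&lock->osq))
		return false;

	while (owner_running(lock) && !need_resched()) {
		if (try_acquire(lock)) {
			osq_unlock(&lock->osq);
			return true;	/* acquired while spinning */
		}
		arch_mutex_cpu_relax();
	}

	osq_unlock(&lock->osq);
	return false;	/* fall back to the sleeping slow path */
}

The key property is that osq_lock() itself can fail: a plain MCS waiter
cannot leave the queue until its predecessor hands over the lock, which
is exactly what a task that needs to reschedule cannot afford to wait
for.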
Diffstat (limited to 'kernel/locking/mcs_spinlock.h')
-rw-r--r--	kernel/locking/mcs_spinlock.h	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index f2a5c6360083..a2dbac4aca6b 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -111,4 +111,19 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
 	arch_mcs_spin_unlock_contended(&next->locked);
 }
 
+/*
+ * Cancellable version of the MCS lock above.
+ *
+ * Intended for adaptive spinning of sleeping locks:
+ * mutex_lock()/rwsem_down_{read,write}() etc.
+ */
+
+struct optimistic_spin_queue {
+ struct optimistic_spin_queue *next, *prev;
+ int locked; /* 1 if lock acquired */
+};
+
+extern bool osq_lock(struct optimistic_spin_queue **lock);
+extern void osq_unlock(struct optimistic_spin_queue **lock);
+
 #endif /* __LINUX_MCS_SPINLOCK_H */
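The cancellation itself lives in the accompanying
kernel/locking/mcs_spinlock.c (this diffstat is limited to the header).
A condensed, paraphrased sketch of osq_lock() follows; osq_node is the
per-CPU queue node and osq_wait_next() is the helper that stabilizes
the queue tail, both defined in that file:

bool osq_lock(struct optimistic_spin_queue **lock)
{
	struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_queue *prev, *next;

	node->locked = 0;
	node->next = NULL;

	/* Atomically swing the tail to us; an empty queue means we own the lock. */
	node->prev = prev = xchg(lock, node);
	if (prev == NULL)
		return true;

	ACCESS_ONCE(prev->next) = node;

	/* Spin until handed the lock, but bail when we must reschedule. */
	while (!smp_load_acquire(&node->locked)) {
		if (need_resched())
			goto unqueue;
		arch_mutex_cpu_relax();
	}
	return true;

unqueue:
	/*
	 * Step A -- stabilize @prev: clear prev->next so the unlock path
	 * cannot hand us the lock while we unlink. The cmpxchg can only
	 * fail against a racing unlock, in which case we will observe
	 * node->locked becoming true and take the lock after all.
	 */
	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;
		if (smp_load_acquire(&node->locked))
			return true;
		arch_mutex_cpu_relax();
		/* A concurrent unqueue's step C may give us a new @prev. */
		prev = ACCESS_ONCE(node->prev);
	}

	/* Step B -- stabilize @next, or move the tail back to @prev. */
	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/* Step C -- splice @prev and @next together; we are unlinked. */
	ACCESS_ONCE(next->prev) = prev;
	ACCESS_ONCE(prev->next) = next;

	return false;	/* cancelled: the caller should go to sleep */
}

The extra prev pointer in struct optimistic_spin_queue, absent from the
plain struct mcs_spinlock, is what makes this unlinking possible: a
waiter can splice its neighbours together instead of having to wait for
the lock to reach it.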