author    Peter Zijlstra <peterz@infradead.org>  2019-08-01 15:30:28 +0200
committer Peter Zijlstra <peterz@infradead.org>  2019-08-06 12:49:16 +0200
commit    24a376d65177009a4dd8d846543c5dc69f5c4ced
tree      4b228011e49f27b53e62594649dfa9e5becda37c /arch/x86/include/asm/qspinlock.h
parent    locking/rwsem: Check for operations on an uninitialized rwsem
locking/qspinlock,x86: Clarify virt_spin_lock_key
Add a few comments to clarify how this is supposed to work.

Reported-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juergen Gross <jgross@suse.com>
Diffstat (limited to 'arch/x86/include/asm/qspinlock.h')
-rw-r--r--  arch/x86/include/asm/qspinlock.h | 15 +++++++++++++++
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
index bd5ac6cc37db..444d6fd9a6d8 100644
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -63,10 +63,25 @@ static inline bool vcpu_is_preempted(long cpu)
 #endif
 
 #ifdef CONFIG_PARAVIRT
+/*
+ * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
+ *
+ * Native (and PV wanting native due to vCPU pinning) should disable this key.
+ * It is done in this backwards fashion to only have a single direction change,
+ * which removes ordering between native_pv_lock_init() and HV setup.
+ */
 DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 
 void native_pv_lock_init(void) __init;
 
+/*
+ * Shortcut for the queued_spin_lock_slowpath() function that allows
+ * virt to hijack it.
+ *
+ * Returns:
+ * true - lock has been negotiated, all done;
+ * false - queued_spin_lock_slowpath() will do its thing.
+ */
 #define virt_spin_lock virt_spin_lock
 static inline bool virt_spin_lock(struct qspinlock *lock)
 {
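
For context (not part of this patch): the "single direction change" described in the first comment happens on the native side, where the default-true key is disabled exactly once. A minimal sketch, modeled on arch/x86/kernel/paravirt.c of this era; the guard condition and placement may differ between kernel versions:

/* Sketch only; not part of this commit. */
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);	/* starts enabled */

void __init native_pv_lock_init(void)
{
	/*
	 * Bare metal never wants the test-and-set fallback, so the key
	 * is flipped exactly once, from true to false. Because it only
	 * ever moves in that one direction, it does not matter whether
	 * hypervisor setup runs before or after this function.
	 */
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}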
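
The false return lands in the regular slowpath, which consults the hijack before touching any queueing machinery. A simplified sketch of the call site in kernel/locking/qspinlock.c (the real function takes the same arguments but does considerably more):

/* Simplified sketch of the call site; not part of this commit. */
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	/*
	 * Give virt a chance to hijack the lock: a true return means
	 * the test-and-set fallback acquired it and we are done; false
	 * (native, or a guest that disabled the key) falls through to
	 * the regular pending-bit/MCS queueing.
	 */
	if (virt_spin_lock(lock))
		return;

	/* ... regular slowpath elided ... */
}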