author      Boqun Feng <boqun.feng@gmail.com>        2020-08-07 15:42:37 +0800
committer   Peter Zijlstra <peterz@infradead.org>    2020-08-26 12:42:07 +0200
commit      ad56450db86413ff911eb527b5a49e04a4345e61 (patch)
tree        fed91c9cc78759489711e694c92b7e27c79ae12b /lib/locking-selftest.c
parent      Revert "locking/lockdep/selftests: Fix mixed read-write ABBA tests" (diff)
locking/selftest: Add test cases for queued_read_lock()
Add two self-test cases for the following scenarios:

        P0:                     P1:                     P2:

                                <in irq handler>
        spin_lock_irq(&slock)   read_lock(&rwlock)
                                                        write_lock_irq(&rwlock)
        read_lock(&rwlock)      spin_lock(&slock)

This is a deadlock: because of reader fairness, the read_lock() on P0 has to
queue behind P2's waiting writer and cannot get the lock.

        P0:                     P1:                     P2:

        <in irq handler>
        spin_lock(&slock)       read_lock(&rwlock)
                                                        write_lock(&rwlock)
        read_lock(&rwlock)      spin_lock_irq(&slock)

This is not a deadlock: the read_lock() on P0 can get the lock because, running
in an irq handler, it can use the unfair fastpath.

Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200807074238.1632519-19-boqun.feng@gmail.com
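To make the fairness argument concrete, below is a small user-space toy model
of the reader policy the reasoning above relies on. It is only an illustrative
sketch, not kernel code: the names rwlock_state and reader_can_acquire are
invented here, and the real logic lives in kernel/locking/qrwlock.c. The rule
being modelled: a reader in interrupt context only has to wait for a writer
that already holds the lock, while any other reader also queues behind a
writer that is merely waiting.

#include <stdbool.h>
#include <stdio.h>

/* Toy model (illustration only): the lock state a new reader cares about. */
struct rwlock_state {
        bool writer_held;       /* a writer currently owns the lock */
        bool writer_pending;    /* a writer is queued, waiting for readers */
};

/* Can a new reader acquire the lock right now? */
static bool reader_can_acquire(const struct rwlock_state *s, bool in_interrupt)
{
        if (in_interrupt)
                return !s->writer_held;                 /* unfair fastpath */
        return !s->writer_held && !s->writer_pending;   /* fair: queue behind the writer */
}

int main(void)
{
        /* P2's write_lock() is pending while P1 still holds the read lock. */
        struct rwlock_state s = { .writer_held = false, .writer_pending = true };

        /* Scenario 1: P0's read_lock() runs with irqs disabled, not in an irq handler. */
        printf("P0, process context (fair):  %s\n",
               reader_can_acquire(&s, false) ? "acquires" : "blocks");

        /* Scenario 2: P0's read_lock() runs inside the irq handler. */
        printf("P0, in irq handler (unfair): %s\n",
               reader_can_acquire(&s, true) ? "acquires" : "blocks");

        return 0;
}

Compiled and run, this reports that the process-context reader blocks (which
completes the cycle in the first scenario) while the in-irq reader acquires
the lock (so the second scenario does not deadlock).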
Diffstat (limited to 'lib/locking-selftest.c')
-rw-r--r--    lib/locking-selftest.c    104
1 file changed, 104 insertions(+), 0 deletions(-)
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 4264cf4b60bb..17f8f6f37165 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -2201,6 +2201,108 @@ static void ww_tests(void)
pr_cont("\n");
}
+
+/*
+ * <in hardirq handler>
+ * read_lock(&A);
+ *				<hardirq disable>
+ *				spin_lock(&B);
+ * spin_lock(&B);
+ *				read_lock(&A);
+ *
+ * is a deadlock.
+ */
+static void queued_read_lock_hardirq_RE_Er(void)
+{
+ HARDIRQ_ENTER();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <in hardirq handler>
+ * spin_lock(&B);
+ *				<hardirq disable>
+ *				read_lock(&A);
+ * read_lock(&A);
+ *				spin_lock(&B);
+ *
+ * is not a deadlock.
+ */
+static void queued_read_lock_hardirq_ER_rE(void)
+{
+ HARDIRQ_ENTER();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ read_lock(&rwlock_A);
+ LOCK(B);
+ UNLOCK(B);
+ read_unlock(&rwlock_A);
+ HARDIRQ_ENABLE();
+}
+
+/*
+ * <hardirq disable>
+ * spin_lock(&B);
+ *				read_lock(&A);
+ *				<in hardirq handler>
+ *				spin_lock(&B);
+ * read_lock(&A);
+ *
+ * is a deadlock, because the two read_lock()s are both non-recursive readers.
+ */
+static void queued_read_lock_hardirq_inversion(void)
+{
+
+ HARDIRQ_ENTER();
+ LOCK(B);
+ UNLOCK(B);
+ HARDIRQ_EXIT();
+
+ HARDIRQ_DISABLE();
+ LOCK(B);
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+ UNLOCK(B);
+ HARDIRQ_ENABLE();
+
+ read_lock(&rwlock_A);
+ read_unlock(&rwlock_A);
+}
+
+static void queued_read_lock_tests(void)
+{
+ printk(" --------------------------------------------------------------------------\n");
+ printk(" | queued read lock tests |\n");
+ printk(" ---------------------------\n");
+ print_testname("hardirq read-lock/lock-read");
+ dotest(queued_read_lock_hardirq_RE_Er, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq lock-read/read-lock");
+ dotest(queued_read_lock_hardirq_ER_rE, SUCCESS, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+
+ print_testname("hardirq inversion");
+ dotest(queued_read_lock_hardirq_inversion, FAILURE, LOCKTYPE_RWLOCK);
+ pr_cont("\n");
+}
+
void locking_selftest(void)
{
/*
@@ -2318,6 +2420,8 @@ void locking_selftest(void)
/*
* queued_read_lock() specific test cases can be put here
*/
+ if (IS_ENABLED(CONFIG_QUEUED_RWLOCKS))
+ queued_read_lock_tests();
if (unexpected_testcase_failures) {
printk("-----------------------------------------------------------------\n");