author     patrick <patrick@openbsd.org>  2020-08-03 14:33:06 +0000
committer  patrick <patrick@openbsd.org>  2020-08-03 14:33:06 +0000
commit     061da546b983eb767bad15e67af1174fb0bcf31c (patch)
tree       83c78b820819d70aa40c36d90447978b300078c5 /gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock
parent     Import LLVM 10.0.0 release including clang, lld and lldb. (diff)
Import LLVM 10.0.0 release including clang, lld and lldb.
ok hackroom, tested by plenty
Diffstat (limited to 'gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock')
-rw-r--r--  gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile               |  5
-rw-r--r--  gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py | 57
-rw-r--r--  gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp            | 78
3 files changed, 140 insertions, 0 deletions
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
new file mode 100644
index 00000000000..4b3467bc4e8
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
@@ -0,0 +1,5 @@
+CXX_SOURCES := locking.cpp
+CXXFLAGS_EXTRAS := -std=c++11
+ENABLE_THREADS := YES
+
+include Makefile.rules
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
new file mode 100644
index 00000000000..d7d963390b0
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
@@ -0,0 +1,57 @@
+"""
+Test that expr will time out and allow other threads to run if it blocks.
+"""
+
+from __future__ import print_function
+
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class ExprDoesntDeadlockTestCase(TestBase):
+
+ mydir = TestBase.compute_mydir(__file__)
+
+ @expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17946')
+ @add_test_categories(["basic_process"])
+ def test_with_run_command(self):
+ """Test that expr will time out and allow other threads to run if it blocks."""
+ self.build()
+ exe = self.getBuildArtifact("a.out")
+
+ # Have the debugger create a target from the executable.
+ target = self.dbg.CreateTarget(exe)
+ self.assertTrue(target, VALID_TARGET)
+
+ # Now create a breakpoint at the source line just before
+ # call_me_to_get_lock gets called.
+
+ main_file_spec = lldb.SBFileSpec("locking.cpp")
+ breakpoint = target.BreakpointCreateBySourceRegex(
+ 'Break here', main_file_spec)
+ if self.TraceOn():
+ print("breakpoint:", breakpoint)
+ self.assertTrue(breakpoint and
+ breakpoint.GetNumLocations() == 1,
+ VALID_BREAKPOINT)
+
+ # Now launch the process, and do not stop at entry point.
+ process = target.LaunchSimple(
+ None, None, self.get_process_working_directory())
+ self.assertTrue(process, PROCESS_IS_VALID)
+
+ # The process should now have a thread stopped at our breakpoint.
+ from lldbsuite.test.lldbutil import get_stopped_thread
+ thread = get_stopped_thread(process, lldb.eStopReasonBreakpoint)
+ self.assertTrue(
+ thread.IsValid(),
+ "There should be a thread stopped due to breakpoint condition")
+
+ frame0 = thread.GetFrameAtIndex(0)
+
+ var = frame0.EvaluateExpression("call_me_to_get_lock(get_int())")
+ self.assertTrue(var.IsValid())
+ self.assertEqual(var.GetValueAsSigned(0), 567)
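
For reference, the timeout-then-resume behaviour this test depends on can be
controlled explicitly through LLDB's SBExpressionOptions. The following is a
minimal sketch, not part of the imported test: the timeout values are made up,
and frame0 stands for a stopped frame obtained as in the test above.

    import lldb

    # By default, EvaluateExpression first runs the expression on the current
    # thread only; if that times out (here: because call_me_to_get_lock blocks
    # on a mutex held by another, suspended thread), it retries with all
    # threads resumed so the lock holder can make progress.
    options = lldb.SBExpressionOptions()
    options.SetOneThreadTimeoutInMicroSeconds(100000)  # single-thread attempt
    options.SetTimeoutInMicroSeconds(500000)           # overall limit
    options.SetTryAllThreads(True)                     # allow the fallback

    var = frame0.EvaluateExpression("call_me_to_get_lock(get_int())", options)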
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
new file mode 100644
index 00000000000..8288a668fe8
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
@@ -0,0 +1,78 @@
+#include <stdio.h>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+
+std::mutex contended_mutex;
+
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+
+// This function runs in a thread. The locking dance makes sure that by the
+// time the main thread reaches the thread_1.join() below, this thread has
+// definitely acquired the contended_mutex. The call_me_to_get_lock function
+// will therefore block trying to get the mutex, and can only succeed once
+// it signals this thread and this thread gets a chance to run, wake up from
+// its condition wait, and release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab and release this mutex; that ensures that the main thread is in its
+ // condition wait for it (since the wait is what drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue. We hold the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress till
+ // this thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ control_condition.wait(control_lock);
+
+}
+
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ contended_mutex.lock();
+ return ret_val;
+}
+
+int
+get_int() {
+ return 567;
+}
+
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ thread_started_condition.wait(thread_started_lock);
+
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread holds the contended_mutex and
+ // is sitting in its condition wait for the control condition. So there is
+ // no way our by-hand call to call_me_to_get_lock can proceed without
+ // letting the first thread run at least a little.
+
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}
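
The "locking dance" in locking.cpp is a general handshake pattern: the worker
provably holds the contended lock before main proceeds, and main provably
knows the worker is parked in a condition wait before signalling it. As a
standalone illustration (not part of the import), here is a minimal Python
sketch of the same handshake:

    import threading

    contended = threading.Lock()     # plays the role of contended_mutex
    started = threading.Condition()  # thread_started_mutex + condition
    control = threading.Condition()  # control_mutex + condition

    def worker():
        with contended:              # step 1: grab the contended lock
            with control:            # take control's lock before signalling
                with started:
                    started.notify() # main wakes, but cannot enter `control`
                control.wait()       # parks, releasing control's lock

    with started:                    # hold started's lock across start()
        t = threading.Thread(target=worker)
        t.start()
        started.wait()               # returns once worker holds `contended`

    with control:                    # acquirable only once worker is waiting
        control.notify()             # let the worker release `contended`

    with contended:                  # blocks until worker has released it
        pass
    t.join()

Like the original, the sketch uses predicate-less waits; the surrounding mutex
handshakes, not the waits themselves, provide the ordering guarantees.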