diff options
Diffstat (limited to 'gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server')
46 files changed, 8040 insertions, 0 deletions
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format new file mode 100644 index 00000000000..9b3aa8b7213 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/.clang-format @@ -0,0 +1 @@ +BasedOnStyle: LLVM diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/Makefile new file mode 100644 index 00000000000..0c441eda98b --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/Makefile @@ -0,0 +1,6 @@ +CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS +ENABLE_THREADS := YES +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py new file mode 100644 index 00000000000..5ea55c3f644 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestAppleSimulatorOSType.py @@ -0,0 +1,122 @@ + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +import json + +class TestAppleSimulatorOSType(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def check_simulator_ostype(self, sdk, platform, arch='x86_64'): + sim_devices_str = subprocess.check_output(['xcrun', 'simctl', 'list', + '-j', 'devices']).decode("utf-8") + sim_devices = json.loads(sim_devices_str)['devices'] + # Find an available simulator for the requested platform + deviceUDID = None + for simulator in sim_devices: + if isinstance(simulator,dict): + runtime = simulator['name'] + devices = simulator['devices'] + else: + runtime = simulator + devices = 
sim_devices[simulator] + if not platform in runtime.lower(): + continue + for device in devices: + if 'availability' in device and device['availability'] != '(available)': + continue + if 'isAvailable' in device and device['isAvailable'] != True: + continue + deviceUDID = device['udid'] + break + if deviceUDID != None: + break + + # Launch the process using simctl + self.assertIsNotNone(deviceUDID) + exe_name = 'test_simulator_platform_{}'.format(platform) + sdkroot = subprocess.check_output(['xcrun', '--show-sdk-path', '--sdk', + sdk]).decode("utf-8") + self.build(dictionary={ 'EXE': exe_name, 'SDKROOT': sdkroot.strip(), + 'ARCH': arch }) + exe_path = self.getBuildArtifact(exe_name) + sim_launcher = subprocess.Popen(['xcrun', 'simctl', 'spawn', '-s', + deviceUDID, exe_path, + 'print-pid', 'sleep:10'], + stderr=subprocess.PIPE) + # Get the PID from the process output + pid = None + while not pid: + stderr = sim_launcher.stderr.readline().decode("utf-8") + if stderr == '': + continue + m = re.match(r"PID: (.*)", stderr) + self.assertIsNotNone(m) + pid = int(m.group(1)) + + # Launch debug monitor attaching to the simulated process + self.init_debugserver_test() + server = self.connect_to_debug_monitor(attach_pid=pid) + + # Setup packet sequences + self.add_no_ack_remote_stream() + self.add_process_info_collection_packets() + self.test_sequence.add_log_lines( + ["read packet: " + + "$jGetLoadedDynamicLibrariesInfos:{\"fetch_all_solibs\" : true}]#ce", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "dylib_info_raw"}}], + True) + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Check that ostype is correct + self.assertEquals(process_info['ostype'], platform) + + # Now for dylibs + dylib_info_raw = context.get("dylib_info_raw") + dylib_info = 
json.loads(self.decode_gdbremote_binary(dylib_info_raw)) + images = dylib_info['images'] + + image_info = None + for image in images: + if image['pathname'] != exe_path: + continue + image_info = image + break + + self.assertIsNotNone(image_info) + self.assertEquals(image['min_version_os_name'], platform) + + + @apple_simulator_test('iphone') + @debugserver_test + @skipIfRemote + def test_simulator_ostype_ios(self): + self.check_simulator_ostype(sdk='iphonesimulator', + platform='ios') + + @apple_simulator_test('appletv') + @debugserver_test + @skipIfRemote + def test_simulator_ostype_tvos(self): + self.check_simulator_ostype(sdk='appletvsimulator', + platform='tvos') + + @apple_simulator_test('watch') + @debugserver_test + @skipIfRemote + def test_simulator_ostype_watchos(self): + self.check_simulator_ostype(sdk='watchsimulator', + platform='watchos', arch='i386') diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py new file mode 100644 index 00000000000..dbb83d63480 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAttach.py @@ -0,0 +1,67 @@ + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def attach_with_vAttach(self): + # Start the inferior, start the debug monitor, nothing is attached yet. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["sleep:60"]) + self.assertIsNotNone(procs) + + # Make sure the target process has been launched. 
+ inferior = procs.get("inferior") + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + self.assertTrue( + lldbgdbserverutils.process_is_running( + inferior.pid, True)) + + # Add attach packets. + self.test_sequence.add_log_lines([ + # Do the attach. + "read packet: $vAttach;{:x}#00".format(inferior.pid), + # Expect a stop notification from the attach. + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$", + "capture": {1: "stop_signal_hex"}}, + ], True) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. + pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, inferior.pid) + + @debugserver_test + def test_attach_with_vAttach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() + + @expectedFailureNetBSD + @llgs_test + def test_attach_with_vAttach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach_manually() + self.attach_with_vAttach() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py new file mode 100644 index 00000000000..1a3a2b29365 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteAuxvSupport.py @@ -0,0 +1,227 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class 
TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read" + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def has_auxv_support(self): + inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + # Don't do anything until we match the launched inferior main entry output. + # Then immediately interrupt the process. + # This prevents auxv data being asked for before it's ready and leaves + # us in a stopped state. + self.test_sequence.add_log_lines([ + # Start the inferior... + "read packet: $c#63", + # ... match output.... + {"type": "output_match", "regex": self.maybe_strict_output_regex( + r"message:main entered\r\n")}, + ], True) + # ... then interrupt. + self.add_interrupt_packets() + self.add_qSupported_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + features = self.parse_qSupported_response(context) + return self.AUXV_SUPPORT_FEATURE_NAME in features and features[ + self.AUXV_SUPPORT_FEATURE_NAME] == "+" + + def get_raw_auxv_data(self): + # Start up llgs and inferior, and check for auxv support. + if not self.has_auxv_support(): + self.skipTest("auxv data not supported") + + # Grab pointer size for target. We'll assume that is equivalent to an unsigned long on the target. + # Auxv is specified in terms of pairs of unsigned longs. + self.reset_test_sequence() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + proc_info = self.parse_process_info_response(context) + self.assertIsNotNone(proc_info) + self.assertTrue("ptrsize" in proc_info) + word_size = int(proc_info["ptrsize"]) + + OFFSET = 0 + LENGTH = 0x400 + + # Grab the auxv data. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format( + OFFSET, + LENGTH), + { + "direction": "send", + "regex": re.compile( + r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", + re.MULTILINE | re.DOTALL), + "capture": { + 1: "response_type", + 2: "content_raw"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure we end up with all auxv data in one packet. + # FIXME don't assume it all comes back in one packet. + self.assertEqual(context.get("response_type"), "l") + + # Decode binary data. + content_raw = context.get("content_raw") + self.assertIsNotNone(content_raw) + return (word_size, self.decode_gdbremote_binary(content_raw)) + + def supports_auxv(self): + # When non-auxv platforms support llgs, skip the test on platforms + # that don't support auxv. + self.assertTrue(self.has_auxv_support()) + + # + # We skip the "supports_auxv" test on debugserver. The rest of the tests + # appropriately skip the auxv tests if the support flag is not present + # in the qSupported response, so the debugserver test bits are still there + # in case debugserver code one day does have auxv support and thus those + # tests don't get skipped. + # + + @skipIfWindows # no auxv support. + @llgs_test + def test_supports_auxv_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.supports_auxv() + + def auxv_data_is_correct_size(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Ensure auxv data is a multiple of 2*word_size (there should be two + # unsigned long fields per auxv entry). 
+ self.assertEqual(len(auxv_data) % (2 * word_size), 0) + # print("auxv contains {} entries".format(len(auxv_data) / (2*word_size))) + + @debugserver_test + def test_auxv_data_is_correct_size_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + @skipIfWindows + @expectedFailureNetBSD + @llgs_test + def test_auxv_data_is_correct_size_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_data_is_correct_size() + + def auxv_keys_look_valid(self): + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. + self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + # Verify keys look reasonable. + for auxv_key in auxv_dict: + self.assertTrue(auxv_key >= 1) + self.assertTrue(auxv_key <= 1000) + # print("auxv dict: {}".format(auxv_dict)) + + @debugserver_test + def test_auxv_keys_look_valid_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + @skipIfWindows + @expectedFailureNetBSD + @llgs_test + def test_auxv_keys_look_valid_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_keys_look_valid() + + def auxv_chunked_reads_work(self): + # Verify that multiple smaller offset,length reads of auxv data + # return the same data as a single larger read. + + # Grab the auxv data with a single large read here. + (word_size, auxv_data) = self.get_raw_auxv_data() + self.assertIsNotNone(auxv_data) + + # Grab endian. 
+ self.reset_test_sequence() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data) + self.assertIsNotNone(auxv_dict) + + iterated_auxv_data = self.read_binary_data_in_chunks( + "qXfer:auxv:read::", 2 * word_size) + self.assertIsNotNone(iterated_auxv_data) + + auxv_dict_iterated = self.build_auxv_dict( + endian, word_size, iterated_auxv_data) + self.assertIsNotNone(auxv_dict_iterated) + + # Verify both types of data collection returned same content. + self.assertEqual(auxv_dict_iterated, auxv_dict) + + @debugserver_test + def test_auxv_chunked_reads_work_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() + + @skipIfWindows + @expectedFailureNetBSD + @llgs_test + def test_auxv_chunked_reads_work_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.auxv_chunked_reads_work() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py new file mode 100644 index 00000000000..24fb0d58b8f --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExitCode.py @@ -0,0 +1,126 @@ + +# lldb test suite imports +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import TestBase + +# gdb-remote-specific imports +import lldbgdbserverutils +from gdbremote_testcase import GdbRemoteTestCaseBase + + +class TestGdbRemoteExitCode(GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + FAILED_LAUNCH_CODE = "E08" + + def get_launch_fail_reason(self): + 
self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $qLaunchSuccess#00"], + True) + self.test_sequence.add_log_lines( + [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "launch_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + return context.get("launch_result")[1:] + + def start_inferior(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["read packet: %s" % lldbgdbserverutils.build_gdbremote_A_packet( + launch_args)], + True) + self.test_sequence.add_log_lines( + [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "A_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + launch_result = context.get("A_result") + self.assertIsNotNone(launch_result) + if launch_result == self.FAILED_LAUNCH_CODE: + fail_reason = self.get_launch_fail_reason() + self.fail("failed to launch inferior: " + fail_reason) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_start_inferior_debugserver(self): + self.init_debugserver_test() + self.build() + self.start_inferior() + + @llgs_test + def test_start_inferior_llgs(self): + self.init_llgs_test() + self.build() + self.start_inferior() + + def inferior_exit_0(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work 
on ios etc yet + def test_inferior_exit_0_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_exit_0() + + @llgs_test + def test_inferior_exit_0_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_exit_0() + + def inferior_exit_42(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + RETVAL = 42 + + # build launch args + launch_args += ["retval:%d" % RETVAL] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + "send packet: $W{0:02x}#00".format(RETVAL)], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_inferior_exit_42_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_exit_42() + + @llgs_test + def test_inferior_exit_42_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_exit_42() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py new file mode 100644 index 00000000000..7d8e28c745c --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteExpeditedRegisters.py @@ -0,0 +1,162 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteExpeditedRegisters( + gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + + def gather_expedited_registers(self): + # Setup the stub and set the 
gdb remote command stream. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) + self.test_sequence.add_log_lines([ + # Start up the inferior. + "read packet: $c#63", + # Immediately tell it to stop. We want to see what it reports. + "read packet: {}".format(chr(3)), + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_result", + 2: "key_vals_text"}}, + ], True) + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out expedited registers. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + + expedited_registers = self.extract_registers_from_stop_notification( + key_vals_text) + self.assertIsNotNone(expedited_registers) + + return expedited_registers + + def stop_notification_contains_generic_register( + self, generic_register_name): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + self.assertIsNotNone(expedited_registers) + self.assertTrue(len(expedited_registers) > 0) + + # Gather target register infos. + reg_infos = self.gather_register_infos() + + # Find the generic register. + reg_info = self.find_generic_register_with_name( + reg_infos, generic_register_name) + self.assertIsNotNone(reg_info) + + # Ensure the expedited registers contained it. + self.assertTrue(reg_info["lldb_register_index"] in expedited_registers) + # print("{} reg_info:{}".format(generic_register_name, reg_info)) + + def stop_notification_contains_any_registers(self): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + # Verify we have at least one expedited register. 
+ self.assertTrue(len(expedited_registers) > 0) + + @debugserver_test + def test_stop_notification_contains_any_registers_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + @llgs_test + def test_stop_notification_contains_any_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_any_registers() + + def stop_notification_contains_no_duplicate_registers(self): + # Generate a stop reply, parse out expedited registers from stop + # notification. + expedited_registers = self.gather_expedited_registers() + # Verify no expedited register was specified multiple times. + for (reg_num, value) in list(expedited_registers.items()): + if (isinstance(value, list)) and (len(value) > 0): + self.fail( + "expedited register number {} specified more than once ({} times)".format( + reg_num, len(value))) + + @debugserver_test + def test_stop_notification_contains_no_duplicate_registers_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + @llgs_test + def test_stop_notification_contains_no_duplicate_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_no_duplicate_registers() + + def stop_notification_contains_pc_register(self): + self.stop_notification_contains_generic_register("pc") + + @debugserver_test + def test_stop_notification_contains_pc_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + @llgs_test + def test_stop_notification_contains_pc_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_pc_register() + + # powerpc64 
has no FP register + @skipIf(triple='^powerpc64') + def stop_notification_contains_fp_register(self): + self.stop_notification_contains_generic_register("fp") + + @debugserver_test + def test_stop_notification_contains_fp_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + @llgs_test + def test_stop_notification_contains_fp_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_fp_register() + + def stop_notification_contains_sp_register(self): + self.stop_notification_contains_generic_register("sp") + + @debugserver_test + def test_stop_notification_contains_sp_register_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() + + @llgs_test + def test_stop_notification_contains_sp_register_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_notification_contains_sp_register() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py new file mode 100644 index 00000000000..832096a0ff5 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteHostInfo.py @@ -0,0 +1,132 @@ +from __future__ import print_function + +# lldb test suite imports +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import TestBase + +# gdb-remote-specific imports +import lldbgdbserverutils +from gdbremote_testcase import GdbRemoteTestCaseBase + + +class TestGdbRemoteHostInfo(GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + KNOWN_HOST_INFO_KEYS = set([ + "arch", + "cputype", + "cpusubtype", + "distribution_id", + "endian", + "hostname", + "ostype", + 
"os_build", + "os_kernel", + "os_version", + "maccatalyst_version", + "ptrsize", + "triple", + "vendor", + "watchpoint_exceptions_received", + "default_packet_timeout", + ]) + + DARWIN_REQUIRED_HOST_INFO_KEYS = set([ + "cputype", + "cpusubtype", + "endian", + "ostype", + "ptrsize", + "vendor", + "watchpoint_exceptions_received" + ]) + + def add_host_info_collection_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qHostInfo#9b", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "host_info_raw"}}], + True) + + def parse_host_info_response(self, context): + # Ensure we have a host info response. + self.assertIsNotNone(context) + host_info_raw = context.get("host_info_raw") + self.assertIsNotNone(host_info_raw) + + # Pull out key:value; pairs. + host_info_dict = {match.group(1): match.group(2) + for match in re.finditer(r"([^:]+):([^;]+);", + host_info_raw)} + + import pprint + print("\nqHostInfo response:") + pprint.pprint(host_info_dict) + + # Validate keys are known. + for (key, val) in list(host_info_dict.items()): + self.assertTrue(key in self.KNOWN_HOST_INFO_KEYS, + "unknown qHostInfo key: " + key) + self.assertIsNotNone(val) + + # Return the key:val pairs. + return host_info_dict + + def get_qHostInfo_response(self): + # Launch the debug monitor stub, attaching to the inferior. + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + self.add_no_ack_remote_stream() + + # Request qHostInfo and get response + self.add_host_info_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse qHostInfo response. 
+ host_info = self.parse_host_info_response(context) + self.assertIsNotNone(host_info) + self.assertGreater(len(host_info), 0, "qHostInfo should have returned " + "at least one key:val pair.") + return host_info + + def validate_darwin_minimum_host_info_keys(self, host_info_dict): + self.assertIsNotNone(host_info_dict) + missing_keys = [key for key in self.DARWIN_REQUIRED_HOST_INFO_KEYS + if key not in host_info_dict] + self.assertEquals(0, len(missing_keys), + "qHostInfo is missing the following required " + "keys: " + str(missing_keys)) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_qHostInfo_returns_at_least_one_key_val_pair_debugserver(self): + self.init_debugserver_test() + self.build() + self.get_qHostInfo_response() + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @llgs_test + def test_qHostInfo_returns_at_least_one_key_val_pair_llgs(self): + self.init_llgs_test() + self.build() + self.get_qHostInfo_response() + + @skipUnlessDarwin + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_qHostInfo_contains_darwin_required_keys_debugserver(self): + self.init_debugserver_test() + self.build() + host_info_dict = self.get_qHostInfo_response() + self.validate_darwin_minimum_host_info_keys(host_info_dict) + + @skipUnlessDarwin + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @llgs_test + def test_qHostInfo_contains_darwin_required_keys_llgs(self): + self.init_llgs_test() + self.build() + host_info_dict = self.get_qHostInfo_response() + self.validate_darwin_minimum_host_info_keys(host_info_dict) diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py new 
file mode 100644 index 00000000000..48f919aa94b --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteKill.py @@ -0,0 +1,60 @@ + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteKill(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + + def attach_commandline_kill_after_initial_stop(self): + reg_expr = r"^\$[XW][0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}" + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines([ + "read packet: $k#6b", + {"direction": "send", "regex": reg_expr}, + ], True) + + if self.stub_sends_two_stop_notifications_on_kill: + # Add an expectation for a second X result for stubs that send two + # of these. + self.test_sequence.add_log_lines([ + {"direction": "send", "regex": reg_expr}, + ], True) + + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to + # clear. + time.sleep(self._WAIT_TIMEOUT) + + if not lldb.remote_platform: + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not + # running. 
+ self.assertFalse( + lldbgdbserverutils.process_is_running( + procs["inferior"].pid, False)) + + @debugserver_test + def test_attach_commandline_kill_after_initial_stop_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() + + @expectedFailureNetBSD + @llgs_test + def test_attach_commandline_kill_after_initial_stop_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_kill_after_initial_stop() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py new file mode 100644 index 00000000000..5e94dbcf922 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteModuleInfo.py @@ -0,0 +1,43 @@ + +import json +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.support import seven +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteModuleInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def module_info(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + context = self.expect_gdbremote_sequence() + info = self.parse_process_info_response(context) + + self.test_sequence.add_log_lines([ + 'read packet: $jModulesInfo:%s]#00' % json.dumps( + [{"file":lldbutil.append_to_process_working_directory(self, "a.out"), + "triple":seven.unhexlify(info["triple"])}]), + {"direction": "send", + "regex": r'^\$\[{(.*)}\]\]#[0-9A-Fa-f]{2}', + "capture": {1: "spec"}}, + ], True) + + context = self.expect_gdbremote_sequence() + spec = context.get("spec") + self.assertRegexpMatches(spec, '"file_path":".*"') + self.assertRegexpMatches(spec, '"file_offset":\d+') + 
self.assertRegexpMatches(spec, '"file_size":\d+') + self.assertRegexpMatches(spec, '"triple":"\w*-\w*-.*"') + self.assertRegexpMatches(spec, '"uuid":"[A-Fa-f0-9]+"') + + @llgs_test + def test_module_info(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.module_info() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py new file mode 100644 index 00000000000..bc793a36e99 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteProcessInfo.py @@ -0,0 +1,209 @@ + + + +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def qProcessInfo_returns_running_process(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id looks reasonable. + pid_text = process_info.get("pid") + self.assertIsNotNone(pid_text) + pid = int(pid_text, base=16) + self.assertNotEqual(0, pid) + + # If possible, verify that the process is running. 
+ self.assertTrue(lldbgdbserverutils.process_is_running(pid, True)) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_returns_running_process_debugserver(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_returns_running_process() + + @llgs_test + def test_qProcessInfo_returns_running_process_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_returns_running_process() + + def attach_commandline_qProcessInfo_reports_correct_pid(self): + procs = self.prep_debug_monitor_and_inferior() + self.assertIsNotNone(procs) + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence(timeout_seconds=self._DEFAULT_TIMEOUT) + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id matches what we expected. 
+ pid_text = process_info.get('pid', None) + self.assertIsNotNone(pid_text) + reported_pid = int(pid_text, base=16) + self.assertEqual(reported_pid, procs["inferior"].pid) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_attach_commandline_qProcessInfo_reports_correct_pid_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + @expectedFailureNetBSD + @llgs_test + def test_attach_commandline_qProcessInfo_reports_correct_pid_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_qProcessInfo_reports_correct_pid() + + def qProcessInfo_reports_valid_endian(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the process id looks reasonable. 
+ endian = process_info.get("endian") + self.assertIsNotNone(endian) + self.assertTrue(endian in ["little", "big", "pdp"]) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_reports_valid_endian_debugserver(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_reports_valid_endian() + + @llgs_test + def test_qProcessInfo_reports_valid_endian_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_reports_valid_endian() + + def qProcessInfo_contains_keys(self, expected_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the expected keys are present and non-None within the process + # info. 
+ missing_key_set = set() + for expected_key in expected_key_set: + if expected_key not in process_info: + missing_key_set.add(expected_key) + + self.assertEqual( + missing_key_set, + set(), + "the listed keys are missing in the qProcessInfo result") + + def qProcessInfo_does_not_contain_keys(self, absent_key_set): + procs = self.prep_debug_monitor_and_inferior() + self.add_process_info_collection_packets() + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info response + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + + # Ensure the unexpected keys are not present + unexpected_key_set = set() + for unexpected_key in absent_key_set: + if unexpected_key in process_info: + unexpected_key_set.add(unexpected_key) + + self.assertEqual( + unexpected_key_set, + set(), + "the listed keys were present but unexpected in qProcessInfo result") + + @skipUnlessDarwin + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_contains_cputype_cpusubtype_debugserver_darwin(self): + self.init_debugserver_test() + self.build() + self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) + + @skipUnlessDarwin + @llgs_test + def test_qProcessInfo_contains_cputype_cpusubtype_llgs_darwin(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype'])) + + @llgs_test + def test_qProcessInfo_contains_triple_ppid_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_contains_keys(set(['triple', 'parent-pid'])) + + @skipUnlessDarwin + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qProcessInfo_does_not_contain_triple_debugserver_darwin(self): + self.init_debugserver_test() + self.build() + # We don't expect to see triple on darwin. 
If we do, we'll prefer triple + # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup + # for the remote Host and Process. + self.qProcessInfo_does_not_contain_keys(set(['triple'])) + + @skipUnlessDarwin + @llgs_test + def test_qProcessInfo_does_not_contain_triple_llgs_darwin(self): + self.init_llgs_test() + self.build() + # We don't expect to see triple on darwin. If we do, we'll prefer triple + # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup + # for the remote Host and Process. + self.qProcessInfo_does_not_contain_keys(set(['triple'])) + + @skipIfDarwin + @llgs_test + def test_qProcessInfo_does_not_contain_cputype_cpusubtype_llgs(self): + self.init_llgs_test() + self.build() + self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype'])) diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py new file mode 100644 index 00000000000..2543ed6e902 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteRegisterState.py @@ -0,0 +1,128 @@ +from __future__ import print_function + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteRegisterState(gdbremote_testcase.GdbRemoteTestCaseBase): + """Test QSaveRegisterState/QRestoreRegisterState support.""" + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def grp_register_save_restore_works(self, with_suffix): + # Start up the process, use thread suffix, grab main thread id. 
+ inferior_args = ["message:main entered", "sleep:5"] + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + self.add_process_info_collection_packets() + self.add_register_info_collection_packets() + if with_suffix: + self.add_thread_suffix_request_packets() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Gather register info. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + # Pull out the register infos that we think we can bit flip + # successfully. + gpr_reg_infos = [ + reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] + self.assertTrue(len(gpr_reg_infos) > 0) + + # Gather thread info. + if with_suffix: + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + thread_id = threads[0] + self.assertIsNotNone(thread_id) + # print("Running on thread: 0x{:x}".format(thread_id)) + else: + thread_id = None + + # Save register state. + self.reset_test_sequence() + self.add_QSaveRegisterState_packets(thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + (success, state_id) = self.parse_QSaveRegisterState_response(context) + self.assertTrue(success) + self.assertIsNotNone(state_id) + # print("saved register state id: {}".format(state_id)) + + # Remember initial register values. + initial_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("initial_reg_values: {}".format(initial_reg_values)) + + # Flip gpr register values. 
+ (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value( + gpr_reg_infos, endian, thread_id=thread_id) + # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes)) + self.assertTrue(successful_writes > 0) + + flipped_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("flipped_reg_values: {}".format(flipped_reg_values)) + + # Restore register values. + self.reset_test_sequence() + self.add_QRestoreRegisterState_packets(state_id, thread_id) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify registers match initial register values. + final_reg_values = self.read_register_values( + gpr_reg_infos, endian, thread_id=thread_id) + # print("final_reg_values: {}".format(final_reg_values)) + self.assertIsNotNone(final_reg_values) + self.assertEqual(final_reg_values, initial_reg_values) + + @debugserver_test + def test_grp_register_save_restore_works_with_suffix_debugserver(self): + USE_THREAD_SUFFIX = True + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + def test_grp_register_save_restore_works_with_suffix_llgs(self): + USE_THREAD_SUFFIX = True + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @debugserver_test + def test_grp_register_save_restore_works_no_suffix_debugserver(self): + USE_THREAD_SUFFIX = False + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) + + @llgs_test + def test_grp_register_save_restore_works_no_suffix_llgs(self): + USE_THREAD_SUFFIX = False + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.grp_register_save_restore_works(USE_THREAD_SUFFIX) diff --git 
a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py new file mode 100644 index 00000000000..c6c750299b1 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteSingleStep.py @@ -0,0 +1,39 @@ + + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_single_step_only_steps_one_instruction_with_s_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="s") + + @skipIfWindows # No pty support to test any inferior std -i/e/o + @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=["arm"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + def test_single_step_only_steps_one_instruction_with_s_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="s") diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py new file mode 100644 index 00000000000..f9bd668a6dd --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemoteThreadsInStopReply.py @@ -0,0 +1,315 @@ + +import json +import re + +import gdbremote_testcase +from 
lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +class TestGdbRemoteThreadsInStopReply( + gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [ + "read packet: $QListThreadsInStopReply#21", + "send packet: $OK#00", + ] + + def gather_stop_reply_fields(self, post_startup_log_lines, thread_count, + field_names): + # Set up the inferior args. + inferior_args = [] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + if post_startup_log_lines: + self.test_sequence.add_log_lines(post_startup_log_lines, True) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + hw_info = self.parse_hw_info(context) + + # Give threads time to start up, then break. + time.sleep(self._WAIT_TIMEOUT) + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: {}".format( + chr(3)), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. + threads = self.wait_for_thread_count(thread_count, + timeout_seconds=self._WAIT_TIMEOUT) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), thread_count) + + # Run, then stop the process, grab the stop reply content. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines(["read packet: $c#63", + "read packet: {}".format(chr(3)), + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse the stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + result = dict(); + result["pc_register"] = hw_info["pc_register"] + result["little_endian"] = hw_info["little_endian"] + for key_field in field_names: + result[key_field] = kv_dict.get(key_field) + + return result + + def gather_stop_reply_threads(self, post_startup_log_lines, thread_count): + # Pull out threads from stop response. + stop_reply_threads_text = self.gather_stop_reply_fields( + post_startup_log_lines, thread_count, ["threads"])["threads"] + if stop_reply_threads_text: + return [int(thread_id, 16) + for thread_id in stop_reply_threads_text.split(",")] + else: + return [] + + def gather_stop_reply_pcs(self, post_startup_log_lines, thread_count): + results = self.gather_stop_reply_fields( post_startup_log_lines, + thread_count, ["threads", "thread-pcs"]) + if not results: + return [] + + threads_text = results["threads"] + pcs_text = results["thread-pcs"] + thread_ids = threads_text.split(",") + pcs = pcs_text.split(",") + self.assertTrue(len(thread_ids) == len(pcs)) + + thread_pcs = dict() + for i in range(0, len(pcs)): + thread_pcs[int(thread_ids[i], 16)] = pcs[i] + + result = dict() + result["thread_pcs"] = thread_pcs + result["pc_register"] = results["pc_register"] + result["little_endian"] = results["little_endian"] + return result + + def switch_endian(self, egg): + return "".join(reversed(re.findall("..", egg))) + + def parse_hw_info(self, context): + self.assertIsNotNone(context) + process_info = 
self.parse_process_info_response(context) + endian = process_info.get("endian") + reg_info = self.parse_register_info_packets(context) + (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_info) + + hw_info = dict() + hw_info["pc_register"] = pc_lldb_reg_index + hw_info["little_endian"] = (endian == "little") + return hw_info + + def gather_threads_info_pcs(self, pc_register, little_endian): + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $jThreadsInfo#c1", + { + "direction": "send", + "regex": r"^\$(.*)#[0-9a-fA-F]{2}$", + "capture": { + 1: "threads_info"}}, + ], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + threads_info = context.get("threads_info") + register = str(pc_register) + # The jThreadsInfo response is not valid JSON data, so we have to + # clean it up first. + jthreads_info = json.loads(re.sub(r"}]", "}", threads_info)) + thread_pcs = dict() + for thread_info in jthreads_info: + tid = thread_info["tid"] + pc = thread_info["registers"][register] + thread_pcs[tid] = self.switch_endian(pc) if little_endian else pc + + return thread_pcs + + def QListThreadsInStopReply_supported(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_QListThreadsInStopReply_supported_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + @llgs_test + def test_QListThreadsInStopReply_supported_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.QListThreadsInStopReply_supported() + + def stop_reply_reports_multiple_threads(self, thread_count): + # Gather 
threads from stop notification when QThreadsInStopReply is + # enabled. + stop_reply_threads = self.gather_stop_reply_threads( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEqual(len(stop_reply_threads), thread_count) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_stop_reply_reports_multiple_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + # In current implementation of llgs on Windows, as a response to '\x03' packet, the debugger + # of the native process will trigger a call to DebugBreakProcess that will create a new thread + # to handle the exception debug event. So one more stop thread will be notified to the + # delegate, e.g. llgs. So tests below to assert the stop threads number will all fail. + @expectedFailureAll(oslist=["windows"]) + @skipIfNetBSD + @llgs_test + def test_stop_reply_reports_multiple_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_multiple_threads(5) + + def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is not + # enabled. 
+ stop_reply_threads = self.gather_stop_reply_threads(None, thread_count) + self.assertEqual(len(stop_reply_threads), 0) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + @expectedFailureAll(oslist=["windows"]) + @skipIfNetBSD + @llgs_test + def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.no_QListThreadsInStopReply_supplies_no_threads(5) + + def stop_reply_reports_correct_threads(self, thread_count): + # Gather threads from stop notification when QThreadsInStopReply is + # enabled. + stop_reply_threads = self.gather_stop_reply_threads( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + self.assertEqual(len(stop_reply_threads), thread_count) + + # Gather threads from q{f,s}ThreadInfo. 
+ self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), thread_count) + + # Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads + for tid in threads: + self.assertTrue(tid in stop_reply_threads) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @debugserver_test + def test_stop_reply_reports_correct_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + @expectedFailureAll(oslist=["windows"]) + @skipIfNetBSD + @llgs_test + def test_stop_reply_reports_correct_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_reports_correct_threads(5) + + def stop_reply_contains_thread_pcs(self, thread_count): + results = self.gather_stop_reply_pcs( + self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count) + stop_reply_pcs = results["thread_pcs"] + pc_register = results["pc_register"] + little_endian = results["little_endian"] + self.assertEqual(len(stop_reply_pcs), thread_count) + + threads_info_pcs = self.gather_threads_info_pcs(pc_register, + little_endian) + + self.assertEqual(len(threads_info_pcs), thread_count) + for thread_id in stop_reply_pcs: + self.assertTrue(thread_id in threads_info_pcs) + self.assertTrue(int(stop_reply_pcs[thread_id], 16) + == int(threads_info_pcs[thread_id], 16)) + + @expectedFailureAll(oslist=["windows"]) + @skipIfNetBSD + @llgs_test + def test_stop_reply_contains_thread_pcs_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_contains_thread_pcs(5) + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc 
yet + @debugserver_test + def test_stop_reply_contains_thread_pcs_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.stop_reply_contains_thread_pcs(5) diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py new file mode 100644 index 00000000000..51dd0cb1a3b --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_qThreadStopInfo.py @@ -0,0 +1,203 @@ + + + +import unittest2 +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + THREAD_COUNT = 5 + + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + @skipIfDarwinEmbedded # <rdar://problem/27005337> + def gather_stop_replies_via_qThreadStopInfo(self, thread_count): + # Set up the inferior args. + inferior_args = [] + for i in range(thread_count - 1): + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + + # Assumes test_sequence has anything added needed to setup the initial state. + # (Like optionally enabling QThreadsInStopReply.) + self.test_sequence.add_log_lines([ + "read packet: $c#63" + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Give threads time to start up, then break. 
+ time.sleep(self._WAIT_TIMEOUT) + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: {}".format( + chr(3)), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Wait until all threads have started. + threads = self.wait_for_thread_count(thread_count, + timeout_seconds=self._WAIT_TIMEOUT) + self.assertIsNotNone(threads) + + # On Windows, there could be more threads spawned. For example, DebugBreakProcess will + # create a new thread from the debugged process to handle an exception event. So here we + # assert 'GreaterEqual' condition. + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-windows", triple): + self.assertGreaterEqual(len(threads), thread_count) + else: + self.assertEqual(len(threads), thread_count) + + # Grab stop reply for each thread via qThreadStopInfo{tid:hex}. + stop_replies = {} + thread_dicts = {} + for thread in threads: + # Run the qThreadStopInfo command. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $qThreadStopInfo{:x}#00".format(thread), + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "stop_result", + 2: "key_vals_text"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Parse stop reply contents. + key_vals_text = context.get("key_vals_text") + self.assertIsNotNone(key_vals_text) + kv_dict = self.parse_key_val_dict(key_vals_text) + self.assertIsNotNone(kv_dict) + + # Verify there is a thread and that it matches the expected thread + # id. + kv_thread = kv_dict.get("thread") + self.assertIsNotNone(kv_thread) + kv_thread_id = int(kv_thread, 16) + self.assertEqual(kv_thread_id, thread) + + # Grab the stop id reported. 
+ stop_result_text = context.get("stop_result") + self.assertIsNotNone(stop_result_text) + stop_replies[kv_thread_id] = int(stop_result_text, 16) + + # Hang on to the key-val dictionary for the thread. + thread_dicts[kv_thread_id] = kv_dict + + return (stop_replies, thread_dicts) + + def qThreadStopInfo_works_for_multiple_threads(self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + triple = self.dbg.GetSelectedPlatform().GetTriple() + # Consider one more thread created by calling DebugBreakProcess. + if re.match(".*-.*-windows", triple): + self.assertGreaterEqual(len(stop_replies), thread_count) + else: + self.assertEqual(len(stop_replies), thread_count) + + @debugserver_test + def test_qThreadStopInfo_works_for_multiple_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + @llgs_test + @skipIfNetBSD + def test_qThreadStopInfo_works_for_multiple_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT) + + def qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self, thread_count): + (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertIsNotNone(stop_replies) + + no_stop_reason_count = sum( + 1 for stop_reason in list( + stop_replies.values()) if stop_reason == 0) + with_stop_reason_count = sum( + 1 for stop_reason in list( + stop_replies.values()) if stop_reason != 0) + + # All but one thread should report no stop reason. + triple = self.dbg.GetSelectedPlatform().GetTriple() + + # Consider one more thread created by calling DebugBreakProcess. 
+ if re.match(".*-.*-windows", triple): + self.assertGreaterEqual(no_stop_reason_count, thread_count - 1) + else: + self.assertEqual(no_stop_reason_count, thread_count - 1) + + # Only one thread should should indicate a stop reason. + self.assertEqual(with_stop_reason_count, 1) + + @debugserver_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self.THREAD_COUNT) + + @expectedFailureNetBSD + @llgs_test + def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt( + self.THREAD_COUNT) + + def qThreadStopInfo_has_valid_thread_names( + self, thread_count, expected_thread_name): + (_, thread_dicts) = self.gather_stop_replies_via_qThreadStopInfo(thread_count) + self.assertIsNotNone(thread_dicts) + + for thread_dict in list(thread_dicts.values()): + name = thread_dict.get("name") + self.assertIsNotNone(name) + self.assertEqual(name, expected_thread_name) + + @unittest2.skip("MacOSX doesn't have a default thread name") + @debugserver_test + def test_qThreadStopInfo_has_valid_thread_names_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") + + # test requires OS with set, equal thread names by default. + # Windows thread does not have name property, equal names as the process's by default. 
+ @skipUnlessPlatform(["linux", "windows"]) + @llgs_test + def test_qThreadStopInfo_has_valid_thread_names_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out") diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py new file mode 100644 index 00000000000..3511b014d56 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vCont.py @@ -0,0 +1,156 @@ + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemote_vCont(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def vCont_supports_mode(self, mode, inferior_args=None): + # Setup the stub and set the gdb remote command stream. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + self.add_vCont_query_packets() + + # Run the gdb remote command stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Pull out supported modes. + supported_vCont_modes = self.parse_vCont_query_response(context) + self.assertIsNotNone(supported_vCont_modes) + + # Verify we support the given mode. 
+ self.assertTrue(mode in supported_vCont_modes) + + def vCont_supports_c(self): + self.vCont_supports_mode("c") + + def vCont_supports_C(self): + self.vCont_supports_mode("C") + + def vCont_supports_s(self): + self.vCont_supports_mode("s") + + def vCont_supports_S(self): + self.vCont_supports_mode("S") + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_c_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_c() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_c_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_c() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_C_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_C() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_C_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_C() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_s_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_s() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_s_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_s() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_vCont_supports_S_debugserver(self): + self.init_debugserver_test() + self.build() + self.vCont_supports_S() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], 
bugnumber="rdar://27005337") + @llgs_test + def test_vCont_supports_S_llgs(self): + self.init_llgs_test() + self.build() + self.vCont_supports_S() + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="vCont;s") + + @skipIfWindows # No pty support to test O* & I* notification packets. + @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=["arm"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=True, step_instruction="vCont;s") + + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + @debugserver_test + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=False, step_instruction="vCont;s:{thread}") + + @skipIfWindows # No pty support to test O* & I* notification packets. 
+ @llgs_test + @expectedFailureAndroid( + bugnumber="llvm.org/pr24739", + archs=[ + "arm", + "aarch64"]) + @expectedFailureAll( + oslist=["linux"], + archs=["arm"], + bugnumber="llvm.org/pr24739") + @skipIf(triple='^mips') + @expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337") + def test_single_step_only_steps_one_instruction_with_vCont_s_thread_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.single_step_only_steps_one_instruction( + use_Hc_packet=False, step_instruction="vCont;s:{thread}") diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vContThreads.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vContThreads.py new file mode 100644 index 00000000000..e16a28a335a --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestGdbRemote_vContThreads.py @@ -0,0 +1,148 @@ + +import json +import re + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + +class TestGdbRemote_vContThreads(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + def start_threads(self, num): + procs = self.prep_debug_monitor_and_inferior( + inferior_args=['thread:new'] * num + ['@started']) + # start the process and wait for output + self.test_sequence.add_log_lines([ + "read packet: $c#63", + {"type": "output_match", "regex": self.maybe_strict_output_regex( + r"@started\r\n")}, + ], True) + # then interrupt it + self.add_interrupt_packets() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + self.assertEqual(len(threads), num + 1) + + self.reset_test_sequence() + return threads + + def signal_one_thread(self): + threads = 
self.start_threads(1) + # try sending a signal to one of the two threads + self.test_sequence.add_log_lines([ + "read packet: $vCont;C{0:x}:{1:x};c#00".format( + lldbutil.get_signal_number('SIGUSR1'), threads[0]), + {"direction": "send", "regex": r"^\$W00#b7$"}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @skipUnlessPlatform(["netbsd"]) + @debugserver_test + def test_signal_one_thread_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.signal_one_thread() + + @skipUnlessPlatform(["netbsd"]) + @llgs_test + def test_signal_one_thread_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.signal_one_thread() + + def signal_all_threads(self): + threads = self.start_threads(1) + # try sending a signal to two threads (= the process) + self.test_sequence.add_log_lines([ + "read packet: $vCont;C{0:x}:{1:x};C{0:x}:{2:x}#00".format( + lldbutil.get_signal_number('SIGUSR1'), + threads[0], threads[1]), + {"direction": "send", "regex": r"^\$W00#b7$"}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @skipUnlessPlatform(["netbsd"]) + @debugserver_test + def test_signal_all_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.signal_all_threads() + + @skipUnlessPlatform(["netbsd"]) + @llgs_test + def test_signal_all_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.signal_all_threads() + + def signal_two_of_three_threads(self): + threads = self.start_threads(2) + # try sending a signal to 2 out of 3 threads + self.test_sequence.add_log_lines([ + "read packet: $vCont;C{0:x}:{1:x};C{0:x}:{2:x};c#00".format( + lldbutil.get_signal_number('SIGUSR1'), + threads[1], threads[2]), + {"direction": "send", "regex": r"^\$E1e#db$"}, + ], True) + + context = self.expect_gdbremote_sequence() 
+ self.assertIsNotNone(context) + + @skipUnlessPlatform(["netbsd"]) + @debugserver_test + def test_signal_two_of_three_threads_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.signal_two_of_three_threads() + + @skipUnlessPlatform(["netbsd"]) + @llgs_test + def test_signal_two_of_three_threads_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.signal_two_of_three_threads() + + def signal_two_signals(self): + threads = self.start_threads(1) + # try sending two different signals to two threads + self.test_sequence.add_log_lines([ + "read packet: $vCont;C{0:x}:{1:x};C{2:x}:{3:x}#00".format( + lldbutil.get_signal_number('SIGUSR1'), threads[0], + lldbutil.get_signal_number('SIGUSR2'), threads[1]), + {"direction": "send", "regex": r"^\$E1e#db$"}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @skipUnlessPlatform(["netbsd"]) + @debugserver_test + def test_signal_two_signals_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.signal_two_signals() + + @skipUnlessPlatform(["netbsd"]) + @llgs_test + def test_signal_two_signals_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.signal_two_signals() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py new file mode 100644 index 00000000000..2b7f28a3aef --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/TestLldbGdbServer.py @@ -0,0 +1,1590 @@ +""" +Test case for testing the gdbremote protocol. + +Tests run against debugserver and lldb-server (llgs). +lldb-server tests run where the lldb-server exe is +available. + +This class will be broken into smaller test case classes by +gdb remote packet functional areas. 
For now it contains +the initial set of tests implemented. +""" + +from __future__ import division, print_function + + +import unittest2 +import gdbremote_testcase +import lldbgdbserverutils +from lldbsuite.support import seven +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test.lldbdwarf import * +from lldbsuite.test import lldbutil + + +class LldbGdbServerTestCase(gdbremote_testcase.GdbRemoteTestCaseBase, DwarfOpcodeParser): + + mydir = TestBase.compute_mydir(__file__) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_exe_starts_debugserver(self): + self.init_debugserver_test() + server = self.connect_to_debug_monitor() + + @llgs_test + def test_exe_starts_llgs(self): + self.init_llgs_test() + server = self.connect_to_debug_monitor() + + def start_no_ack_mode(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_start_no_ack_mode_debugserver(self): + self.init_debugserver_test() + self.start_no_ack_mode() + + @llgs_test + def test_start_no_ack_mode_llgs(self): + self.init_llgs_test() + self.start_no_ack_mode() + + def thread_suffix_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-server < 26> read packet: $QThreadSuffixSupported#e4", + "lldb-server < 6> send packet: $OK#9a"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_thread_suffix_supported_debugserver(self): + self.init_debugserver_test() + self.thread_suffix_supported() + + @llgs_test + 
def test_thread_suffix_supported_llgs(self): + self.init_llgs_test() + self.thread_suffix_supported() + + def list_threads_in_stop_reply_supported(self): + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.test_sequence.add_log_lines( + ["lldb-server < 27> read packet: $QListThreadsInStopReply#21", + "lldb-server < 6> send packet: $OK#9a"], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_list_threads_in_stop_reply_supported_debugserver(self): + self.init_debugserver_test() + self.list_threads_in_stop_reply_supported() + + @llgs_test + def test_list_threads_in_stop_reply_supported_llgs(self): + self.init_llgs_test() + self.list_threads_in_stop_reply_supported() + + def c_packet_works(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $c#63", + "send packet: $W00#00"], + True) + + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_c_packet_works_debugserver(self): + self.init_debugserver_test() + self.build() + self.c_packet_works() + + @llgs_test + def test_c_packet_works_llgs(self): + self.init_llgs_test() + self.build() + self.c_packet_works() + + def inferior_print_exit(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args += ["hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8", + {"type": 
"output_match", "regex": self.maybe_strict_output_regex(r"hello, world\r\n")}, + "send packet: $W00#00"], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_inferior_print_exit_debugserver(self): + self.init_debugserver_test() + self.build() + self.inferior_print_exit() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + @expectedFlakeyLinux("llvm.org/pr25652") + def test_inferior_print_exit_llgs(self): + self.init_llgs_test() + self.build() + self.inferior_print_exit() + + def first_launch_stop_reply_thread_matches_first_qC(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # build launch args + launch_args += ["hello, world"] + + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines(["read packet: $qC#00", + {"direction": "send", + "regex": r"^\$QC([0-9a-fA-F]+)#", + "capture": {1: "thread_id"}}, + "read packet: $?#00", + {"direction": "send", + "regex": r"^\$T[0-9a-fA-F]{2}thread:([0-9a-fA-F]+)", + "expect_captures": {1: "thread_id"}}], + True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_first_launch_stop_reply_thread_matches_first_qC_debugserver(self): + self.init_debugserver_test() + self.build() + self.first_launch_stop_reply_thread_matches_first_qC() + + @llgs_test + def test_first_launch_stop_reply_thread_matches_first_qC_llgs(self): + self.init_llgs_test() + self.build() + self.first_launch_stop_reply_thread_matches_first_qC() + + def attach_commandline_continue_app_exits(self): + procs = self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + ["read packet: 
$vCont;c#a8", + "send packet: $W00#00"], + True) + self.expect_gdbremote_sequence() + + # Wait a moment for completed and now-detached inferior process to + # clear. + time.sleep(1) + + if not lldb.remote_platform: + # Process should be dead now. Reap results. + poll_result = procs["inferior"].poll() + self.assertIsNotNone(poll_result) + + # Where possible, verify at the system level that the process is not + # running. + self.assertFalse( + lldbgdbserverutils.process_is_running( + procs["inferior"].pid, False)) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_attach_commandline_continue_app_exits_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + @expectedFailureNetBSD + @llgs_test + def test_attach_commandline_continue_app_exits_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.attach_commandline_continue_app_exits() + + def qRegisterInfo_returns_one_valid_result(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.test_sequence.add_log_lines( + ["read packet: $qRegisterInfo0#00", + {"direction": "send", "regex": r"^\$(.+);#[0-9A-Fa-f]{2}", "capture": {1: "reginfo_0"}}], + True) + + # Run the stream + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + reg_info_packet = context.get("reginfo_0") + self.assertIsNotNone(reg_info_packet) + self.assert_valid_reg_info( + lldbgdbserverutils.parse_reg_info_response(reg_info_packet)) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def 
test_qRegisterInfo_returns_one_valid_result_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_returns_one_valid_result() + + @llgs_test + def test_qRegisterInfo_returns_one_valid_result_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_returns_one_valid_result() + + def qRegisterInfo_returns_all_valid_results(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream. + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Validate that each register info returned validates. + for reg_info in self.parse_register_info_packets(context): + self.assert_valid_reg_info(reg_info) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_returns_all_valid_results_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_returns_all_valid_results() + + @llgs_test + def test_qRegisterInfo_returns_all_valid_results_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_returns_all_valid_results() + + def qRegisterInfo_contains_required_generics(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generic registers found. 
+ generic_regs = { + reg_info['generic']: 1 for reg_info in reg_infos if 'generic' in reg_info} + + # Ensure we have a program counter register. + self.assertTrue('pc' in generic_regs) + + # Ensure we have a frame pointer register. PPC64le's FP is the same as SP + if self.getArchitecture() != 'powerpc64le': + self.assertTrue('fp' in generic_regs) + + # Ensure we have a stack pointer register. + self.assertTrue('sp' in generic_regs) + + # Ensure we have a flags register. + self.assertTrue('flags' in generic_regs) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_contains_required_generics_debugserver(self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_contains_required_generics() + + @llgs_test + def test_qRegisterInfo_contains_required_generics_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_required_generics() + + def qRegisterInfo_contains_at_least_one_register_set(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all register sets found. 
+ register_sets = { + reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info} + self.assertTrue(len(register_sets) >= 1) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qRegisterInfo_contains_at_least_one_register_set_debugserver( + self): + self.init_debugserver_test() + self.build() + self.qRegisterInfo_contains_at_least_one_register_set() + + @llgs_test + def test_qRegisterInfo_contains_at_least_one_register_set_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_at_least_one_register_set() + + def targetHasAVX(self): + triple = self.dbg.GetSelectedPlatform().GetTriple() + + # TODO other platforms, please implement this function + if not re.match(".*-.*-linux", triple): + return True + + # Need to do something different for non-Linux/Android targets + if lldb.remote_platform: + self.runCmd('platform get-file "/proc/cpuinfo" "cpuinfo"') + cpuinfo_path = "cpuinfo" + self.addTearDownHook(lambda: os.unlink("cpuinfo")) + else: + cpuinfo_path = "/proc/cpuinfo" + + f = open(cpuinfo_path, 'r') + cpuinfo = f.read() + f.close() + return " avx " in cpuinfo + + def qRegisterInfo_contains_avx_registers(self): + launch_args = self.install_and_create_launch_args() + + server = self.connect_to_debug_monitor() + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + self.add_verified_launch_packets(launch_args) + self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + + # Collect all generics found. 
+ register_sets = { + reg_info['set']: 1 for reg_info in reg_infos if 'set' in reg_info} + self.assertEqual( + self.targetHasAVX(), + "Advanced Vector Extensions" in register_sets) + + @expectedFailureAll(oslist=["windows"]) # no avx for now. + @expectedFailureNetBSD + @llgs_test + def test_qRegisterInfo_contains_avx_registers_llgs(self): + self.init_llgs_test() + self.build() + self.qRegisterInfo_contains_avx_registers() + + def qThreadInfo_contains_thread(self): + procs = self.prep_debug_monitor_and_inferior() + self.add_threadinfo_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread. + self.assertEqual(len(threads), 1) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_contains_thread_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @llgs_test + def test_qThreadInfo_contains_thread_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_contains_thread() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_contains_thread_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + @expectedFailureAll(oslist=["windows"]) # expect one more thread stopped + @expectedFailureNetBSD + @llgs_test + def test_qThreadInfo_contains_thread_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_contains_thread() + + def qThreadInfo_matches_qC(self): + 
procs = self.prep_debug_monitor_and_inferior() + + self.add_threadinfo_collection_packets() + self.test_sequence.add_log_lines( + ["read packet: $qC#00", + {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}} + ], True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather threadinfo entries. + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + # We should have exactly one thread from threadinfo. + self.assertEqual(len(threads), 1) + + # We should have a valid thread_id from $QC. + QC_thread_id_hex = context.get("thread_id") + self.assertIsNotNone(QC_thread_id_hex) + QC_thread_id = int(QC_thread_id_hex, 16) + + # Those two should be the same. + self.assertEqual(threads[0], QC_thread_id) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_matches_qC_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @llgs_test + def test_qThreadInfo_matches_qC_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qThreadInfo_matches_qC() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qThreadInfo_matches_qC_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + @expectedFailureAll(oslist=["windows"]) # expect one more thread stopped + @expectedFailureNetBSD + @llgs_test + def test_qThreadInfo_matches_qC_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.qThreadInfo_matches_qC() + + def p_returns_correct_data_size_for_each_qRegisterInfo(self): + procs = self.prep_debug_monitor_and_inferior() + 
self.add_register_info_collection_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.assertTrue(len(reg_infos) > 0) + + byte_order = self.get_target_byte_order() + + # Read value for each register. + reg_index = 0 + for reg_info in reg_infos: + # Skip registers that don't have a register set. For x86, these are + # the DRx registers, which have no LLDB-kind register number and thus + # cannot be read via normal + # NativeRegisterContext::ReadRegister(reg_info,...) calls. + if not "set" in reg_info: + continue + + # Clear existing packet expectations. + self.reset_test_sequence() + + # Run the register query + self.test_sequence.add_log_lines( + ["read packet: $p{0:x}#00".format(reg_index), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. 
+ p_response = context.get("p_response") + self.assertIsNotNone(p_response) + + if "dynamic_size_dwarf_expr_bytes" in reg_info: + self.updateRegInfoBitsize(reg_info, byte_order) + self.assertEqual(len(p_response), 2 * int(reg_info["bitsize"]) / 8) + + # Increment loop + reg_index += 1 + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @expectedFailureNetBSD + @llgs_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_launch_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + @expectedFailureNetBSD + @llgs_test + def test_p_returns_correct_data_size_for_each_qRegisterInfo_attach_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.p_returns_correct_data_size_for_each_qRegisterInfo() + + def Hg_switches_to_3_threads(self): + # Startup the inferior with three threads (main + 2 new ones). + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["thread:new", "thread:new"]) + + # Let the inferior process have a few moments to start up the thread + # when launched. (The launch scenario has no time to run, so threads + # won't be there yet.) + self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for 3 threads to be present. 
+ threads = self.wait_for_thread_count(3, timeout_seconds=self._WAIT_TIMEOUT) + self.assertEqual(len(threads), 3) + + # verify we can $H to each thead, and $qC matches the thread we set. + for thread in threads: + # Change to each thread, verify current thread id. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $Hg{0:x}#00".format(thread), # Set current thread. + "send packet: $OK#00", + "read packet: $qC#00", + {"direction": "send", "regex": r"^\$QC([0-9a-fA-F]+)#", "capture": {1: "thread_id"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the thread id. + self.assertIsNotNone(context.get("thread_id")) + self.assertEqual(int(context.get("thread_id"), 16), thread) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_Hg_switches_to_3_threads_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @expectedFailureAll(oslist=["windows"]) # expect 4 threads + @llgs_test + def test_Hg_switches_to_3_threads_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.Hg_switches_to_3_threads() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_Hg_switches_to_3_threads_attach_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + @expectedFailureAll(oslist=["windows"]) # expecting one more thread + @expectedFailureNetBSD + @llgs_test + def test_Hg_switches_to_3_threads_attach_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_attach() + self.Hg_switches_to_3_threads() + + def Hc_then_Csignal_signals_correct_thread(self, segfault_signo): + # NOTE only run this one in 
inferior-launched mode: we can't grab inferior stdout when running attached, + # and the test requires getting stdout from the exe. + + NUM_THREADS = 3 + + # Startup the inferior with three threads (main + NUM_THREADS-1 worker threads). + # inferior_args=["thread:print-ids"] + inferior_args = ["thread:segfault"] + for i in range(NUM_THREADS - 1): + # if i > 0: + # Give time between thread creation/segfaulting for the handler to work. + # inferior_args.append("sleep:1") + inferior_args.append("thread:new") + inferior_args.append("sleep:10") + + # Launch/attach. (In our case, this should only ever be launched since + # we need inferior stdout/stderr). + procs = self.prep_debug_monitor_and_inferior( + inferior_args=inferior_args) + self.test_sequence.add_log_lines(["read packet: $c#63"], True) + context = self.expect_gdbremote_sequence() + + # Let the inferior process have a few moments to start up the thread when launched. + # context = self.run_process_then_stop(run_seconds=1) + + # Wait at most x seconds for all threads to be present. + # threads = self.wait_for_thread_count(NUM_THREADS, timeout_seconds=5) + # self.assertEquals(len(threads), NUM_THREADS) + + signaled_tids = {} + print_thread_ids = {} + + # Switch to each thread, deliver a signal, and verify signal delivery + for i in range(NUM_THREADS - 1): + # Run until SIGSEGV comes in. + self.reset_test_sequence() + self.test_sequence.add_log_lines([{"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "signo", + 2: "thread_id"}}], + True) + + context = self.expect_gdbremote_sequence(timeout_seconds=self._DEFAULT_TIMEOUT) + self.assertIsNotNone(context) + signo = context.get("signo") + self.assertEqual(int(signo, 16), segfault_signo) + + # Ensure we haven't seen this tid yet. + thread_id = int(context.get("thread_id"), 16) + self.assertFalse(thread_id in signaled_tids) + signaled_tids[thread_id] = 1 + + # Send SIGUSR1 to the thread that signaled the SIGSEGV. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Set the continue thread. + # Set current thread. + "read packet: $Hc{0:x}#00".format(thread_id), + "send packet: $OK#00", + + # Continue sending the signal number to the continue thread. + # The commented out packet is a way to do this same operation without using + # a $Hc (but this test is testing $Hc, so we'll stick with the former). + "read packet: $C{0:x}#00".format(lldbutil.get_signal_number('SIGUSR1')), + # "read packet: $vCont;C{0:x}:{1:x};c#00".format(lldbutil.get_signal_number('SIGUSR1'), thread_id), + + # FIXME: Linux does not report the thread stop on the delivered signal (SIGUSR1 here). MacOSX debugserver does. + # But MacOSX debugserver isn't guaranteeing the thread the signal handler runs on, so currently its an XFAIL. + # Need to rectify behavior here. The linux behavior is more intuitive to me since we're essentially swapping out + # an about-to-be-delivered signal (for which we already sent a stop packet) to a different signal. + # {"direction":"send", "regex":r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture":{1:"stop_signo", 2:"stop_thread_id"} }, + # "read packet: $c#63", + {"type": "output_match", "regex": r"^received SIGUSR1 on thread id: ([0-9a-fA-F]+)\r\nthread ([0-9a-fA-F]+): past SIGSEGV\r\n", "capture": {1: "print_thread_id", 2: "post_handle_thread_id"}}, + ], + True) + + # Run the sequence. + context = self.expect_gdbremote_sequence( + timeout_seconds=self._DEFAULT_TIMEOUT) + self.assertIsNotNone(context) + + # Ensure the stop signal is the signal we delivered. + # stop_signo = context.get("stop_signo") + # self.assertIsNotNone(stop_signo) + # self.assertEquals(int(stop_signo,16), lldbutil.get_signal_number('SIGUSR1')) + + # Ensure the stop thread is the thread to which we delivered the signal. 
+ # stop_thread_id = context.get("stop_thread_id") + # self.assertIsNotNone(stop_thread_id) + # self.assertEquals(int(stop_thread_id,16), thread_id) + + # Ensure we haven't seen this thread id yet. The inferior's + # self-obtained thread ids are not guaranteed to match the stub + # tids (at least on MacOSX). + print_thread_id = context.get("print_thread_id") + self.assertIsNotNone(print_thread_id) + print_thread_id = int(print_thread_id, 16) + self.assertFalse(print_thread_id in print_thread_ids) + + # Now remember this print (i.e. inferior-reflected) thread id and + # ensure we don't hit it again. + print_thread_ids[print_thread_id] = 1 + + # Ensure post signal-handle thread id matches the thread that + # initially raised the SIGSEGV. + post_handle_thread_id = context.get("post_handle_thread_id") + self.assertIsNotNone(post_handle_thread_id) + post_handle_thread_id = int(post_handle_thread_id, 16) + self.assertEqual(post_handle_thread_id, print_thread_id) + + @unittest2.expectedFailure() + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_Hc_then_Csignal_signals_correct_thread_launch_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + # Darwin debugserver translates some signals like SIGSEGV into some gdb + # expectations about fixed signal numbers. + self.Hc_then_Csignal_signals_correct_thread(self.TARGET_EXC_BAD_ACCESS) + + @skipIfWindows # no SIGSEGV support + @expectedFailureNetBSD + @llgs_test + def test_Hc_then_Csignal_signals_correct_thread_launch_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.Hc_then_Csignal_signals_correct_thread( + lldbutil.get_signal_number('SIGSEGV')) + + def m_packet_reads_memory(self): + # This is the memory we will write into the inferior and then ensure we + # can read back with $m. 
+ MEMORY_CONTENTS = "Test contents 0123456789 ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz" + + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "set-message:%s" % + MEMORY_CONTENTS, + "get-data-address-hex:g_message", + "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "message_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Grab contents from the inferior. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $m{0:x},{1:x}#00".format(message_address, len(MEMORY_CONTENTS)), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "read_contents"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. 
+ self.assertIsNotNone(context.get("read_contents")) + read_contents = seven.unhexlify(context.get("read_contents")) + self.assertEqual(read_contents, MEMORY_CONTENTS) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_m_packet_reads_memory_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + def test_m_packet_reads_memory_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.m_packet_reads_memory() + + def qMemoryRegionInfo_is_supported(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior() + + # Ask if it supports $qMemoryRegionInfo. + self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo#00", + "send packet: $OK#00" + ], True) + self.expect_gdbremote_sequence() + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_is_supported_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + @llgs_test + def test_qMemoryRegionInfo_is_supported_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_is_supported() + + def qMemoryRegionInfo_reports_code_address_as_executable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-code-address-hex:hello", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. 
+ {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "code_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the code address. + self.assertIsNotNone(context.get("code_address")) + code_address = int(context.get("code_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(code_address) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure code address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("x" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. 
+ self.assert_address_within_memory_region(code_address, mem_region_dict) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_reports_code_address_as_executable_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + def test_qMemoryRegionInfo_reports_code_address_as_executable_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_code_address_as_executable() + + def qMemoryRegionInfo_reports_stack_address_as_readable_writeable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-stack-address-hex:", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"stack address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "stack_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the address. + self.assertIsNotNone(context.get("stack_address")) + stack_address = int(context.get("stack_address"), 16) + + # Grab memory region info from the inferior. 
+ self.reset_test_sequence() + self.add_query_memory_region_packets(stack_address) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("w" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. + self.assert_address_within_memory_region( + stack_address, mem_region_dict) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + def test_qMemoryRegionInfo_reports_stack_address_as_readable_writeable_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_stack_address_as_readable_writeable() + + def qMemoryRegionInfo_reports_heap_address_as_readable_writeable(self): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["get-heap-address-hex:", "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. 
+ {"type": "output_match", "regex": self.maybe_strict_output_regex(r"heap address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "heap_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the address. + self.assertIsNotNone(context.get("heap_address")) + heap_address = int(context.get("heap_address"), 16) + + # Grab memory region info from the inferior. + self.reset_test_sequence() + self.add_query_memory_region_packets(heap_address) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + mem_region_dict = self.parse_memory_region_packet(context) + + # Ensure there are no errors reported. + self.assertFalse("error" in mem_region_dict) + + # Ensure address is readable and executable. + self.assertTrue("permissions" in mem_region_dict) + self.assertTrue("r" in mem_region_dict["permissions"]) + self.assertTrue("w" in mem_region_dict["permissions"]) + + # Ensure the start address and size encompass the address we queried. 
+ self.assert_address_within_memory_region(heap_address, mem_region_dict) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_debugserver( + self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + def test_qMemoryRegionInfo_reports_heap_address_as_readable_writeable_llgs( + self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qMemoryRegionInfo_reports_heap_address_as_readable_writeable() + + def breakpoint_set_and_remove_work(self, want_hardware=False): + # Start up the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "get-code-address-hex:hello", + "sleep:1", + "call-function:hello"]) + + # Run the process + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + self.test_sequence.add_log_lines( + [ # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the function call entry point. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"code address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "function_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather process info - we need endian of target to handle register + # value conversions. 
+ process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Gather register info entries. + reg_infos = self.parse_register_info_packets(context) + (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_infos) + self.assertIsNotNone(pc_lldb_reg_index) + self.assertIsNotNone(pc_reg_info) + + # Grab the function address. + self.assertIsNotNone(context.get("function_address")) + function_address = int(context.get("function_address"), 16) + + # Get current target architecture + target_arch = self.getArchitecture() + + # Set the breakpoint. + if (target_arch == "arm") or (target_arch == "aarch64"): + # TODO: Handle case when setting breakpoint in thumb code + BREAKPOINT_KIND = 4 + else: + BREAKPOINT_KIND = 1 + + # Set default packet type to Z0 (software breakpoint) + z_packet_type = 0 + + # If hardware breakpoint is requested set packet type to Z1 + if want_hardware == True: + z_packet_type = 1 + + self.reset_test_sequence() + self.add_set_breakpoint_packets( + function_address, + z_packet_type, + do_continue=True, + breakpoint_kind=BREAKPOINT_KIND) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the stop signal reported was the breakpoint signal number. + stop_signo = context.get("stop_signo") + self.assertIsNotNone(stop_signo) + self.assertEqual(int(stop_signo, 16), + lldbutil.get_signal_number('SIGTRAP')) + + # Ensure we did not receive any output. If the breakpoint was not set, we would + # see output (from a launched process with captured stdio) printing a hello, world message. + # That would indicate the breakpoint didn't take. + self.assertEqual(len(context["O_content"]), 0) + + # Verify that the PC for the main thread is where we expect it - right at the breakpoint address. + # This acts as a another validation on the register reading code. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + # Print the PC. This should match the breakpoint address. + "read packet: $p{0:x}#00".format(pc_lldb_reg_index), + # Capture $p results. + {"direction": "send", + "regex": r"^\$([0-9a-fA-F]+)#", + "capture": {1: "p_response"}}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the PC is where we expect. Note response is in endianness of + # the inferior. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + + # Convert from target endian to int. + returned_pc = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + self.assertEqual(returned_pc, function_address) + + # Verify that a breakpoint remove and continue gets us the expected + # output. + self.reset_test_sequence() + + # Add breakpoint remove packets + self.add_remove_breakpoint_packets( + function_address, + z_packet_type, + breakpoint_kind=BREAKPOINT_KIND) + + self.test_sequence.add_log_lines( + [ + # Continue running. + "read packet: $c#63", + # We should now receive the output from the call. + {"type": "output_match", "regex": r"^hello, world\r\n$"}, + # And wait for program completion. 
+ {"direction": "send", "regex": r"^\$W00(.*)#[0-9a-fA-F]{2}$"}, + ], True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_software_breakpoint_set_and_remove_work_debugserver(self): + self.init_debugserver_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=False) + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + @expectedFlakeyLinux("llvm.org/pr25652") + def test_software_breakpoint_set_and_remove_work_llgs(self): + self.init_llgs_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=False) + + @debugserver_test + @skipUnlessPlatform(oslist=['linux']) + @expectedFailureAndroid + @skipIf(archs=no_match(['arm', 'aarch64'])) + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_hardware_breakpoint_set_and_remove_work_debugserver(self): + self.init_debugserver_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=True) + + @llgs_test + @skipUnlessPlatform(oslist=['linux']) + @skipIf(archs=no_match(['arm', 'aarch64'])) + def test_hardware_breakpoint_set_and_remove_work_llgs(self): + self.init_llgs_test() + if self.getArchitecture() == "arm": + # TODO: Handle case when setting 
breakpoint in thumb code + self.build(dictionary={'CFLAGS_EXTRAS': '-marm'}) + else: + self.build() + self.set_inferior_startup_launch() + self.breakpoint_set_and_remove_work(want_hardware=True) + + def qSupported_returns_known_stub_features(self): + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior() + self.add_qSupported_packets() + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Retrieve the qSupported features. + supported_dict = self.parse_qSupported_response(context) + self.assertIsNotNone(supported_dict) + self.assertTrue(len(supported_dict) > 0) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_qSupported_returns_known_stub_features_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + @llgs_test + def test_qSupported_returns_known_stub_features_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.qSupported_returns_known_stub_features() + + def written_M_content_reads_back_correctly(self): + TEST_MESSAGE = "Hello, memory" + + # Start up the stub and start/prep the inferior. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "set-message:xxxxxxxxxxxxxX", + "get-data-address-hex:g_message", + "sleep:1", + "print-message:"]) + self.test_sequence.add_log_lines( + [ + # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the message buffer within the inferior. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": self.maybe_strict_output_regex(r"data address: 0x([0-9a-fA-F]+)\r\n"), + "capture": {1: "message_address"}}, + # Now stop the inferior. 
+ "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the message address. + self.assertIsNotNone(context.get("message_address")) + message_address = int(context.get("message_address"), 16) + + # Hex-encode the test message, adding null termination. + hex_encoded_message = seven.hexlify(TEST_MESSAGE) + + # Write the message to the inferior. Verify that we can read it with the hex-encoded (m) + # and binary (x) memory read packets. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $M{0:x},{1:x}:{2}#00".format(message_address, len(TEST_MESSAGE), hex_encoded_message), + "send packet: $OK#00", + "read packet: $m{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)), + "send packet: ${0}#00".format(hex_encoded_message), + "read packet: $x{0:x},{1:x}#00".format(message_address, len(TEST_MESSAGE)), + "send packet: ${0}#00".format(TEST_MESSAGE), + "read packet: $m{0:x},4#00".format(message_address), + "send packet: ${0}#00".format(hex_encoded_message[0:8]), + "read packet: $x{0:x},4#00".format(message_address), + "send packet: ${0}#00".format(TEST_MESSAGE[0:4]), + "read packet: $c#63", + {"type": "output_match", "regex": r"^message: (.+)\r\n$", "capture": {1: "printed_message"}}, + "send packet: $W00#00", + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure what we read from inferior memory is what we wrote. 
+ printed_message = context.get("printed_message") + self.assertIsNotNone(printed_message) + self.assertEqual(printed_message, TEST_MESSAGE + "X") + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_written_M_content_reads_back_correctly_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.written_M_content_reads_back_correctly() + + @skipIfWindows # No pty support to test any inferior output + @llgs_test + @expectedFlakeyLinux("llvm.org/pr25652") + def test_written_M_content_reads_back_correctly_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.written_M_content_reads_back_correctly() + + def P_writes_all_gpr_registers(self): + # Start inferior debug session, grab all register info. + procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"]) + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Process register infos. + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + # Process endian. + process_info = self.parse_process_info_response(context) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + # Pull out the register infos that we think we can bit flip + # successfully,. + gpr_reg_infos = [ + reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)] + self.assertTrue(len(gpr_reg_infos) > 0) + + # Write flipped bit pattern of existing value to each register. 
+ (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value( + gpr_reg_infos, endian) + # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes)) + self.assertTrue(successful_writes > 0) + + # Note: as of this moment, a hefty number of the GPR writes are failing with E32 (everything except rax-rdx, rdi, rsi, rbp). + # Come back to this. I have the test rigged to verify that at least some + # of the bit-flip writes work. + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_P_writes_all_gpr_registers_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + @llgs_test + def test_P_writes_all_gpr_registers_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.P_writes_all_gpr_registers() + + def P_and_p_thread_suffix_work(self): + # Startup the inferior with three threads. + procs = self.prep_debug_monitor_and_inferior( + inferior_args=["thread:new", "thread:new"]) + self.add_thread_suffix_request_packets() + self.add_register_info_collection_packets() + self.add_process_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + process_info = self.parse_process_info_response(context) + self.assertIsNotNone(process_info) + endian = process_info.get("endian") + self.assertIsNotNone(endian) + + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + reg_index = self.select_modifiable_register(reg_infos) + self.assertIsNotNone(reg_index) + reg_byte_size = int(reg_infos[reg_index]["bitsize"]) // 8 + self.assertTrue(reg_byte_size > 0) + + # Run the process a bit so threads can start up, and collect register + # info. 
+ context = self.run_process_then_stop(run_seconds=1) + self.assertIsNotNone(context) + + # Wait for 3 threads to be present. + threads = self.wait_for_thread_count(3, timeout_seconds=self._WAIT_TIMEOUT) + self.assertEqual(len(threads), 3) + + expected_reg_values = [] + register_increment = 1 + next_value = None + + # Set the same register in each of 3 threads to a different value. + # Verify each one has the unique value. + for thread in threads: + # If we don't have a next value yet, start it with the initial read + # value + 1 + if not next_value: + # Read pre-existing register value. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Set the next value to use for writing as the increment plus + # current value. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + next_value = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + + # Set new value using P and thread suffix. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $P{0:x}={1};thread:{2:x}#00".format( + reg_index, + lldbgdbserverutils.pack_register_hex( + endian, + next_value, + byte_size=reg_byte_size), + thread), + "send packet: $OK#00", + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Save the value we set. + expected_reg_values.append(next_value) + + # Increment value for next thread to use (we want them all + # different so we can verify they wrote to each thread correctly + # next.) + next_value += register_increment + + # Revisit each thread and verify they have the expected value set for + # the register we wrote. + thread_index = 0 + for thread in threads: + # Read pre-existing register value. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $p{0:x};thread:{1:x}#00".format(reg_index, thread), + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Get the register value. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + read_value = lldbgdbserverutils.unpack_register_hex_unsigned( + endian, p_response) + + # Make sure we read back what we wrote. + self.assertEqual(read_value, expected_reg_values[thread_index]) + thread_index += 1 + + # Note: as of this moment, a hefty number of the GPR writes are failing + # with E32 (everything except rax-rdx, rdi, rsi, rbp). + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_P_and_p_thread_suffix_work_debugserver(self): + self.init_debugserver_test() + self.build() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() + + @skipIfWindows + @llgs_test + def test_P_and_p_thread_suffix_work_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.P_and_p_thread_suffix_work() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py new file mode 100644 index 00000000000..664b6001d8d --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubReverseConnect.py @@ -0,0 +1,97 @@ +from __future__ import print_function + +import gdbremote_testcase +import lldbgdbserverutils +import re +import select +import socket +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestStubReverseConnect(gdbremote_testcase.GdbRemoteTestCaseBase): + + 
mydir = TestBase.compute_mydir(__file__) + + _DEFAULT_TIMEOUT = 20 * (10 if ('ASAN_OPTIONS' in os.environ) else 1) + + def setUp(self): + # Set up the test. + gdbremote_testcase.GdbRemoteTestCaseBase.setUp(self) + + # Create a listener on a local port. + self.listener_socket = self.create_listener_socket() + self.assertIsNotNone(self.listener_socket) + self.listener_port = self.listener_socket.getsockname()[1] + + def create_listener_socket(self, timeout_seconds=_DEFAULT_TIMEOUT): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.assertIsNotNone(sock) + + sock.settimeout(timeout_seconds) + sock.bind(("127.0.0.1", 0)) + sock.listen(1) + + def tear_down_listener(): + try: + sock.shutdown(socket.SHUT_RDWR) + except: + # ignore + None + + self.addTearDownHook(tear_down_listener) + return sock + + def reverse_connect_works(self): + # Indicate stub startup should do a reverse connect. + appended_stub_args = ["--reverse-connect"] + if self.debug_monitor_extra_args: + self.debug_monitor_extra_args += appended_stub_args + else: + self.debug_monitor_extra_args = appended_stub_args + + self.stub_hostname = "127.0.0.1" + self.port = self.listener_port + + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-.*-android", triple): + self.forward_adb_port( + self.port, + self.port, + "reverse", + self.stub_device) + + # Start the stub. + server = self.launch_debug_monitor(logfile=sys.stdout) + self.assertIsNotNone(server) + self.assertTrue( + lldbgdbserverutils.process_is_running( + server.pid, True)) + + # Listen for the stub's connection to us. + (stub_socket, address) = self.listener_socket.accept() + self.assertIsNotNone(stub_socket) + self.assertIsNotNone(address) + print("connected to stub {} on {}".format( + address, stub_socket.getsockname())) + + # Verify we can do the handshake. If that works, we'll call it good. + self.do_handshake(stub_socket, timeout_seconds=self._DEFAULT_TIMEOUT) + + # Clean up. 
+ stub_socket.shutdown(socket.SHUT_RDWR) + + @debugserver_test + @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet + def test_reverse_connect_works_debugserver(self): + self.init_debugserver_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() + + @llgs_test + @skipIfRemote # reverse connect is not a supported use case for now + def test_reverse_connect_works_llgs(self): + self.init_llgs_test(use_named_pipe=False) + self.set_inferior_startup_launch() + self.reverse_connect_works() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py new file mode 100644 index 00000000000..4641b175bca --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/commandline/TestStubSetSID.py @@ -0,0 +1,86 @@ + + +import gdbremote_testcase +import lldbgdbserverutils +import os +import select +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestStubSetSIDTestCase(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def get_stub_sid(self, extra_stub_args=None): + # Launch debugserver + if extra_stub_args: + self.debug_monitor_extra_args += extra_stub_args + + server = self.launch_debug_monitor() + self.assertIsNotNone(server) + self.assertTrue( + lldbgdbserverutils.process_is_running( + server.pid, True)) + + # Get the process id for the stub. 
+ return os.getsid(server.pid) + + def sid_is_same_without_setsid(self): + stub_sid = self.get_stub_sid() + self.assertEqual(stub_sid, os.getsid(0)) + + def sid_is_different_with_setsid(self): + stub_sid = self.get_stub_sid(["--setsid"]) + self.assertNotEqual(stub_sid, os.getsid(0)) + + def sid_is_different_with_S(self): + stub_sid = self.get_stub_sid(["-S"]) + self.assertNotEqual(stub_sid, os.getsid(0)) + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_same_without_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @skipIfWindows + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + @expectedFailureAll(oslist=['freebsd']) + def test_sid_is_same_without_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_same_without_setsid() + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_setsid_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @skipIfWindows + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_setsid_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_setsid() + + @debugserver_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def 
test_sid_is_different_with_S_debugserver(self): + self.init_debugserver_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() + + @skipIfWindows + @llgs_test + @skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target + def test_sid_is_different_with_S_llgs(self): + self.init_llgs_test() + self.set_inferior_startup_launch() + self.sid_is_different_with_S() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py new file mode 100644 index 00000000000..ac611bcca16 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/gdbremote_testcase.py @@ -0,0 +1,1684 @@ +""" +Base class for gdb-remote test cases. +""" + +from __future__ import division, print_function + + +import errno +import os +import os.path +import random +import re +import select +import socket +import subprocess +import sys +import tempfile +import time +from lldbsuite.test import configuration +from lldbsuite.test.lldbtest import * +from lldbsuite.support import seven +from lldbgdbserverutils import * +import logging + + +class _ConnectionRefused(IOError): + pass + + +class GdbRemoteTestCaseBase(TestBase): + + NO_DEBUG_INFO_TESTCASE = True + + _TIMEOUT_SECONDS = 120 * (10 if ('ASAN_OPTIONS' in os.environ) else 1) + _DEFAULT_TIMEOUT = 10 * (10 if ('ASAN_OPTIONS' in os.environ) else 1) + _READ_TIMEOUT = 5 * (10 if ('ASAN_OPTIONS' in os.environ) else 1) + _WAIT_TIMEOUT = 5 * (10 if ('ASAN_OPTIONS' in os.environ) else 1) + + _GDBREMOTE_KILL_PACKET = "$k#6b" + + # Start the inferior separately, attach to the inferior on the stub + # command line. + _STARTUP_ATTACH = "attach" + # Start the inferior separately, start the stub without attaching, allow + # the test to attach to the inferior however it wants (e.g. $vAttach;pid). 
+ _STARTUP_ATTACH_MANUALLY = "attach_manually" + # Start the stub, and launch the inferior with an $A packet via the + # initial packet stream. + _STARTUP_LAUNCH = "launch" + + # GDB Signal numbers that are not target-specific used for common + # exceptions + TARGET_EXC_BAD_ACCESS = 0x91 + TARGET_EXC_BAD_INSTRUCTION = 0x92 + TARGET_EXC_ARITHMETIC = 0x93 + TARGET_EXC_EMULATION = 0x94 + TARGET_EXC_SOFTWARE = 0x95 + TARGET_EXC_BREAKPOINT = 0x96 + + _verbose_log_handler = None + _log_formatter = logging.Formatter( + fmt='%(asctime)-15s %(levelname)-8s %(message)s') + + def setUpBaseLogging(self): + self.logger = logging.getLogger(__name__) + + if len(self.logger.handlers) > 0: + return # We have set up this handler already + + self.logger.propagate = False + self.logger.setLevel(logging.DEBUG) + + # log all warnings to stderr + handler = logging.StreamHandler() + handler.setLevel(logging.WARNING) + handler.setFormatter(self._log_formatter) + self.logger.addHandler(handler) + + def isVerboseLoggingRequested(self): + # We will report our detailed logs if the user requested that the "gdb-remote" channel is + # logged. 
+ return any(("gdb-remote" in channel) + for channel in lldbtest_config.channels) + + def setUp(self): + TestBase.setUp(self) + + self.setUpBaseLogging() + self.debug_monitor_extra_args = [] + self._pump_queues = socket_packet_pump.PumpQueues() + + if self.isVerboseLoggingRequested(): + # If requested, full logs go to a log file + self._verbose_log_handler = logging.FileHandler( + self.log_basename + "-host.log") + self._verbose_log_handler.setFormatter(self._log_formatter) + self._verbose_log_handler.setLevel(logging.DEBUG) + self.logger.addHandler(self._verbose_log_handler) + + self.test_sequence = GdbRemoteTestSequence(self.logger) + self.set_inferior_startup_launch() + self.port = self.get_next_port() + self.named_pipe_path = None + self.named_pipe = None + self.named_pipe_fd = None + self.stub_sends_two_stop_notifications_on_kill = False + if configuration.lldb_platform_url: + if configuration.lldb_platform_url.startswith('unix-'): + url_pattern = '(.+)://\[?(.+?)\]?/.*' + else: + url_pattern = '(.+)://(.+):\d+' + scheme, host = re.match( + url_pattern, configuration.lldb_platform_url).groups() + if configuration.lldb_platform_name == 'remote-android' and host != 'localhost': + self.stub_device = host + self.stub_hostname = 'localhost' + else: + self.stub_device = None + self.stub_hostname = host + else: + self.stub_hostname = "localhost" + + def tearDown(self): + self._pump_queues.verify_queues_empty() + + self.logger.removeHandler(self._verbose_log_handler) + self._verbose_log_handler = None + TestBase.tearDown(self) + + def getLocalServerLogFile(self): + return self.log_basename + "-server.log" + + def setUpServerLogging(self, is_llgs): + if len(lldbtest_config.channels) == 0: + return # No logging requested + + if lldb.remote_platform: + log_file = lldbutil.join_remote_paths( + lldb.remote_platform.GetWorkingDirectory(), "server.log") + else: + log_file = self.getLocalServerLogFile() + + if is_llgs: + self.debug_monitor_extra_args.append("--log-file=" + 
log_file) + self.debug_monitor_extra_args.append( + "--log-channels={}".format(":".join(lldbtest_config.channels))) + else: + self.debug_monitor_extra_args = [ + "--log-file=" + log_file, "--log-flags=0x800000"] + + def get_next_port(self): + return 12000 + random.randint(0, 3999) + + def reset_test_sequence(self): + self.test_sequence = GdbRemoteTestSequence(self.logger) + + def create_named_pipe(self): + # Create a temp dir and name for a pipe. + temp_dir = tempfile.mkdtemp() + named_pipe_path = os.path.join(temp_dir, "stub_port_number") + + # Create the named pipe. + os.mkfifo(named_pipe_path) + + # Open the read side of the pipe in non-blocking mode. This will + # return right away, ready or not. + named_pipe_fd = os.open(named_pipe_path, os.O_RDONLY | os.O_NONBLOCK) + + # Create the file for the named pipe. Note this will follow semantics of + # a non-blocking read side of a named pipe, which has different semantics + # than a named pipe opened for read in non-blocking mode. + named_pipe = os.fdopen(named_pipe_fd, "r") + self.assertIsNotNone(named_pipe) + + def shutdown_named_pipe(): + # Close the pipe. + try: + named_pipe.close() + except: + print("failed to close named pipe") + None + + # Delete the pipe. + try: + os.remove(named_pipe_path) + except: + print("failed to delete named pipe: {}".format(named_pipe_path)) + None + + # Delete the temp directory. + try: + os.rmdir(temp_dir) + except: + print( + "failed to delete temp dir: {}, directory contents: '{}'".format( + temp_dir, os.listdir(temp_dir))) + None + + # Add the shutdown hook to clean up the named pipe. + self.addTearDownHook(shutdown_named_pipe) + + # Clear the port so the stub selects a port number. + self.port = 0 + + return (named_pipe_path, named_pipe, named_pipe_fd) + + def get_stub_port_from_named_socket(self, read_timeout_seconds): + # Wait for something to read with a max timeout. 
+ (ready_readers, _, _) = select.select( + [self.named_pipe_fd], [], [], read_timeout_seconds) + self.assertIsNotNone( + ready_readers, + "write side of pipe has not written anything - stub isn't writing to pipe.") + self.assertNotEqual( + len(ready_readers), + 0, + "write side of pipe has not written anything - stub isn't writing to pipe.") + + # Read the port from the named pipe. + stub_port_raw = self.named_pipe.read() + self.assertIsNotNone(stub_port_raw) + self.assertNotEqual( + len(stub_port_raw), + 0, + "no content to read on pipe") + + # Trim null byte, convert to int. + stub_port_raw = stub_port_raw[:-1] + stub_port = int(stub_port_raw) + self.assertTrue(stub_port > 0) + + return stub_port + + def init_llgs_test(self, use_named_pipe=True): + if lldb.remote_platform: + # Remote platforms don't support named pipe based port negotiation + use_named_pipe = False + + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-windows", triple): + self.skipTest("Remotely testing is not supported on Windows yet.") + + # Grab the ppid from /proc/[shell pid]/stat + err, retcode, shell_stat = self.run_platform_command( + "cat /proc/$$/stat") + self.assertTrue( + err.Success() and retcode == 0, + "Failed to read file /proc/$$/stat: %s, retcode: %d" % + (err.GetCString(), + retcode)) + + # [pid] ([executable]) [state] [*ppid*] + pid = re.match(r"^\d+ \(.+\) . (\d+)", shell_stat).group(1) + err, retcode, ls_output = self.run_platform_command( + "ls -l /proc/%s/exe" % pid) + self.assertTrue( + err.Success() and retcode == 0, + "Failed to read file /proc/%s/exe: %s, retcode: %d" % + (pid, + err.GetCString(), + retcode)) + exe = ls_output.split()[-1] + + # If the binary has been deleted, the link name has " (deleted)" appended. + # Remove if it's there. + self.debug_monitor_exe = re.sub(r' \(deleted\)$', '', exe) + else: + # Need to figure out how to create a named pipe on Windows. 
+ if platform.system() == 'Windows': + use_named_pipe = False + + self.debug_monitor_exe = get_lldb_server_exe() + if not self.debug_monitor_exe: + self.skipTest("lldb-server exe not found") + + self.debug_monitor_extra_args = ["gdbserver"] + self.setUpServerLogging(is_llgs=True) + + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, + self.named_pipe_fd) = self.create_named_pipe() + + def init_debugserver_test(self, use_named_pipe=True): + self.debug_monitor_exe = get_debugserver_exe() + if not self.debug_monitor_exe: + self.skipTest("debugserver exe not found") + self.setUpServerLogging(is_llgs=False) + if use_named_pipe: + (self.named_pipe_path, self.named_pipe, + self.named_pipe_fd) = self.create_named_pipe() + # The debugserver stub has a race on handling the 'k' command, so it sends an X09 right away, then sends the real X notification + # when the process truly dies. + self.stub_sends_two_stop_notifications_on_kill = True + + def forward_adb_port(self, source, target, direction, device): + adb = ['adb'] + (['-s', device] if device else []) + [direction] + + def remove_port_forward(): + subprocess.call(adb + ["--remove", "tcp:%d" % source]) + + subprocess.call(adb + ["tcp:%d" % source, "tcp:%d" % target]) + self.addTearDownHook(remove_port_forward) + + def _verify_socket(self, sock): + # Normally, when the remote stub is not ready, we will get ECONNREFUSED during the + # connect() attempt. However, due to the way how ADB forwarding works, on android targets + # the connect() will always be successful, but the connection will be immediately dropped + # if ADB could not connect on the remote side. This function tries to detect this + # situation, and report it as "connection refused" so that the upper layers attempt the + # connection again. + triple = self.dbg.GetSelectedPlatform().GetTriple() + if not re.match(".*-.*-.*-android", triple): + return # Not android. 
+ can_read, _, _ = select.select([sock], [], [], 0.1) + if sock not in can_read: + return # Data is not available, but the connection is alive. + if len(sock.recv(1, socket.MSG_PEEK)) == 0: + raise _ConnectionRefused() # Got EOF, connection dropped. + + def create_socket(self): + sock = socket.socket() + logger = self.logger + + triple = self.dbg.GetSelectedPlatform().GetTriple() + if re.match(".*-.*-.*-android", triple): + self.forward_adb_port( + self.port, + self.port, + "forward", + self.stub_device) + + logger.info( + "Connecting to debug monitor on %s:%d", + self.stub_hostname, + self.port) + connect_info = (self.stub_hostname, self.port) + try: + sock.connect(connect_info) + except socket.error as serr: + if serr.errno == errno.ECONNREFUSED: + raise _ConnectionRefused() + raise serr + + def shutdown_socket(): + if sock: + try: + # send the kill packet so lldb-server shuts down gracefully + sock.sendall(GdbRemoteTestCaseBase._GDBREMOTE_KILL_PACKET) + except: + logger.warning( + "failed to send kill packet to debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + + try: + sock.close() + except: + logger.warning( + "failed to close socket to debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + + self.addTearDownHook(shutdown_socket) + + self._verify_socket(sock) + + return sock + + def set_inferior_startup_launch(self): + self._inferior_startup = self._STARTUP_LAUNCH + + def set_inferior_startup_attach(self): + self._inferior_startup = self._STARTUP_ATTACH + + def set_inferior_startup_attach_manually(self): + self._inferior_startup = self._STARTUP_ATTACH_MANUALLY + + def get_debug_monitor_command_line_args(self, attach_pid=None): + if lldb.remote_platform: + commandline_args = self.debug_monitor_extra_args + \ + ["*:{}".format(self.port)] + else: + commandline_args = self.debug_monitor_extra_args + \ + ["127.0.0.1:{}".format(self.port)] + + if attach_pid: + commandline_args += ["--attach=%d" % attach_pid] + if self.named_pipe_path: + 
commandline_args += ["--named-pipe", self.named_pipe_path] + return commandline_args + + def get_target_byte_order(self): + inferior_exe_path = self.getBuildArtifact("a.out") + target = self.dbg.CreateTarget(inferior_exe_path) + return target.GetByteOrder() + + def launch_debug_monitor(self, attach_pid=None, logfile=None): + # Create the command line. + commandline_args = self.get_debug_monitor_command_line_args( + attach_pid=attach_pid) + + # Start the server. + server = self.spawnSubprocess( + self.debug_monitor_exe, + commandline_args, + install_remote=False) + self.addTearDownHook(self.cleanupSubprocesses) + self.assertIsNotNone(server) + + # If we're receiving the stub's listening port from the named pipe, do + # that here. + if self.named_pipe: + self.port = self.get_stub_port_from_named_socket(self._READ_TIMEOUT) + + return server + + def connect_to_debug_monitor(self, attach_pid=None): + if self.named_pipe: + # Create the stub. + server = self.launch_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + def shutdown_debug_monitor(): + try: + server.terminate() + except: + logger.warning( + "failed to terminate server for debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + # Schedule debug monitor to be shut down during teardown. + logger = self.logger + + # Attach to the stub and return a socket opened to it. + self.sock = self.create_socket() + return server + + # We're using a random port algorithm to try not to collide with other ports, + # and retry a max # times. + attempts = 0 + MAX_ATTEMPTS = 20 + + while attempts < MAX_ATTEMPTS: + server = self.launch_debug_monitor(attach_pid=attach_pid) + + # Schedule debug monitor to be shut down during teardown. 
+ logger = self.logger + + def shutdown_debug_monitor(): + try: + server.terminate() + except: + logger.warning( + "failed to terminate server for debug monitor: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_debug_monitor) + + connect_attemps = 0 + MAX_CONNECT_ATTEMPTS = 10 + + while connect_attemps < MAX_CONNECT_ATTEMPTS: + # Create a socket to talk to the server + try: + logger.info("Connect attempt %d", connect_attemps + 1) + self.sock = self.create_socket() + return server + except _ConnectionRefused as serr: + # Ignore, and try again. + pass + time.sleep(0.5) + connect_attemps += 1 + + # We should close the server here to be safe. + server.terminate() + + # Increment attempts. + print( + "connect to debug monitor on port %d failed, attempt #%d of %d" % + (self.port, attempts + 1, MAX_ATTEMPTS)) + attempts += 1 + + # And wait a random length of time before next attempt, to avoid + # collisions. + time.sleep(random.randint(1, 5)) + + # Now grab a new port number. + self.port = self.get_next_port() + + raise Exception( + "failed to create a socket to the launched debug monitor after %d tries" % + attempts) + + def launch_process_for_attach( + self, + inferior_args=None, + sleep_seconds=3, + exe_path=None): + # We're going to start a child process that the debug monitor stub can later attach to. + # This process needs to be started so that it just hangs around for a while. We'll + # have it sleep. 
+ if not exe_path: + exe_path = self.getBuildArtifact("a.out") + + args = [] + if inferior_args: + args.extend(inferior_args) + if sleep_seconds: + args.append("sleep:%d" % sleep_seconds) + + inferior = self.spawnSubprocess(exe_path, args) + + def shutdown_process_for_attach(): + try: + inferior.terminate() + except: + logger.warning( + "failed to terminate inferior process for attach: {}; ignoring".format( + sys.exc_info()[0])) + self.addTearDownHook(shutdown_process_for_attach) + return inferior + + def prep_debug_monitor_and_inferior( + self, + inferior_args=None, + inferior_sleep_seconds=3, + inferior_exe_path=None, + inferior_env=None): + """Prep the debug monitor, the inferior, and the expected packet stream. + + Handle the separate cases of using the debug monitor in attach-to-inferior mode + and in launch-inferior mode. + + For attach-to-inferior mode, the inferior process is first started, then + the debug monitor is started in attach to pid mode (using --attach on the + stub command line), and the no-ack-mode setup is appended to the packet + stream. The packet stream is not yet executed, ready to have more expected + packet entries added to it. + + For launch-inferior mode, the stub is first started, then no ack mode is + setup on the expected packet stream, then the verified launch packets are added + to the expected socket stream. The packet stream is not yet executed, ready + to have more expected packet entries added to it. + + The return value is: + {inferior:<inferior>, server:<server>} + """ + inferior = None + attach_pid = None + + if self._inferior_startup == self._STARTUP_ATTACH or self._inferior_startup == self._STARTUP_ATTACH_MANUALLY: + # Launch the process that we'll use as the inferior. 
+ inferior = self.launch_process_for_attach( + inferior_args=inferior_args, + sleep_seconds=inferior_sleep_seconds, + exe_path=inferior_exe_path) + self.assertIsNotNone(inferior) + self.assertTrue(inferior.pid > 0) + if self._inferior_startup == self._STARTUP_ATTACH: + # In this case, we want the stub to attach via the command + # line, so set the command line attach pid here. + attach_pid = inferior.pid + + if self._inferior_startup == self._STARTUP_LAUNCH: + # Build launch args + if not inferior_exe_path: + inferior_exe_path = self.getBuildArtifact("a.out") + + if lldb.remote_platform: + remote_path = lldbutil.append_to_process_working_directory(self, + os.path.basename(inferior_exe_path)) + remote_file_spec = lldb.SBFileSpec(remote_path, False) + err = lldb.remote_platform.Install(lldb.SBFileSpec( + inferior_exe_path, True), remote_file_spec) + if err.Fail(): + raise Exception( + "remote_platform.Install('%s', '%s') failed: %s" % + (inferior_exe_path, remote_path, err)) + inferior_exe_path = remote_path + + launch_args = [inferior_exe_path] + if inferior_args: + launch_args.extend(inferior_args) + + # Launch the debug monitor stub, attaching to the inferior. 
+ server = self.connect_to_debug_monitor(attach_pid=attach_pid) + self.assertIsNotNone(server) + + # Build the expected protocol stream + self.add_no_ack_remote_stream() + if inferior_env: + for name, value in inferior_env.items(): + self.add_set_environment_packets(name, value) + if self._inferior_startup == self._STARTUP_LAUNCH: + self.add_verified_launch_packets(launch_args) + + return {"inferior": inferior, "server": server} + + def expect_socket_recv( + self, + sock, + expected_content_regex, + timeout_seconds): + response = "" + timeout_time = time.time() + timeout_seconds + + while not expected_content_regex.match( + response) and time.time() < timeout_time: + can_read, _, _ = select.select([sock], [], [], timeout_seconds) + if can_read and sock in can_read: + recv_bytes = sock.recv(4096) + if recv_bytes: + response += seven.bitcast_to_string(recv_bytes) + + self.assertTrue(expected_content_regex.match(response)) + + def expect_socket_send(self, sock, content, timeout_seconds): + request_bytes_remaining = content + timeout_time = time.time() + timeout_seconds + + while len(request_bytes_remaining) > 0 and time.time() < timeout_time: + _, can_write, _ = select.select([], [sock], [], timeout_seconds) + if can_write and sock in can_write: + written_byte_count = sock.send(request_bytes_remaining.encode()) + request_bytes_remaining = request_bytes_remaining[ + written_byte_count:] + self.assertEqual(len(request_bytes_remaining), 0) + + def do_handshake(self, stub_socket, timeout_seconds=None): + if not timeout_seconds: + timeout_seconds = self._WAIT_TIMEOUT + + # Write the ack. + self.expect_socket_send(stub_socket, "+", timeout_seconds) + + # Send the start no ack mode packet. 
+ NO_ACK_MODE_REQUEST = "$QStartNoAckMode#b0" + bytes_sent = stub_socket.send(NO_ACK_MODE_REQUEST.encode()) + self.assertEqual(bytes_sent, len(NO_ACK_MODE_REQUEST)) + + # Receive the ack and "OK" + self.expect_socket_recv(stub_socket, re.compile( + r"^\+\$OK#[0-9a-fA-F]{2}$"), timeout_seconds) + + # Send the final ack. + self.expect_socket_send(stub_socket, "+", timeout_seconds) + + def add_no_ack_remote_stream(self): + self.test_sequence.add_log_lines( + ["read packet: +", + "read packet: $QStartNoAckMode#b0", + "send packet: +", + "send packet: $OK#9a", + "read packet: +"], + True) + + def add_verified_launch_packets(self, launch_args): + self.test_sequence.add_log_lines( + ["read packet: %s" % build_gdbremote_A_packet(launch_args), + "send packet: $OK#00", + "read packet: $qLaunchSuccess#a5", + "send packet: $OK#00"], + True) + + def add_thread_suffix_request_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $QThreadSuffixSupported#e4", + "send packet: $OK#00", + ], True) + + def add_process_info_collection_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qProcessInfo#dc", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "process_info_raw"}}], + True) + + def add_set_environment_packets(self, name, value): + self.test_sequence.add_log_lines( + ["read packet: $QEnvironment:" + name + "=" + value + "#00", + "send packet: $OK#00", + ], True) + + _KNOWN_PROCESS_INFO_KEYS = [ + "pid", + "parent-pid", + "real-uid", + "real-gid", + "effective-uid", + "effective-gid", + "cputype", + "cpusubtype", + "ostype", + "triple", + "vendor", + "endian", + "elf_abi", + "ptrsize" + ] + + def parse_process_info_response(self, context): + # Ensure we have a process info response. + self.assertIsNotNone(context) + process_info_raw = context.get("process_info_raw") + self.assertIsNotNone(process_info_raw) + + # Pull out key:value; pairs. 
+ process_info_dict = { + match.group(1): match.group(2) for match in re.finditer( + r"([^:]+):([^;]+);", process_info_raw)} + + # Validate keys are known. + for (key, val) in list(process_info_dict.items()): + self.assertTrue(key in self._KNOWN_PROCESS_INFO_KEYS) + self.assertIsNotNone(val) + + return process_info_dict + + def add_register_info_collection_packets(self): + self.test_sequence.add_log_lines( + [{"type": "multi_response", "query": "qRegisterInfo", "append_iteration_suffix": True, + "end_regex": re.compile(r"^\$(E\d+)?#[0-9a-fA-F]{2}$"), + "save_key": "reg_info_responses"}], + True) + + def parse_register_info_packets(self, context): + """Return an array of register info dictionaries, one per register info.""" + reg_info_responses = context.get("reg_info_responses") + self.assertIsNotNone(reg_info_responses) + + # Parse register infos. + return [parse_reg_info_response(reg_info_response) + for reg_info_response in reg_info_responses] + + def expect_gdbremote_sequence(self, timeout_seconds=None): + if not timeout_seconds: + timeout_seconds = self._TIMEOUT_SECONDS + return expect_lldb_gdbserver_replay( + self, + self.sock, + self.test_sequence, + self._pump_queues, + timeout_seconds, + self.logger) + + _KNOWN_REGINFO_KEYS = [ + "name", + "alt-name", + "bitsize", + "offset", + "encoding", + "format", + "set", + "gcc", + "ehframe", + "dwarf", + "generic", + "container-regs", + "invalidate-regs", + "dynamic_size_dwarf_expr_bytes", + "dynamic_size_dwarf_len" + ] + + def assert_valid_reg_info(self, reg_info): + # Assert we know about all the reginfo keys parsed. + for key in reg_info: + self.assertTrue(key in self._KNOWN_REGINFO_KEYS) + + # Check the bare-minimum expected set of register info keys. 
+ self.assertTrue("name" in reg_info) + self.assertTrue("bitsize" in reg_info) + self.assertTrue("offset" in reg_info) + self.assertTrue("encoding" in reg_info) + self.assertTrue("format" in reg_info) + + def find_pc_reg_info(self, reg_infos): + lldb_reg_index = 0 + for reg_info in reg_infos: + if ("generic" in reg_info) and (reg_info["generic"] == "pc"): + return (lldb_reg_index, reg_info) + lldb_reg_index += 1 + + return (None, None) + + def add_lldb_register_index(self, reg_infos): + """Add a "lldb_register_index" key containing the 0-baed index of each reg_infos entry. + + We'll use this when we want to call packets like P/p with a register index but do so + on only a subset of the full register info set. + """ + self.assertIsNotNone(reg_infos) + + reg_index = 0 + for reg_info in reg_infos: + reg_info["lldb_register_index"] = reg_index + reg_index += 1 + + def add_query_memory_region_packets(self, address): + self.test_sequence.add_log_lines( + ["read packet: $qMemoryRegionInfo:{0:x}#00".format(address), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "memory_region_response"}}], + True) + + def parse_key_val_dict(self, key_val_text, allow_dupes=True): + self.assertIsNotNone(key_val_text) + kv_dict = {} + for match in re.finditer(r";?([^:]+):([^;]+)", key_val_text): + key = match.group(1) + val = match.group(2) + if key in kv_dict: + if allow_dupes: + if isinstance(kv_dict[key], list): + kv_dict[key].append(val) + else: + # Promote to list + kv_dict[key] = [kv_dict[key], val] + else: + self.fail( + "key '{}' already present when attempting to add value '{}' (text='{}', dict={})".format( + key, val, key_val_text, kv_dict)) + else: + kv_dict[key] = val + return kv_dict + + def parse_memory_region_packet(self, context): + # Ensure we have a context. + self.assertIsNotNone(context.get("memory_region_response")) + + # Pull out key:value; pairs. 
+ mem_region_dict = self.parse_key_val_dict( + context.get("memory_region_response")) + + # Validate keys are known. + for (key, val) in list(mem_region_dict.items()): + self.assertTrue( + key in [ + "start", + "size", + "permissions", + "name", + "error"]) + self.assertIsNotNone(val) + + mem_region_dict["name"] = seven.unhexlify(mem_region_dict.get("name", "")) + # Return the dictionary of key-value pairs for the memory region. + return mem_region_dict + + def assert_address_within_memory_region( + self, test_address, mem_region_dict): + self.assertIsNotNone(mem_region_dict) + self.assertTrue("start" in mem_region_dict) + self.assertTrue("size" in mem_region_dict) + + range_start = int(mem_region_dict["start"], 16) + range_size = int(mem_region_dict["size"], 16) + range_end = range_start + range_size + + if test_address < range_start: + self.fail( + "address 0x{0:x} comes before range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format( + test_address, + range_start, + range_end, + range_size)) + elif test_address >= range_end: + self.fail( + "address 0x{0:x} comes after range 0x{1:x} - 0x{2:x} (size 0x{3:x})".format( + test_address, + range_start, + range_end, + range_size)) + + def add_threadinfo_collection_packets(self): + self.test_sequence.add_log_lines( + [{"type": "multi_response", "first_query": "qfThreadInfo", "next_query": "qsThreadInfo", + "append_iteration_suffix": False, "end_regex": re.compile(r"^\$(l)?#[0-9a-fA-F]{2}$"), + "save_key": "threadinfo_responses"}], + True) + + def parse_threadinfo_packets(self, context): + """Return an array of thread ids (decimal ints), one per thread.""" + threadinfo_responses = context.get("threadinfo_responses") + self.assertIsNotNone(threadinfo_responses) + + thread_ids = [] + for threadinfo_response in threadinfo_responses: + new_thread_infos = parse_threadinfo_response(threadinfo_response) + thread_ids.extend(new_thread_infos) + return thread_ids + + def wait_for_thread_count(self, thread_count, timeout_seconds=None): + if 
not timeout_seconds: + timeout_seconds = self._WAIT_TIMEOUT + start_time = time.time() + timeout_time = start_time + timeout_seconds + + actual_thread_count = 0 + while actual_thread_count < thread_count: + self.reset_test_sequence() + self.add_threadinfo_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + + actual_thread_count = len(threads) + + if time.time() > timeout_time: + raise Exception( + 'timed out after {} seconds while waiting for theads: waiting for at least {} threads, found {}'.format( + timeout_seconds, thread_count, actual_thread_count)) + + return threads + + def add_set_breakpoint_packets( + self, + address, + z_packet_type=0, + do_continue=True, + breakpoint_kind=1): + self.test_sequence.add_log_lines( + [ # Set the breakpoint. + "read packet: $Z{2},{0:x},{1}#00".format( + address, breakpoint_kind, z_packet_type), + # Verify the stub could set it. + "send packet: $OK#00", + ], True) + + if (do_continue): + self.test_sequence.add_log_lines( + [ # Continue the inferior. + "read packet: $c#63", + # Expect a breakpoint stop report. + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "stop_signo", + 2: "stop_thread_id"}}, + ], True) + + def add_remove_breakpoint_packets( + self, + address, + z_packet_type=0, + breakpoint_kind=1): + self.test_sequence.add_log_lines( + [ # Remove the breakpoint. + "read packet: $z{2},{0:x},{1}#00".format( + address, breakpoint_kind, z_packet_type), + # Verify the stub could unset it. 
+ "send packet: $OK#00", + ], True) + + def add_qSupported_packets(self): + self.test_sequence.add_log_lines( + ["read packet: $qSupported#00", + {"direction": "send", "regex": r"^\$(.*)#[0-9a-fA-F]{2}", "capture": {1: "qSupported_response"}}, + ], True) + + _KNOWN_QSUPPORTED_STUB_FEATURES = [ + "augmented-libraries-svr4-read", + "PacketSize", + "QStartNoAckMode", + "QThreadSuffixSupported", + "QListThreadsInStopReply", + "qXfer:auxv:read", + "qXfer:libraries:read", + "qXfer:libraries-svr4:read", + "qXfer:features:read", + "qEcho", + "QPassSignals" + ] + + def parse_qSupported_response(self, context): + self.assertIsNotNone(context) + + raw_response = context.get("qSupported_response") + self.assertIsNotNone(raw_response) + + # For values with key=val, the dict key and vals are set as expected. For feature+, feature- and feature?, the + # +,-,? is stripped from the key and set as the value. + supported_dict = {} + for match in re.finditer(r";?([^=;]+)(=([^;]+))?", raw_response): + key = match.group(1) + val = match.group(3) + + # key=val: store as is + if val and len(val) > 0: + supported_dict[key] = val + else: + if len(key) < 2: + raise Exception( + "singular stub feature is too short: must be stub_feature{+,-,?}") + supported_type = key[-1] + key = key[:-1] + if not supported_type in ["+", "-", "?"]: + raise Exception( + "malformed stub feature: final character {} not in expected set (+,-,?)".format(supported_type)) + supported_dict[key] = supported_type + # Ensure we know the supported element + if key not in self._KNOWN_QSUPPORTED_STUB_FEATURES: + raise Exception( + "unknown qSupported stub feature reported: %s" % + key) + + return supported_dict + + def run_process_then_stop(self, run_seconds=1): + # Tell the stub to continue. + self.test_sequence.add_log_lines( + ["read packet: $vCont;c#a8"], + True) + context = self.expect_gdbremote_sequence() + + # Wait for run_seconds. + time.sleep(run_seconds) + + # Send an interrupt, capture a T response. 
+ self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: {}".format(chr(3)), + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$", "capture": {1: "stop_result"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_result")) + + return context + + def continue_process_and_wait_for_stop(self): + self.test_sequence.add_log_lines( + [ + "read packet: $vCont;c#a8", + { + "direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_signo", 2: "stop_key_val_text"}, + }, + ], + True, + ) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + return self.parse_interrupt_packets(context) + + def select_modifiable_register(self, reg_infos): + """Find a register that can be read/written freely.""" + PREFERRED_REGISTER_NAMES = set(["rax", ]) + + # First check for the first register from the preferred register name + # set. + alternative_register_index = None + + self.assertIsNotNone(reg_infos) + for reg_info in reg_infos: + if ("name" in reg_info) and ( + reg_info["name"] in PREFERRED_REGISTER_NAMES): + # We found a preferred register. Use it. + return reg_info["lldb_register_index"] + if ("generic" in reg_info) and (reg_info["generic"] == "fp" or + reg_info["generic"] == "arg1"): + # A frame pointer or first arg register will do as a + # register to modify temporarily. + alternative_register_index = reg_info["lldb_register_index"] + + # We didn't find a preferred register. Return whatever alternative register + # we found, if any. 
+ return alternative_register_index + + def extract_registers_from_stop_notification(self, stop_key_vals_text): + self.assertIsNotNone(stop_key_vals_text) + kv_dict = self.parse_key_val_dict(stop_key_vals_text) + + registers = {} + for (key, val) in list(kv_dict.items()): + if re.match(r"^[0-9a-fA-F]+$", key): + registers[int(key, 16)] = val + return registers + + def gather_register_infos(self): + self.reset_test_sequence() + self.add_register_info_collection_packets() + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + + return reg_infos + + def find_generic_register_with_name(self, reg_infos, generic_name): + self.assertIsNotNone(reg_infos) + for reg_info in reg_infos: + if ("generic" in reg_info) and ( + reg_info["generic"] == generic_name): + return reg_info + return None + + def decode_gdbremote_binary(self, encoded_bytes): + decoded_bytes = "" + i = 0 + while i < len(encoded_bytes): + if encoded_bytes[i] == "}": + # Handle escaped char. + self.assertTrue(i + 1 < len(encoded_bytes)) + decoded_bytes += chr(ord(encoded_bytes[i + 1]) ^ 0x20) + i += 2 + elif encoded_bytes[i] == "*": + # Handle run length encoding. + self.assertTrue(len(decoded_bytes) > 0) + self.assertTrue(i + 1 < len(encoded_bytes)) + repeat_count = ord(encoded_bytes[i + 1]) - 29 + decoded_bytes += decoded_bytes[-1] * repeat_count + i += 2 + else: + decoded_bytes += encoded_bytes[i] + i += 1 + return decoded_bytes + + def build_auxv_dict(self, endian, word_size, auxv_data): + self.assertIsNotNone(endian) + self.assertIsNotNone(word_size) + self.assertIsNotNone(auxv_data) + + auxv_dict = {} + + # PowerPC64le's auxvec has a special key that must be ignored. + # This special key may be used multiple times, resulting in + # multiple key/value pairs with the same key, which would otherwise + # break this test check for repeated keys. 
+ # + # AT_IGNOREPPC = 22 + ignored_keys_for_arch = { 'powerpc64le' : [22] } + arch = self.getArchitecture() + ignore_keys = None + if arch in ignored_keys_for_arch: + ignore_keys = ignored_keys_for_arch[arch] + + while len(auxv_data) > 0: + # Chop off key. + raw_key = auxv_data[:word_size] + auxv_data = auxv_data[word_size:] + + # Chop of value. + raw_value = auxv_data[:word_size] + auxv_data = auxv_data[word_size:] + + # Convert raw text from target endian. + key = unpack_endian_binary_string(endian, raw_key) + value = unpack_endian_binary_string(endian, raw_value) + + if ignore_keys and key in ignore_keys: + continue + + # Handle ending entry. + if key == 0: + self.assertEqual(value, 0) + return auxv_dict + + # The key should not already be present. + self.assertFalse(key in auxv_dict) + auxv_dict[key] = value + + self.fail( + "should not reach here - implies required double zero entry not found") + return auxv_dict + + def read_binary_data_in_chunks(self, command_prefix, chunk_length): + """Collect command_prefix{offset:x},{chunk_length:x} until a single 'l' or 'l' with data is returned.""" + offset = 0 + done = False + decoded_data = "" + + while not done: + # Grab the next iteration of data. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: ${}{:x},{:x}:#00".format( + command_prefix, + offset, + chunk_length), + { + "direction": "send", + "regex": re.compile( + r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", + re.MULTILINE | re.DOTALL), + "capture": { + 1: "response_type", + 2: "content_raw"}}], + True) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + response_type = context.get("response_type") + self.assertIsNotNone(response_type) + self.assertTrue(response_type in ["l", "m"]) + + # Move offset along. + offset += chunk_length + + # Figure out if we're done. We're done if the response type is l. + done = response_type == "l" + + # Decode binary data. 
+ content_raw = context.get("content_raw") + if content_raw and len(content_raw) > 0: + self.assertIsNotNone(content_raw) + decoded_data += self.decode_gdbremote_binary(content_raw) + return decoded_data + + def add_interrupt_packets(self): + self.test_sequence.add_log_lines([ + # Send the intterupt. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})(.*)#[0-9a-fA-F]{2}$", + "capture": {1: "stop_signo", + 2: "stop_key_val_text"}}, + ], True) + + def parse_interrupt_packets(self, context): + self.assertIsNotNone(context.get("stop_signo")) + self.assertIsNotNone(context.get("stop_key_val_text")) + return (int(context["stop_signo"], 16), self.parse_key_val_dict( + context["stop_key_val_text"])) + + def add_QSaveRegisterState_packets(self, thread_id): + if thread_id: + # Use the thread suffix form. + request = "read packet: $QSaveRegisterState;thread:{:x}#00".format( + thread_id) + else: + request = "read packet: $QSaveRegisterState#00" + + self.test_sequence.add_log_lines([request, + {"direction": "send", + "regex": r"^\$(E?.*)#[0-9a-fA-F]{2}$", + "capture": {1: "save_response"}}, + ], + True) + + def parse_QSaveRegisterState_response(self, context): + self.assertIsNotNone(context) + + save_response = context.get("save_response") + self.assertIsNotNone(save_response) + + if len(save_response) < 1 or save_response[0] == "E": + # error received + return (False, None) + else: + return (True, int(save_response)) + + def add_QRestoreRegisterState_packets(self, save_id, thread_id=None): + if thread_id: + # Use the thread suffix form. 
+ request = "read packet: $QRestoreRegisterState:{};thread:{:x}#00".format( + save_id, thread_id) + else: + request = "read packet: $QRestoreRegisterState:{}#00".format( + save_id) + + self.test_sequence.add_log_lines([ + request, + "send packet: $OK#00" + ], True) + + def flip_all_bits_in_each_register_value( + self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + + successful_writes = 0 + failed_writes = 0 + + for reg_info in reg_infos: + # Use the lldb register index added to the reg info. We're not necessarily + # working off a full set of register infos, so an inferred register + # index could be wrong. + reg_index = reg_info["lldb_register_index"] + self.assertIsNotNone(reg_index) + + reg_byte_size = int(reg_info["bitsize"]) // 8 + self.assertTrue(reg_byte_size > 0) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format( + reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read the existing value. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify the response length. + p_response = context.get("p_response") + self.assertIsNotNone(p_response) + initial_reg_value = unpack_register_hex_unsigned( + endian, p_response) + + # Flip the value by xoring with all 1s + all_one_bits_raw = "ff" * (int(reg_info["bitsize"]) // 8) + flipped_bits_int = initial_reg_value ^ int(all_one_bits_raw, 16) + # print("reg (index={}, name={}): val={}, flipped bits (int={}, hex={:x})".format(reg_index, reg_info["name"], initial_reg_value, flipped_bits_int, flipped_bits_int)) + + # Handle thread suffix for P. 
+ if thread_id: + P_request = "read packet: $P{:x}={};thread:{:x}#00".format( + reg_index, pack_register_hex( + endian, flipped_bits_int, byte_size=reg_byte_size), thread_id) + else: + P_request = "read packet: $P{:x}={}#00".format( + reg_index, pack_register_hex( + endian, flipped_bits_int, byte_size=reg_byte_size)) + + # Write the flipped value to the register. + self.reset_test_sequence() + self.test_sequence.add_log_lines([P_request, + {"direction": "send", + "regex": r"^\$(OK|E[0-9a-fA-F]+)#[0-9a-fA-F]{2}", + "capture": {1: "P_response"}}, + ], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Determine if the write succeeded. There are a handful of registers that can fail, or partially fail + # (e.g. flags, segment selectors, etc.) due to register value restrictions. Don't worry about them + # all flipping perfectly. + P_response = context.get("P_response") + self.assertIsNotNone(P_response) + if P_response == "OK": + successful_writes += 1 + else: + failed_writes += 1 + # print("reg (index={}, name={}) write FAILED (error: {})".format(reg_index, reg_info["name"], P_response)) + + # Read back the register value, ensure it matches the flipped + # value. + if P_response == "OK": + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + verify_p_response_raw = context.get("p_response") + self.assertIsNotNone(verify_p_response_raw) + verify_bits = unpack_register_hex_unsigned( + endian, verify_p_response_raw) + + if verify_bits != flipped_bits_int: + # Some registers, like mxcsrmask and others, will permute what's written. Adjust succeed/fail counts. 
+ # print("reg (index={}, name={}): read verify FAILED: wrote {:x}, verify read back {:x}".format(reg_index, reg_info["name"], flipped_bits_int, verify_bits)) + successful_writes -= 1 + failed_writes += 1 + + return (successful_writes, failed_writes) + + def is_bit_flippable_register(self, reg_info): + if not reg_info: + return False + if not "set" in reg_info: + return False + if reg_info["set"] != "General Purpose Registers": + return False + if ("container-regs" in reg_info) and ( + len(reg_info["container-regs"]) > 0): + # Don't try to bit flip registers contained in another register. + return False + if re.match("^.s$", reg_info["name"]): + # This is a 2-letter register name that ends in "s", like a segment register. + # Don't try to bit flip these. + return False + if re.match("^(c|)psr$", reg_info["name"]): + # This is an ARM program status register; don't flip it. + return False + # Okay, this looks fine-enough. + return True + + def read_register_values(self, reg_infos, endian, thread_id=None): + self.assertIsNotNone(reg_infos) + values = {} + + for reg_info in reg_infos: + # We append a register index when load reg infos so we can work + # with subsets. + reg_index = reg_info.get("lldb_register_index") + self.assertIsNotNone(reg_index) + + # Handle thread suffix. + if thread_id: + p_request = "read packet: $p{:x};thread:{:x}#00".format( + reg_index, thread_id) + else: + p_request = "read packet: $p{:x}#00".format(reg_index) + + # Read it with p. + self.reset_test_sequence() + self.test_sequence.add_log_lines([ + p_request, + {"direction": "send", "regex": r"^\$([0-9a-fA-F]+)#", "capture": {1: "p_response"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Convert value from target endian to integral. 
+ p_response = context.get("p_response") + self.assertIsNotNone(p_response) + self.assertTrue(len(p_response) > 0) + self.assertFalse(p_response[0] == "E") + + values[reg_index] = unpack_register_hex_unsigned( + endian, p_response) + + return values + + def add_vCont_query_packets(self): + self.test_sequence.add_log_lines(["read packet: $vCont?#49", + {"direction": "send", + "regex": r"^\$(vCont)?(.*)#[0-9a-fA-F]{2}$", + "capture": {2: "vCont_query_response"}}, + ], + True) + + def parse_vCont_query_response(self, context): + self.assertIsNotNone(context) + vCont_query_response = context.get("vCont_query_response") + + # Handle case of no vCont support at all - in which case the capture + # group will be none or zero length. + if not vCont_query_response or len(vCont_query_response) == 0: + return {} + + return {key: 1 for key in vCont_query_response.split( + ";") if key and len(key) > 0} + + def count_single_steps_until_true( + self, + thread_id, + predicate, + args, + max_step_count=100, + use_Hc_packet=True, + step_instruction="s"): + """Used by single step test that appears in a few different contexts.""" + single_step_count = 0 + + while single_step_count < max_step_count: + self.assertIsNotNone(thread_id) + + # Build the packet for the single step instruction. We replace + # {thread}, if present, with the thread_id. + step_packet = "read packet: ${}#00".format( + re.sub(r"{thread}", "{:x}".format(thread_id), step_instruction)) + # print("\nstep_packet created: {}\n".format(step_packet)) + + # Single step. + self.reset_test_sequence() + if use_Hc_packet: + self.test_sequence.add_log_lines( + [ # Set the continue thread. + "read packet: $Hc{0:x}#00".format(thread_id), + "send packet: $OK#00", + ], True) + self.test_sequence.add_log_lines([ + # Single step. + step_packet, + # "read packet: $vCont;s:{0:x}#00".format(thread_id), + # Expect a breakpoint stop report. 
+ {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", + "capture": {1: "stop_signo", + 2: "stop_thread_id"}}, + ], True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + self.assertIsNotNone(context.get("stop_signo")) + self.assertEqual(int(context.get("stop_signo"), 16), + lldbutil.get_signal_number('SIGTRAP')) + + single_step_count += 1 + + # See if the predicate is true. If so, we're done. + if predicate(args): + return (True, single_step_count) + + # The predicate didn't return true within the runaway step count. + return (False, single_step_count) + + def g_c1_c2_contents_are(self, args): + """Used by single step test that appears in a few different contexts.""" + g_c1_address = args["g_c1_address"] + g_c2_address = args["g_c2_address"] + expected_g_c1 = args["expected_g_c1"] + expected_g_c2 = args["expected_g_c2"] + + # Read g_c1 and g_c2 contents. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + ["read packet: $m{0:x},{1:x}#00".format(g_c1_address, 1), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c1_contents"}}, + "read packet: $m{0:x},{1:x}#00".format(g_c2_address, 1), + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", "capture": {1: "g_c2_contents"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Check if what we read from inferior memory is what we are expecting. + self.assertIsNotNone(context.get("g_c1_contents")) + self.assertIsNotNone(context.get("g_c2_contents")) + + return (seven.unhexlify(context.get("g_c1_contents")) == expected_g_c1) and ( + seven.unhexlify(context.get("g_c2_contents")) == expected_g_c2) + + def single_step_only_steps_one_instruction( + self, use_Hc_packet=True, step_instruction="s"): + """Used by single step test that appears in a few different contexts.""" + # Start up the inferior. 
+ procs = self.prep_debug_monitor_and_inferior( + inferior_args=[ + "get-code-address-hex:swap_chars", + "get-data-address-hex:g_c1", + "get-data-address-hex:g_c2", + "sleep:1", + "call-function:swap_chars", + "sleep:5"]) + + # Run the process + self.test_sequence.add_log_lines( + [ # Start running after initial stop. + "read packet: $c#63", + # Match output line that prints the memory address of the function call entry point. + # Note we require launch-only testing so we can get inferior otuput. + {"type": "output_match", "regex": r"^code address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\ndata address: 0x([0-9a-fA-F]+)\r\n$", + "capture": {1: "function_address", 2: "g_c1_address", 3: "g_c2_address"}}, + # Now stop the inferior. + "read packet: {}".format(chr(3)), + # And wait for the stop notification. + {"direction": "send", "regex": r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+);", "capture": {1: "stop_signo", 2: "stop_thread_id"}}], + True) + + # Run the packet stream. + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Grab the main thread id. + self.assertIsNotNone(context.get("stop_thread_id")) + main_thread_id = int(context.get("stop_thread_id"), 16) + + # Grab the function address. + self.assertIsNotNone(context.get("function_address")) + function_address = int(context.get("function_address"), 16) + + # Grab the data addresses. + self.assertIsNotNone(context.get("g_c1_address")) + g_c1_address = int(context.get("g_c1_address"), 16) + + self.assertIsNotNone(context.get("g_c2_address")) + g_c2_address = int(context.get("g_c2_address"), 16) + + # Set a breakpoint at the given address. 
+ if self.getArchitecture() == "arm": + # TODO: Handle case when setting breakpoint in thumb code + BREAKPOINT_KIND = 4 + else: + BREAKPOINT_KIND = 1 + self.reset_test_sequence() + self.add_set_breakpoint_packets( + function_address, + do_continue=True, + breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Remove the breakpoint. + self.reset_test_sequence() + self.add_remove_breakpoint_packets( + function_address, breakpoint_kind=BREAKPOINT_KIND) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Verify g_c1 and g_c2 match expected initial state. + args = {} + args["g_c1_address"] = g_c1_address + args["g_c2_address"] = g_c2_address + args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + + self.assertTrue(self.g_c1_c2_contents_are(args)) + + # Verify we take only a small number of steps to hit the first state. + # Might need to work through function entry prologue code. + args["expected_g_c1"] = "1" + args["expected_g_c2"] = "1" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=25, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + + # Verify we hit the next state. 
+ args["expected_g_c1"] = "1" + args["expected_g_c2"] = "0" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=5, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + expected_step_count = 1 + arch = self.getArchitecture() + + # MIPS required "3" (ADDIU, SB, LD) machine instructions for updation + # of variable value + if re.match("mips", arch): + expected_step_count = 3 + # S390X requires "2" (LARL, MVI) machine instructions for updation of + # variable value + if re.match("s390x", arch): + expected_step_count = 2 + self.assertEqual(step_count, expected_step_count) + + # Verify we hit the next state. + args["expected_g_c1"] = "0" + args["expected_g_c2"] = "0" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=5, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEqual(step_count, expected_step_count) + + # Verify we hit the next state. 
+ args["expected_g_c1"] = "0" + args["expected_g_c2"] = "1" + (state_reached, + step_count) = self.count_single_steps_until_true(main_thread_id, + self.g_c1_c2_contents_are, + args, + max_step_count=5, + use_Hc_packet=use_Hc_packet, + step_instruction=step_instruction) + self.assertTrue(state_reached) + self.assertEqual(step_count, expected_step_count) + + def maybe_strict_output_regex(self, regex): + return '.*' + regex + \ + '.*' if lldbplatformutil.hasChattyStderr(self) else '^' + regex + '$' + + def install_and_create_launch_args(self): + exe_path = self.getBuildArtifact("a.out") + if not lldb.remote_platform: + return [exe_path] + remote_path = lldbutil.append_to_process_working_directory(self, + os.path.basename(exe_path)) + remote_file_spec = lldb.SBFileSpec(remote_path, False) + err = lldb.remote_platform.Install(lldb.SBFileSpec(exe_path, True), + remote_file_spec) + if err.Fail(): + raise Exception("remote_platform.Install('%s', '%s') failed: %s" % + (exe_path, remote_path, err)) + return [remote_path] diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile new file mode 100644 index 00000000000..536d2e8db48 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/Makefile @@ -0,0 +1,6 @@ +CFLAGS_EXTRAS := -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -std=c++11 +# LD_EXTRAS := -lpthread +CXX_SOURCES := main.cpp +MAKE_DSYM :=NO + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py new file mode 100644 index 00000000000..5292913aa42 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteAbort.py @@ -0,0 +1,45 @@ + + +import gdbremote_testcase +from 
lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase):
+    mydir = TestBase.compute_mydir(__file__)
+
+    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
+    def inferior_abort_received(self):
+        procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"])  # inferior will call std::abort() (see inferior-crash/main.cpp)
+        self.assertIsNotNone(procs)
+
+        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",  # continue all threads
+                                          {"direction": "send",
+                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",  # $T stop reply; group 1 is the hex signal number
+                                           "capture": {1: "hex_exit_code"}},
+                                          ],
+                                         True)
+
+        context = self.expect_gdbremote_sequence()
+        self.assertIsNotNone(context)
+
+        hex_exit_code = context.get("hex_exit_code")
+        self.assertIsNotNone(hex_exit_code)
+        self.assertEqual(int(hex_exit_code, 16),
+                         lldbutil.get_signal_number('SIGABRT'))  # abort() must be reported as SIGABRT
+
+    @debugserver_test
+    def test_inferior_abort_received_debugserver(self):
+        self.init_debugserver_test()
+        self.build()
+        self.inferior_abort_received()
+
+    @skipIfWindows # No signal is sent on Windows.
+    @llgs_test
+    # std::abort() on <= API 16 raises SIGSEGV - b.android.com/179836
+    @expectedFailureAndroid(api_levels=list(range(16 + 1)))
+    def test_inferior_abort_received_llgs(self):
+        self.init_llgs_test()
+        self.build()
+        self.inferior_abort_received()
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py
new file mode 100644
index 00000000000..e0ba3d7eb68
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/TestGdbRemoteSegFault.py
@@ -0,0 +1,45 @@
+
+
+import gdbremote_testcase
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase):
+    mydir = TestBase.compute_mydir(__file__)
+
+    GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91  # stop code debugserver reports for the bad access (passed as expected_signo below)
+
+    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
+    def inferior_seg_fault_received(self, expected_signo):
+        procs = self.prep_debug_monitor_and_inferior(
+            inferior_args=["segfault"])  # inferior will write through a null pointer (see inferior-crash/main.cpp)
+        self.assertIsNotNone(procs)
+
+        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",  # continue all threads
+                                          {"direction": "send",
+                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",  # $T stop reply; group 1 is the hex signal number
+                                           "capture": {1: "hex_exit_code"}},
+                                          ],
+                                         True)
+
+        context = self.expect_gdbremote_sequence()
+        self.assertIsNotNone(context)
+
+        hex_exit_code = context.get("hex_exit_code")
+        self.assertIsNotNone(hex_exit_code)
+        self.assertEqual(int(hex_exit_code, 16), expected_signo)
+
+    @debugserver_test
+    def test_inferior_seg_fault_received_debugserver(self):
+        self.init_debugserver_test()
+        self.build()
+        self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS)
+
+    @skipIfWindows # No signal is sent on Windows.
+    @llgs_test
+    def test_inferior_seg_fault_received_llgs(self):
+        self.init_llgs_test()
+        self.build()
+        self.inferior_seg_fault_received(lldbutil.get_signal_number('SIGSEGV'))
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp
new file mode 100644
index 00000000000..ced7f712508
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/inferior-crash/main.cpp
@@ -0,0 +1,31 @@
+#include <cstdlib>
+#include <cstring>
+#include <iostream>
+
+namespace { // Crash-command keywords recognized on the command line.
+const char *const SEGFAULT_COMMAND = "segfault";
+const char *const ABORT_COMMAND = "abort";
+}
+
+int main(int argc, char **argv) {
+  if (argc < 2) { // Diagnostic only; still falls through and returns 0.
+    std::cout << "expected at least one command provided on the command line"
+              << std::endl;
+  }
+
+  // Process command line args in order; each recognized one triggers its crash.
+  for (int i = 1; i < argc; ++i) {
+    const char *const command = argv[i];
+    if (std::strstr(command, SEGFAULT_COMMAND)) {
+      // Perform a null pointer access — deliberate crash for the tests.
+      int *const null_int_ptr = nullptr;
+      *null_int_ptr = 0xDEAD; // write through null pointer
+    } else if (std::strstr(command, ABORT_COMMAND)) {
+      std::abort(); // deliberate abort for the tests
+    } else {
+      std::cout << "Unsupported command: " << command << std::endl;
+    }
+  }
+
+  return 0;
+}
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/Makefile
new file mode 100644
index 00000000000..5b5c1dcef78
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/Makefile
@@ -0,0 +1,19 @@
+LIB_PREFIX := svr4lib
+LD_EXTRAS := -L. -lsvr4lib_a -lsvr4lib_b\"
+CXX_SOURCES := main.cpp
+USE_LIBDL := 1
+MAKE_DSYM := NO
+
+a.out: svr4lib_a svr4lib_b_quote
+
+include Makefile.rules
+
+svr4lib_a:
+	$(MAKE) -f $(MAKEFILE_RULES) \
+		DYLIB_NAME=svr4lib_a DYLIB_CXX_SOURCES=svr4lib_a.cpp \
+		DYLIB_ONLY=YES
+
+svr4lib_b_quote:
+	$(MAKE) -f $(MAKEFILE_RULES) \
+		DYLIB_NAME=svr4lib_b\\\" DYLIB_CXX_SOURCES=svr4lib_b_quote.cpp \
+		DYLIB_ONLY=YES
diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py
new file mode 100644
index 00000000000..2081d9f34c5
--- /dev/null
+++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/TestGdbRemoteLibrariesSvr4Support.py
@@ -0,0 +1,133 @@
+import xml.etree.ElementTree as ET
+
+import gdbremote_testcase
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+
+
+class TestGdbRemoteLibrariesSvr4Support(gdbremote_testcase.GdbRemoteTestCaseBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+
+    FEATURE_NAME = "qXfer:libraries-svr4:read"  # qSupported feature these tests are gated on
+
+    def setup_test(self):
+        self.init_llgs_test()
+        self.build()
+        self.set_inferior_startup_launch()
+        env = {}
+        env[self.dylibPath] = self.getBuildDir()  # let the inferior find the freshly built test dylibs
+        self.prep_debug_monitor_and_inferior(inferior_env=env)
+        self.continue_process_and_wait_for_stop()
+
+    def get_expected_libs(self):
+        return ["libsvr4lib_a.so", 'libsvr4lib_b".so']  # second name deliberately contains a double quote (see Makefile)
+
+    def has_libraries_svr4_support(self):
+        self.add_qSupported_packets()
+        context = self.expect_gdbremote_sequence()
+        self.assertIsNotNone(context)
+        features = self.parse_qSupported_response(context)
+        return self.FEATURE_NAME in features and features[self.FEATURE_NAME] == "+"
+
+    def get_libraries_svr4_data(self):
+        # Start up llgs and inferior, and check for libraries-svr4 support.
+ if not self.has_libraries_svr4_support(): + self.skipTest("libraries-svr4 not supported") + + # Grab the libraries-svr4 data. + self.reset_test_sequence() + self.test_sequence.add_log_lines( + [ + "read packet: $qXfer:libraries-svr4:read::0,ffff:#00", + { + "direction": "send", + "regex": re.compile( + r"^\$([^E])(.*)#[0-9a-fA-F]{2}$", re.MULTILINE | re.DOTALL + ), + "capture": {1: "response_type", 2: "content_raw"}, + }, + ], + True, + ) + + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Ensure we end up with all libraries-svr4 data in one packet. + self.assertEqual(context.get("response_type"), "l") + + # Decode binary data. + content_raw = context.get("content_raw") + self.assertIsNotNone(content_raw) + return content_raw + + def get_libraries_svr4_xml(self): + libraries_svr4 = self.get_libraries_svr4_data() + xml_root = None + try: + xml_root = ET.fromstring(libraries_svr4) + except xml.etree.ElementTree.ParseError: + pass + self.assertIsNotNone(xml_root, "Malformed libraries-svr4 XML") + return xml_root + + def libraries_svr4_well_formed(self): + xml_root = self.get_libraries_svr4_xml() + self.assertEqual(xml_root.tag, "library-list-svr4") + for child in xml_root: + self.assertEqual(child.tag, "library") + self.assertItemsEqual(child.attrib.keys(), ["name", "lm", "l_addr", "l_ld"]) + + def libraries_svr4_has_correct_load_addr(self): + xml_root = self.get_libraries_svr4_xml() + for child in xml_root: + name = child.attrib.get("name") + base_name = os.path.basename(name) + if os.path.basename(name) not in self.get_expected_libs(): + continue + load_addr = int(child.attrib.get("l_addr"), 16) + self.reset_test_sequence() + self.add_query_memory_region_packets(load_addr) + context = self.expect_gdbremote_sequence() + mem_region = self.parse_memory_region_packet(context) + self.assertEqual(load_addr, int(mem_region.get("start", 0), 16)) + self.assertEqual( + os.path.realpath(name), os.path.realpath(mem_region.get("name", "")) + ) 
+ + def libraries_svr4_libs_present(self): + xml_root = self.get_libraries_svr4_xml() + libraries_svr4_names = [] + for child in xml_root: + name = child.attrib.get("name") + libraries_svr4_names.append(os.path.realpath(name)) + for lib in self.get_expected_libs(): + self.assertIn(self.getBuildDir() + "/" + lib, libraries_svr4_names) + + @llgs_test + @skipUnlessPlatform(["linux", "android", "netbsd"]) + def test_supports_libraries_svr4(self): + self.setup_test() + self.assertTrue(self.has_libraries_svr4_support()) + + @llgs_test + @skipUnlessPlatform(["linux", "android", "netbsd"]) + @expectedFailureNetBSD + def test_libraries_svr4_well_formed(self): + self.setup_test() + self.libraries_svr4_well_formed() + + @llgs_test + @skipUnlessPlatform(["linux", "android", "netbsd"]) + @expectedFailureNetBSD + def test_libraries_svr4_load_addr(self): + self.setup_test() + self.libraries_svr4_has_correct_load_addr() + + @llgs_test + @skipUnlessPlatform(["linux", "android", "netbsd"]) + @expectedFailureNetBSD + def test_libraries_svr4_libs_present(self): + self.setup_test() + self.libraries_svr4_libs_present() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/main.cpp new file mode 100644 index 00000000000..b62ca71b561 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/main.cpp @@ -0,0 +1,15 @@ +//===-- main.cpp ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +int main(int argc, char **argv) { + // Perform a null pointer access. 
+ int *const null_int_ptr = nullptr; + *null_int_ptr = 0xDEAD; + + return 0; +} diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_a.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_a.cpp new file mode 100644 index 00000000000..47d4b979d92 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_a.cpp @@ -0,0 +1,9 @@ +//===-- main.cpp ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +int svr4lib_a() { return 42; } diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_b_quote.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_b_quote.cpp new file mode 100644 index 00000000000..bd8eb0068e9 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/libraries-svr4/svr4lib_b_quote.cpp @@ -0,0 +1,9 @@ +//===-- main.cpp ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +int svr4lib_b_quote() { return 42; } diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py new file mode 100644 index 00000000000..815ba3491c1 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/lldbgdbserverutils.py @@ -0,0 +1,950 @@ +"""Module for supporting unit testing of the lldb-server debug monitor exe. +""" + +from __future__ import division, print_function + + +import os +import os.path +import platform +import re +import six +import socket_packet_pump +import subprocess +from lldbsuite.test.lldbtest import * + +from six.moves import queue + + +def _get_debug_monitor_from_lldb(lldb_exe, debug_monitor_basename): + """Return the debug monitor exe path given the lldb exe path. + + This method attempts to construct a valid debug monitor exe name + from a given lldb exe name. It will return None if the synthesized + debug monitor name is not found to exist. + + The debug monitor exe path is synthesized by taking the directory + of the lldb exe, and replacing the portion of the base name that + matches "lldb" (case insensitive) and replacing with the value of + debug_monitor_basename. + + Args: + lldb_exe: the path to an lldb executable. + + debug_monitor_basename: the base name portion of the debug monitor + that will replace 'lldb'. + + Returns: + A path to the debug monitor exe if it is found to exist; otherwise, + returns None. + + """ + if not lldb_exe: + return None + + exe_dir = os.path.dirname(lldb_exe) + exe_base = os.path.basename(lldb_exe) + + # we'll rebuild the filename by replacing lldb with + # the debug monitor basename, keeping any prefix or suffix in place. 
+ regex = re.compile(r"lldb", re.IGNORECASE) + new_base = regex.sub(debug_monitor_basename, exe_base) + + debug_monitor_exe = os.path.join(exe_dir, new_base) + if os.path.exists(debug_monitor_exe): + return debug_monitor_exe + + new_base = regex.sub( + 'LLDB.framework/Versions/A/Resources/' + + debug_monitor_basename, + exe_base) + debug_monitor_exe = os.path.join(exe_dir, new_base) + if os.path.exists(debug_monitor_exe): + return debug_monitor_exe + + return None + + +def get_lldb_server_exe(): + """Return the lldb-server exe path. + + Returns: + A path to the lldb-server exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + + return _get_debug_monitor_from_lldb( + lldbtest_config.lldbExec, "lldb-server") + + +def get_debugserver_exe(): + """Return the debugserver exe path. + + Returns: + A path to the debugserver exe if it is found to exist; otherwise, + returns None. + """ + if "LLDB_DEBUGSERVER_PATH" in os.environ: + return os.environ["LLDB_DEBUGSERVER_PATH"] + + return _get_debug_monitor_from_lldb( + lldbtest_config.lldbExec, "debugserver") + +_LOG_LINE_REGEX = re.compile(r'^(lldb-server|debugserver)\s+<\s*(\d+)>' + + '\s+(read|send)\s+packet:\s+(.+)$') + + +def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read): + """Return whether a given packet is input for lldb-gdbserver. + + Args: + packet_type: a string indicating 'send' or 'receive', from a + gdbremote packet protocol log. + + llgs_input_is_read: true if lldb-gdbserver input (content sent to + lldb-gdbserver) is listed as 'read' or 'send' in the packet + log entry. + + Returns: + True if the packet should be considered input for lldb-gdbserver; False + otherwise. + """ + if packet_type == 'read': + # when llgs is the read side, then a read packet is meant for + # input to llgs (when captured from the llgs/debugserver exe). 
+ return llgs_input_is_read + elif packet_type == 'send': + # when llgs is the send side, then a send packet is meant to + # be input to llgs (when captured from the lldb exe). + return not llgs_input_is_read + else: + # don't understand what type of packet this is + raise "Unknown packet type: {}".format(packet_type) + + +def handle_O_packet(context, packet_contents, logger): + """Handle O packets.""" + if (not packet_contents) or (len(packet_contents) < 1): + return False + elif packet_contents[0] != "O": + return False + elif packet_contents == "OK": + return False + + new_text = gdbremote_hex_decode_string(packet_contents[1:]) + context["O_content"] += new_text + context["O_count"] += 1 + + if logger: + logger.debug( + "text: new \"{}\", cumulative: \"{}\"".format( + new_text, context["O_content"])) + + return True + +_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$') +_STRIP_COMMAND_PREFIX_REGEX = re.compile(r"^\$") +_STRIP_COMMAND_PREFIX_M_REGEX = re.compile(r"^\$m") + + +def assert_packets_equal(asserter, actual_packet, expected_packet): + # strip off the checksum digits of the packet. When we're in + # no-ack mode, the # checksum is ignored, and should not be cause + # for a mismatched packet. + actual_stripped = _STRIP_CHECKSUM_REGEX.sub('', actual_packet) + expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet) + asserter.assertEqual(actual_stripped, expected_stripped) + + +def expect_lldb_gdbserver_replay( + asserter, + sock, + test_sequence, + pump_queues, + timeout_seconds, + logger=None): + """Replay socket communication with lldb-gdbserver and verify responses. + + Args: + asserter: the object providing assertEqual(first, second, msg=None), e.g. TestCase instance. + + sock: the TCP socket connected to the lldb-gdbserver exe. + + test_sequence: a GdbRemoteTestSequence instance that describes + the messages sent to the gdb remote and the responses + expected from it. 
+ + timeout_seconds: any response taking more than this number of + seconds will cause an exception to be raised. + + logger: a Python logger instance. + + Returns: + The context dictionary from running the given gdbremote + protocol sequence. This will contain any of the capture + elements specified to any GdbRemoteEntry instances in + test_sequence. + + The context will also contain an entry, context["O_content"] + which contains the text from the inferior received via $O + packets. $O packets should not attempt to be matched + directly since they are not entirely deterministic as to + how many arrive and how much text is in each one. + + context["O_count"] will contain an integer of the number of + O packets received. + """ + + # Ensure we have some work to do. + if len(test_sequence.entries) < 1: + return {} + + context = {"O_count": 0, "O_content": ""} + with socket_packet_pump.SocketPacketPump(sock, pump_queues, logger) as pump: + # Grab the first sequence entry. + sequence_entry = test_sequence.entries.pop(0) + + # While we have an active sequence entry, send messages + # destined for the stub and collect/match/process responses + # expected from the stub. + while sequence_entry: + if sequence_entry.is_send_to_remote(): + # This is an entry to send to the remote debug monitor. + send_packet = sequence_entry.get_send_packet() + if logger: + if len(send_packet) == 1 and send_packet[0] == chr(3): + packet_desc = "^C" + else: + packet_desc = send_packet + logger.info( + "sending packet to remote: {}".format(packet_desc)) + sock.sendall(send_packet.encode()) + else: + # This is an entry expecting to receive content from the remote + # debug monitor. + + # We'll pull from (and wait on) the queue appropriate for the type of matcher. + # We keep separate queues for process output (coming from non-deterministic + # $O packet division) and for all other packets. + if sequence_entry.is_output_matcher(): + try: + # Grab next entry from the output queue. 
+ content = pump_queues.output_queue().get(True, timeout_seconds) + except queue.Empty: + if logger: + logger.warning( + "timeout waiting for stub output (accumulated output:{})".format( + pump.get_accumulated_output())) + raise Exception( + "timed out while waiting for output match (accumulated output: {})".format( + pump.get_accumulated_output())) + else: + try: + content = pump_queues.packet_queue().get(True, timeout_seconds) + except queue.Empty: + if logger: + logger.warning( + "timeout waiting for packet match (receive buffer: {})".format( + pump.get_receive_buffer())) + raise Exception( + "timed out while waiting for packet match (receive buffer: {})".format( + pump.get_receive_buffer())) + + # Give the sequence entry the opportunity to match the content. + # Output matchers might match or pass after more output accumulates. + # Other packet types generally must match. + asserter.assertIsNotNone(content) + context = sequence_entry.assert_match( + asserter, content, context=context) + + # Move on to next sequence entry as needed. Some sequence entries support executing multiple + # times in different states (for looping over query/response + # packets). + if sequence_entry.is_consumed(): + if len(test_sequence.entries) > 0: + sequence_entry = test_sequence.entries.pop(0) + else: + sequence_entry = None + + # Fill in the O_content entries. + context["O_count"] = 1 + context["O_content"] = pump.get_accumulated_output() + + return context + + +def gdbremote_hex_encode_string(str): + output = '' + for c in str: + output += '{0:02x}'.format(ord(c)) + return output + + +def gdbremote_hex_decode_string(str): + return str.decode("hex") + + +def gdbremote_packet_encode_string(str): + checksum = 0 + for c in str: + checksum += ord(c) + return '$' + str + '#{0:02x}'.format(checksum % 256) + + +def build_gdbremote_A_packet(args_list): + """Given a list of args, create a properly-formed $A packet containing each arg. 
+ """ + payload = "A" + + # build the arg content + arg_index = 0 + for arg in args_list: + # Comma-separate the args. + if arg_index > 0: + payload += ',' + + # Hex-encode the arg. + hex_arg = gdbremote_hex_encode_string(arg) + + # Build the A entry. + payload += "{},{},{}".format(len(hex_arg), arg_index, hex_arg) + + # Next arg index, please. + arg_index += 1 + + # return the packetized payload + return gdbremote_packet_encode_string(payload) + + +def parse_reg_info_response(response_packet): + if not response_packet: + raise Exception("response_packet cannot be None") + + # Strip off prefix $ and suffix #xx if present. + response_packet = _STRIP_COMMAND_PREFIX_REGEX.sub("", response_packet) + response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) + + # Build keyval pairs + values = {} + for kv in response_packet.split(";"): + if len(kv) < 1: + continue + (key, val) = kv.split(':') + values[key] = val + + return values + + +def parse_threadinfo_response(response_packet): + if not response_packet: + raise Exception("response_packet cannot be None") + + # Strip off prefix $ and suffix #xx if present. + response_packet = _STRIP_COMMAND_PREFIX_M_REGEX.sub("", response_packet) + response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet) + + # Return list of thread ids + return [int(thread_id_hex, 16) for thread_id_hex in response_packet.split( + ",") if len(thread_id_hex) > 0] + + +def unpack_endian_binary_string(endian, value_string): + """Unpack a gdb-remote binary (post-unescaped, i.e. 
not escaped) response to an unsigned int given endianness of the inferior.""" + if not endian: + raise Exception("endian cannot be None") + if not value_string or len(value_string) < 1: + raise Exception("value_string cannot be None or empty") + + if endian == 'little': + value = 0 + i = 0 + while len(value_string) > 0: + value += (ord(value_string[0]) << i) + value_string = value_string[1:] + i += 8 + return value + elif endian == 'big': + value = 0 + while len(value_string) > 0: + value = (value << 8) + ord(value_string[0]) + value_string = value_string[1:] + return value + else: + # pdp is valid but need to add parse code once needed. + raise Exception("unsupported endian:{}".format(endian)) + + +def unpack_register_hex_unsigned(endian, value_string): + """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior.""" + if not endian: + raise Exception("endian cannot be None") + if not value_string or len(value_string) < 1: + raise Exception("value_string cannot be None or empty") + + if endian == 'little': + value = 0 + i = 0 + while len(value_string) > 0: + value += (int(value_string[0:2], 16) << i) + value_string = value_string[2:] + i += 8 + return value + elif endian == 'big': + return int(value_string, 16) + else: + # pdp is valid but need to add parse code once needed. + raise Exception("unsupported endian:{}".format(endian)) + + +def pack_register_hex(endian, value, byte_size=None): + """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior.""" + if not endian: + raise Exception("endian cannot be None") + + if endian == 'little': + # Create the litt-endian return value. + retval = "" + while value != 0: + retval = retval + "{:02x}".format(value & 0xff) + value = value >> 8 + if byte_size: + # Add zero-fill to the right/end (MSB side) of the value. 
+ retval += "00" * (byte_size - len(retval) // 2) + return retval + + elif endian == 'big': + retval = "" + while value != 0: + retval = "{:02x}".format(value & 0xff) + retval + value = value >> 8 + if byte_size: + # Add zero-fill to the left/front (MSB side) of the value. + retval = ("00" * (byte_size - len(retval) // 2)) + retval + return retval + + else: + # pdp is valid but need to add parse code once needed. + raise Exception("unsupported endian:{}".format(endian)) + + +class GdbRemoteEntryBase(object): + + def is_output_matcher(self): + return False + + +class GdbRemoteEntry(GdbRemoteEntryBase): + + def __init__( + self, + is_send_to_remote=True, + exact_payload=None, + regex=None, + capture=None, + expect_captures=None): + """Create an entry representing one piece of the I/O to/from a gdb remote debug monitor. + + Args: + + is_send_to_remote: True if this entry is a message to be + sent to the gdbremote debug monitor; False if this + entry represents text to be matched against the reply + from the gdbremote debug monitor. + + exact_payload: if not None, then this packet is an exact + send (when sending to the remote) or an exact match of + the response from the gdbremote. The checksums are + ignored on exact match requests since negotiation of + no-ack makes the checksum content essentially + undefined. + + regex: currently only valid for receives from gdbremote. + When specified (and only if exact_payload is None), + indicates the gdbremote response must match the given + regex. Match groups in the regex can be used for two + different purposes: saving the match (see capture + arg), or validating that a match group matches a + previously established value (see expect_captures). It + is perfectly valid to have just a regex arg and to + specify neither capture or expect_captures args. This + arg only makes sense if exact_payload is not + specified. 
+ + capture: if specified, is a dictionary of regex match + group indices (should start with 1) to variable names + that will store the capture group indicated by the + index. For example, {1:"thread_id"} will store capture + group 1's content in the context dictionary where + "thread_id" is the key and the match group value is + the value. The value stored off can be used later in a + expect_captures expression. This arg only makes sense + when regex is specified. + + expect_captures: if specified, is a dictionary of regex + match group indices (should start with 1) to variable + names, where the match group should match the value + existing in the context at the given variable name. + For example, {2:"thread_id"} indicates that the second + match group must match the value stored under the + context's previously stored "thread_id" key. This arg + only makes sense when regex is specified. + """ + self._is_send_to_remote = is_send_to_remote + self.exact_payload = exact_payload + self.regex = regex + self.capture = capture + self.expect_captures = expect_captures + + def is_send_to_remote(self): + return self._is_send_to_remote + + def is_consumed(self): + # For now, all packets are consumed after first use. + return True + + def get_send_packet(self): + if not self.is_send_to_remote(): + raise Exception( + "get_send_packet() called on GdbRemoteEntry that is not a send-to-remote packet") + if not self.exact_payload: + raise Exception( + "get_send_packet() called on GdbRemoteEntry but it doesn't have an exact payload") + return self.exact_payload + + def _assert_exact_payload_match(self, asserter, actual_packet): + assert_packets_equal(asserter, actual_packet, self.exact_payload) + return None + + def _assert_regex_match(self, asserter, actual_packet, context): + # Ensure the actual packet matches from the start of the actual packet. 
+ match = self.regex.match(actual_packet) + if not match: + asserter.fail( + "regex '{}' failed to match against content '{}'".format( + self.regex.pattern, actual_packet)) + + if self.capture: + # Handle captures. + for group_index, var_name in list(self.capture.items()): + capture_text = match.group(group_index) + # It is okay for capture text to be None - which it will be if it is a group that can match nothing. + # The user must be okay with it since the regex itself matched + # above. + context[var_name] = capture_text + + if self.expect_captures: + # Handle comparing matched groups to context dictionary entries. + for group_index, var_name in list(self.expect_captures.items()): + capture_text = match.group(group_index) + if not capture_text: + raise Exception( + "No content to expect for group index {}".format(group_index)) + asserter.assertEqual(capture_text, context[var_name]) + + return context + + def assert_match(self, asserter, actual_packet, context=None): + # This only makes sense for matching lines coming from the + # remote debug monitor. + if self.is_send_to_remote(): + raise Exception( + "Attempted to match a packet being sent to the remote debug monitor, doesn't make sense.") + + # Create a new context if needed. + if not context: + context = {} + + # If this is an exact payload, ensure they match exactly, + # ignoring the packet checksum which is optional for no-ack + # mode. + if self.exact_payload: + self._assert_exact_payload_match(asserter, actual_packet) + return context + elif self.regex: + return self._assert_regex_match(asserter, actual_packet, context) + else: + raise Exception( + "Don't know how to match a remote-sent packet when exact_payload isn't specified.") + + +class MultiResponseGdbRemoteEntry(GdbRemoteEntryBase): + """Represents a query/response style packet. + + Assumes the first item is sent to the gdb remote. + An end sequence regex indicates the end of the query/response + packet sequence. 
All responses up through (but not including) the + end response are stored in a context variable. + + Settings accepted from params: + + next_query or query: required. The typical query packet without the $ prefix or #xx suffix. + If there is a special first packet to start the iteration query, see the + first_query key. + + first_query: optional. If the first query requires a special query command, specify + it with this key. Do not specify the $ prefix or #xx suffix. + + append_iteration_suffix: defaults to False. Specify True if the 0-based iteration + index should be appended as a suffix to the command. e.g. qRegisterInfo with + this key set true will generate query packets of qRegisterInfo0, qRegisterInfo1, + etc. + + end_regex: required. Specifies a compiled regex object that will match the full text + of any response that signals an end to the iteration. It must include the + initial $ and ending #xx and must match the whole packet. + + save_key: required. Specifies the key within the context where an array will be stored. + Each packet received from the gdb remote that does not match the end_regex will get + appended to the array stored within the context at that key. + + runaway_response_count: optional. Defaults to 10000. If this many responses are retrieved, + assume there is something wrong with either the response collection or the ending + detection regex and throw an exception. 
+ """ + + def __init__(self, params): + self._next_query = params.get("next_query", params.get("query")) + if not self._next_query: + raise "either next_query or query key must be specified for MultiResponseGdbRemoteEntry" + + self._first_query = params.get("first_query", self._next_query) + self._append_iteration_suffix = params.get( + "append_iteration_suffix", False) + self._iteration = 0 + self._end_regex = params["end_regex"] + self._save_key = params["save_key"] + self._runaway_response_count = params.get( + "runaway_response_count", 10000) + self._is_send_to_remote = True + self._end_matched = False + + def is_send_to_remote(self): + return self._is_send_to_remote + + def get_send_packet(self): + if not self.is_send_to_remote(): + raise Exception( + "get_send_packet() called on MultiResponseGdbRemoteEntry that is not in the send state") + if self._end_matched: + raise Exception( + "get_send_packet() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") + + # Choose the first or next query for the base payload. + if self._iteration == 0 and self._first_query: + payload = self._first_query + else: + payload = self._next_query + + # Append the suffix as needed. + if self._append_iteration_suffix: + payload += "%x" % self._iteration + + # Keep track of the iteration. + self._iteration += 1 + + # Now that we've given the query packet, flip the mode to + # receive/match. + self._is_send_to_remote = False + + # Return the result, converted to packet form. + return gdbremote_packet_encode_string(payload) + + def is_consumed(self): + return self._end_matched + + def assert_match(self, asserter, actual_packet, context=None): + # This only makes sense for matching lines coming from the remote debug + # monitor. 
+ if self.is_send_to_remote(): + raise Exception( + "assert_match() called on MultiResponseGdbRemoteEntry but state is set to send a query packet.") + + if self._end_matched: + raise Exception( + "assert_match() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.") + + # Set up a context as needed. + if not context: + context = {} + + # Check if the packet matches the end condition. + match = self._end_regex.match(actual_packet) + if match: + # We're done iterating. + self._end_matched = True + return context + + # Not done iterating - save the packet. + context[self._save_key] = context.get(self._save_key, []) + context[self._save_key].append(actual_packet) + + # Check for a runaway response cycle. + if len(context[self._save_key]) >= self._runaway_response_count: + raise Exception( + "runaway query/response cycle detected: %d responses captured so far. Last response: %s" % + (len( + context[ + self._save_key]), context[ + self._save_key][ + -1])) + + # Flip the mode to send for generating the query. + self._is_send_to_remote = True + return context + + +class MatchRemoteOutputEntry(GdbRemoteEntryBase): + """Waits for output from the debug monitor to match a regex or time out. + + This entry type tries to match each time new gdb remote output is accumulated + using a provided regex. If the output does not match the regex within the + given timeframe, the command fails the playback session. If the regex does + match, any capture fields are recorded in the context. + + Settings accepted from params: + + regex: required. Specifies a compiled regex object that must either succeed + with re.match or re.search (see regex_mode below) within the given timeout + (see timeout_seconds below) or cause the playback to fail. + + regex_mode: optional. Available values: "match" or "search". If "match", the entire + stub output as collected so far must match the regex. 
If search, then the regex + must match starting somewhere within the output text accumulated thus far. + Default: "match" (i.e. the regex must match the entirety of the accumulated output + buffer, so unexpected text will generally fail the match). + + capture: optional. If specified, is a dictionary of regex match group indices (should start + with 1) to variable names that will store the capture group indicated by the + index. For example, {1:"thread_id"} will store capture group 1's content in the + context dictionary where "thread_id" is the key and the match group value is + the value. The value stored off can be used later in a expect_captures expression. + This arg only makes sense when regex is specified. + """ + + def __init__(self, regex=None, regex_mode="match", capture=None): + self._regex = regex + self._regex_mode = regex_mode + self._capture = capture + self._matched = False + + if not self._regex: + raise Exception("regex cannot be None") + + if not self._regex_mode in ["match", "search"]: + raise Exception( + "unsupported regex mode \"{}\": must be \"match\" or \"search\"".format( + self._regex_mode)) + + def is_output_matcher(self): + return True + + def is_send_to_remote(self): + # This is always a "wait for remote" command. + return False + + def is_consumed(self): + return self._matched + + def assert_match(self, asserter, accumulated_output, context): + # Validate args. + if not accumulated_output: + raise Exception("accumulated_output cannot be none") + if not context: + raise Exception("context cannot be none") + + # Validate that we haven't already matched. + if self._matched: + raise Exception( + "invalid state - already matched, attempting to match again") + + # If we don't have any content yet, we don't match. 
+ if len(accumulated_output) < 1: + return context + + # Check if we match + if self._regex_mode == "match": + match = self._regex.match(accumulated_output) + elif self._regex_mode == "search": + match = self._regex.search(accumulated_output) + else: + raise Exception( + "Unexpected regex mode: {}".format( + self._regex_mode)) + + # If we don't match, wait to try again after next $O content, or time + # out. + if not match: + # print("re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output)) + return context + + # We do match. + self._matched = True + # print("re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output)) + + # Collect up any captures into the context. + if self._capture: + # Handle captures. + for group_index, var_name in list(self._capture.items()): + capture_text = match.group(group_index) + if not capture_text: + raise Exception( + "No content for group index {}".format(group_index)) + context[var_name] = capture_text + + return context + + +class GdbRemoteTestSequence(object): + + _LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$') + + def __init__(self, logger): + self.entries = [] + self.logger = logger + + def add_log_lines(self, log_lines, remote_input_is_read): + for line in log_lines: + if isinstance(line, str): + # Handle log line import + # if self.logger: + # self.logger.debug("processing log line: {}".format(line)) + match = self._LOG_LINE_REGEX.match(line) + if match: + playback_packet = match.group(2) + direction = match.group(1) + if _is_packet_lldb_gdbserver_input( + direction, remote_input_is_read): + # Handle as something to send to the remote debug monitor. + # if self.logger: + # self.logger.info("processed packet to send to remote: {}".format(playback_packet)) + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=True, + exact_payload=playback_packet)) + else: + # Log line represents content to be expected from the remote debug monitor. 
+ # if self.logger: + # self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet)) + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=False, + exact_payload=playback_packet)) + else: + raise Exception( + "failed to interpret log line: {}".format(line)) + elif isinstance(line, dict): + entry_type = line.get("type", "regex_capture") + if entry_type == "regex_capture": + # Handle more explicit control over details via dictionary. + direction = line.get("direction", None) + regex = line.get("regex", None) + capture = line.get("capture", None) + expect_captures = line.get("expect_captures", None) + + # Compile the regex. + if regex and (isinstance(regex, str)): + regex = re.compile(regex) + + if _is_packet_lldb_gdbserver_input( + direction, remote_input_is_read): + # Handle as something to send to the remote debug monitor. + # if self.logger: + # self.logger.info("processed dict sequence to send to remote") + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=True, + regex=regex, + capture=capture, + expect_captures=expect_captures)) + else: + # Log line represents content to be expected from the remote debug monitor. + # if self.logger: + # self.logger.info("processed dict sequence to match receiving from remote") + self.entries.append( + GdbRemoteEntry( + is_send_to_remote=False, + regex=regex, + capture=capture, + expect_captures=expect_captures)) + elif entry_type == "multi_response": + self.entries.append(MultiResponseGdbRemoteEntry(line)) + elif entry_type == "output_match": + + regex = line.get("regex", None) + # Compile the regex. 
+ if regex and (isinstance(regex, str)): + regex = re.compile(regex, re.DOTALL) + + regex_mode = line.get("regex_mode", "match") + capture = line.get("capture", None) + self.entries.append( + MatchRemoteOutputEntry( + regex=regex, + regex_mode=regex_mode, + capture=capture)) + else: + raise Exception("unknown entry type \"%s\"" % entry_type) + + +def process_is_running(pid, unknown_value=True): + """If possible, validate that the given pid represents a running process on the local system. + + Args: + + pid: an OS-specific representation of a process id. Should be an integral value. + + unknown_value: value used when we cannot determine how to check running local + processes on the OS. + + Returns: + + If we can figure out how to check running process ids on the given OS: + return True if the process is running, or False otherwise. + + If we don't know how to check running process ids on the given OS: + return the value provided by the unknown_value arg. + """ + if not isinstance(pid, six.integer_types): + raise Exception( + "pid must be an integral type (actual type: %s)" % str( + type(pid))) + + process_ids = [] + + if lldb.remote_platform: + # Don't know how to get list of running process IDs on a remote + # platform + return unknown_value + elif platform.system() in ['Darwin', 'Linux', 'FreeBSD', 'NetBSD']: + # Build the list of running process ids + output = subprocess.check_output( + "ps ax | awk '{ print $1; }'", shell=True).decode("utf-8") + text_process_ids = output.split('\n')[1:] + # Convert text pids to ints + process_ids = [int(text_pid) + for text_pid in text_process_ids if text_pid != ''] + elif platform.system() == 'Windows': + output = subprocess.check_output( + "for /f \"tokens=2 delims=,\" %F in ('tasklist /nh /fi \"PID ne 0\" /fo csv') do @echo %~F", shell=True).decode("utf-8") + text_process_ids = output.split('\n')[1:] + process_ids = [int(text_pid) + for text_pid in text_process_ids if text_pid != ''] + # elif {your_platform_here}: + # fill in 
process_ids as a list of int type process IDs running on + # the local system. + else: + # Don't know how to get list of running process IDs on this + # OS, so return the "don't know" value. + return unknown_value + + # Check if the pid is in the process_ids + return pid in process_ids + +if __name__ == '__main__': + EXE_PATH = get_lldb_server_exe() + if EXE_PATH: + print("lldb-server path detected: {}".format(EXE_PATH)) + else: + print("lldb-server could not be found") diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp new file mode 100644 index 00000000000..f1d46b85425 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/main.cpp @@ -0,0 +1,370 @@ +//===-- main.cpp ------------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include <atomic> +#include <chrono> +#include <cstdlib> +#include <cstring> +#include <errno.h> +#include <inttypes.h> +#include <memory> +#include <mutex> +#if !defined(_WIN32) +#include <pthread.h> +#include <signal.h> +#include <unistd.h> +#endif +#include <setjmp.h> +#include <stdint.h> +#include <stdio.h> +#include <string.h> +#include <thread> +#include <time.h> +#include <vector> + +#if defined(__APPLE__) +__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2) +int pthread_threadid_np(pthread_t, __uint64_t *); +#elif defined(__linux__) +#include <sys/syscall.h> +#elif defined(__NetBSD__) +#include <lwp.h> +#elif defined(_WIN32) +#include <windows.h> +#endif + +static const char *const RETVAL_PREFIX = "retval:"; +static const char *const SLEEP_PREFIX = "sleep:"; +static const char *const STDERR_PREFIX = "stderr:"; +static const char *const SET_MESSAGE_PREFIX = "set-message:"; +static const char *const PRINT_MESSAGE_COMMAND = "print-message:"; +static const char *const GET_DATA_ADDRESS_PREFIX = "get-data-address-hex:"; +static const char *const GET_STACK_ADDRESS_COMMAND = "get-stack-address-hex:"; +static const char *const GET_HEAP_ADDRESS_COMMAND = "get-heap-address-hex:"; + +static const char *const GET_CODE_ADDRESS_PREFIX = "get-code-address-hex:"; +static const char *const CALL_FUNCTION_PREFIX = "call-function:"; + +static const char *const THREAD_PREFIX = "thread:"; +static const char *const THREAD_COMMAND_NEW = "new"; +static const char *const THREAD_COMMAND_PRINT_IDS = "print-ids"; +static const char *const THREAD_COMMAND_SEGFAULT = "segfault"; + +static const char *const PRINT_PID_COMMAND = "print-pid"; + +static bool g_print_thread_ids = false; +static std::mutex g_print_mutex; +static bool g_threads_do_segfault = false; + +static std::mutex g_jump_buffer_mutex; +static jmp_buf g_jump_buffer; +static bool 
g_is_segfaulting = false; + +static char g_message[256]; + +static volatile char g_c1 = '0'; +static volatile char g_c2 = '1'; + +static void print_pid() { +#if defined(_WIN32) + fprintf(stderr, "PID: %d\n", ::GetCurrentProcessId()); +#else + fprintf(stderr, "PID: %d\n", getpid()); +#endif +} + +static void print_thread_id() { +// Put in the right magic here for your platform to spit out the thread id (tid) +// that debugserver/lldb-gdbserver would see as a TID. Otherwise, let the else +// clause print out the unsupported text so that the unit test knows to skip +// verifying thread ids. +#if defined(__APPLE__) + __uint64_t tid = 0; + pthread_threadid_np(pthread_self(), &tid); + printf("%" PRIx64, tid); +#elif defined(__linux__) + // This is a call to gettid() via syscall. + printf("%" PRIx64, static_cast<uint64_t>(syscall(__NR_gettid))); +#elif defined(__NetBSD__) + // Technically lwpid_t is 32-bit signed integer + printf("%" PRIx64, static_cast<uint64_t>(_lwp_self())); +#elif defined(_WIN32) + printf("%" PRIx64, static_cast<uint64_t>(::GetCurrentThreadId())); +#else + printf("{no-tid-support}"); +#endif +} + +static void signal_handler(int signo) { +#if defined(_WIN32) + // No signal support on Windows. +#else + const char *signal_name = nullptr; + switch (signo) { + case SIGUSR1: + signal_name = "SIGUSR1"; + break; + case SIGSEGV: + signal_name = "SIGSEGV"; + break; + default: + signal_name = nullptr; + } + + // Print notice that we received the signal on a given thread. + { + std::lock_guard<std::mutex> lock(g_print_mutex); + if (signal_name) + printf("received %s on thread id: ", signal_name); + else + printf("received signo %d (%s) on thread id: ", signo, strsignal(signo)); + print_thread_id(); + printf("\n"); + } + + // Reset the signal handler if we're one of the expected signal handlers. + switch (signo) { + case SIGSEGV: + if (g_is_segfaulting) { + // Fix up the pointer we're writing to. This needs to happen if nothing + // intercepts the SIGSEGV (i.e. 
if somebody runs this from the command + // line). + longjmp(g_jump_buffer, 1); + } + break; + case SIGUSR1: + if (g_is_segfaulting) { + // Fix up the pointer we're writing to. This is used to test gdb remote + // signal delivery. A SIGSEGV will be raised when the thread is created, + // switched out for a SIGUSR1, and then this code still needs to fix the + // seg fault. (i.e. if somebody runs this from the command line). + longjmp(g_jump_buffer, 1); + } + break; + } + + // Reset the signal handler. + sig_t sig_result = signal(signo, signal_handler); + if (sig_result == SIG_ERR) { + fprintf(stderr, "failed to set signal handler: errno=%d\n", errno); + exit(1); + } +#endif +} + +static void swap_chars() { + g_c1 = '1'; + g_c2 = '0'; + + g_c1 = '0'; + g_c2 = '1'; +} + +static void hello() { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("hello, world\n"); +} + +static void *thread_func(void *arg) { + static std::atomic<int> s_thread_index(1); + const int this_thread_index = s_thread_index++; + if (g_print_thread_ids) { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("thread %d id: ", this_thread_index); + print_thread_id(); + printf("\n"); + } + + if (g_threads_do_segfault) { + // Sleep for a number of seconds based on the thread index. + // TODO add ability to send commands to test exe so we can + // handle timing more precisely. This is clunky. All we're + // trying to do is add predictability as to the timing of + // signal generation by created threads. + int sleep_seconds = 2 * (this_thread_index - 1); + std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds)); + + // Test creating a SEGV. + { + std::lock_guard<std::mutex> lock(g_jump_buffer_mutex); + g_is_segfaulting = true; + int *bad_p = nullptr; + if (setjmp(g_jump_buffer) == 0) { + // Force a seg fault signal on this thread. + *bad_p = 0; + } else { + // Tell the system we're no longer seg faulting. 
+ // Used by the SIGUSR1 signal handler that we inject + // in place of the SIGSEGV so it only tries to + // recover from the SIGSEGV if this seg fault code + // was in play. + g_is_segfaulting = false; + } + } + + { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("thread "); + print_thread_id(); + printf(": past SIGSEGV\n"); + } + } + + int sleep_seconds_remaining = 60; + std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds_remaining)); + + return nullptr; +} + +int main(int argc, char **argv) { + lldb_enable_attach(); + + std::vector<std::thread> threads; + std::unique_ptr<uint8_t[]> heap_array_up; + int return_value = 0; + +#if !defined(_WIN32) + // Set the signal handler. + sig_t sig_result = signal(SIGALRM, signal_handler); + if (sig_result == SIG_ERR) { + fprintf(stderr, "failed to set SIGALRM signal handler: errno=%d\n", errno); + exit(1); + } + + sig_result = signal(SIGUSR1, signal_handler); + if (sig_result == SIG_ERR) { + fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); + exit(1); + } + + sig_result = signal(SIGSEGV, signal_handler); + if (sig_result == SIG_ERR) { + fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno); + exit(1); + } +#endif + + // Process command line args. + for (int i = 1; i < argc; ++i) { + if (std::strstr(argv[i], STDERR_PREFIX)) { + // Treat remainder as text to go to stderr. + fprintf(stderr, "%s\n", (argv[i] + strlen(STDERR_PREFIX))); + } else if (std::strstr(argv[i], RETVAL_PREFIX)) { + // Treat as the return value for the program. + return_value = std::atoi(argv[i] + strlen(RETVAL_PREFIX)); + } else if (std::strstr(argv[i], SLEEP_PREFIX)) { + // Treat as the amount of time to have this process sleep (in seconds). + int sleep_seconds_remaining = std::atoi(argv[i] + strlen(SLEEP_PREFIX)); + + // Loop around, sleeping until all sleep time is used up. Note that + // signals will cause sleep to end early with the number of seconds + // remaining. 
+ std::this_thread::sleep_for( + std::chrono::seconds(sleep_seconds_remaining)); + + } else if (std::strstr(argv[i], SET_MESSAGE_PREFIX)) { + // Copy the contents after "set-message:" to the g_message buffer. + // Used for reading inferior memory and verifying contents match + // expectations. + strncpy(g_message, argv[i] + strlen(SET_MESSAGE_PREFIX), + sizeof(g_message)); + + // Ensure we're null terminated. + g_message[sizeof(g_message) - 1] = '\0'; + + } else if (std::strstr(argv[i], PRINT_MESSAGE_COMMAND)) { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("message: %s\n", g_message); + } else if (std::strstr(argv[i], GET_DATA_ADDRESS_PREFIX)) { + volatile void *data_p = nullptr; + + if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_message")) + data_p = &g_message[0]; + else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c1")) + data_p = &g_c1; + else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c2")) + data_p = &g_c2; + + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("data address: %p\n", data_p); + } else if (std::strstr(argv[i], GET_HEAP_ADDRESS_COMMAND)) { + // Create a byte array if not already present. 
+ if (!heap_array_up) + heap_array_up.reset(new uint8_t[32]); + + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("heap address: %p\n", heap_array_up.get()); + + } else if (std::strstr(argv[i], GET_STACK_ADDRESS_COMMAND)) { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("stack address: %p\n", &return_value); + } else if (std::strstr(argv[i], GET_CODE_ADDRESS_PREFIX)) { + void (*func_p)() = nullptr; + + if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX), "hello")) + func_p = hello; + else if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX), + "swap_chars")) + func_p = swap_chars; + + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("code address: %p\n", func_p); + } else if (std::strstr(argv[i], CALL_FUNCTION_PREFIX)) { + void (*func_p)() = nullptr; + + // Defaut to providing the address of main. + if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX), "hello") == 0) + func_p = hello; + else if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX), + "swap_chars") == 0) + func_p = swap_chars; + else { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("unknown function: %s\n", + argv[i] + strlen(CALL_FUNCTION_PREFIX)); + } + if (func_p) + func_p(); + } else if (std::strstr(argv[i], THREAD_PREFIX)) { + // Check if we're creating a new thread. + if (std::strstr(argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_NEW)) { + threads.push_back(std::thread(thread_func, nullptr)); + } else if (std::strstr(argv[i] + strlen(THREAD_PREFIX), + THREAD_COMMAND_PRINT_IDS)) { + // Turn on thread id announcing. + g_print_thread_ids = true; + + // And announce us. + { + std::lock_guard<std::mutex> lock(g_print_mutex); + printf("thread 0 id: "); + print_thread_id(); + printf("\n"); + } + } else if (std::strstr(argv[i] + strlen(THREAD_PREFIX), + THREAD_COMMAND_SEGFAULT)) { + g_threads_do_segfault = true; + } else { + // At this point we don't do anything else with threads. + // Later use thread index and send command to thread. 
+ } + } else if (std::strstr(argv[i], PRINT_PID_COMMAND)) { + print_pid(); + } else { + // Treat the argument as text for stdout. + printf("%s\n", argv[i]); + } + } + + // If we launched any threads, join them + for (std::vector<std::thread>::iterator it = threads.begin(); + it != threads.end(); ++it) + it->join(); + + return return_value; +} diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile new file mode 100644 index 00000000000..99998b20bcb --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/Makefile @@ -0,0 +1,3 @@ +CXX_SOURCES := main.cpp + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py new file mode 100644 index 00000000000..ff708310ca9 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/TestPlatformProcessConnect.py @@ -0,0 +1,103 @@ + +import time + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestPlatformProcessConnect(gdbremote_testcase.GdbRemoteTestCaseBase): + mydir = TestBase.compute_mydir(__file__) + + def setUp(self): + super(TestPlatformProcessConnect, self).setUp() + self._initial_platform = lldb.DBG.GetSelectedPlatform() + + def tearDown(self): + lldb.DBG.SetSelectedPlatform(self._initial_platform) + super(TestPlatformProcessConnect, self).tearDown() + + @llgs_test + @no_debug_info_test + @skipIf(remote=False) + @expectedFailureAll(hostoslist=["windows"], triple='.*-android') + def test_platform_process_connect(self): + self.build() + 
self.init_llgs_test(False) + + working_dir = lldb.remote_platform.GetWorkingDirectory() + src = lldb.SBFileSpec(self.getBuildArtifact("a.out")) + dest = lldb.SBFileSpec(os.path.join(working_dir, "a.out")) + err = lldb.remote_platform.Put(src, dest) + if err.Fail(): + raise RuntimeError( + "Unable copy '%s' to '%s'.\n>>> %s" % + (f, wd, err.GetCString())) + + m = re.search("^(.*)://([^:/]*)", configuration.lldb_platform_url) + protocol = m.group(1) + hostname = m.group(2) + unix_protocol = protocol.startswith("unix-") + if unix_protocol: + p = re.search("^(.*)-connect", protocol) + path = lldbutil.join_remote_paths(configuration.lldb_platform_working_dir, + self.getBuildDirBasename(), "platform-%d.sock" % int(time.time())) + listen_url = "%s://%s" % (p.group(1), path) + else: + listen_url = "*:0" + + port_file = "%s/port" % working_dir + commandline_args = [ + "platform", + "--listen", + listen_url, + "--socket-file", + port_file, + "--", + "%s/a.out" % + working_dir, + "foo"] + self.spawnSubprocess( + self.debug_monitor_exe, + commandline_args, + install_remote=False) + self.addTearDownHook(self.cleanupSubprocesses) + + socket_id = lldbutil.wait_for_file_on_target(self, port_file) + + new_debugger = lldb.SBDebugger.Create() + new_debugger.SetAsync(False) + + def del_debugger(new_debugger=new_debugger): + del new_debugger + self.addTearDownHook(del_debugger) + + new_platform = lldb.SBPlatform(lldb.remote_platform.GetName()) + new_debugger.SetSelectedPlatform(new_platform) + new_interpreter = new_debugger.GetCommandInterpreter() + + if unix_protocol: + connect_url = "%s://%s%s" % (protocol, hostname, socket_id) + else: + connect_url = "%s://%s:%s" % (protocol, hostname, socket_id) + + command = "platform connect %s" % (connect_url) + result = lldb.SBCommandReturnObject() + new_interpreter.HandleCommand(command, result) + self.assertTrue( + result.Succeeded(), + "platform process connect failed: %s" % + result.GetOutput()) + + target = new_debugger.GetSelectedTarget() 
+ process = target.GetProcess() + thread = process.GetThreadAtIndex(0) + + breakpoint = target.BreakpointCreateByName("main") + process.Continue() + + frame = thread.GetFrameAtIndex(0) + self.assertEqual(frame.GetFunction().GetName(), "main") + self.assertEqual(frame.FindVariable("argc").GetValueAsSigned(), 2) + process.Continue() diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp new file mode 100644 index 00000000000..c7ebe0759a4 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/platform-process-connect/main.cpp @@ -0,0 +1,6 @@ +#include <cstdio> + +int main(int argc, char **argv) { + printf("argc: %d\n", argc); + return argv[0][0]; +} diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile new file mode 100644 index 00000000000..99998b20bcb --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/Makefile @@ -0,0 +1,3 @@ +CXX_SOURCES := main.cpp + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py new file mode 100644 index 00000000000..a27cb01e938 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/TestGdbRemoteGPacket.py @@ -0,0 +1,154 @@ + + +import gdbremote_testcase +import textwrap +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +def _extract_register_value(reg_info, reg_bank, byte_order, bytes_per_entry=8): + reg_offset = int(reg_info["offset"])*2 + reg_byte_size = int(2 * 
int(reg_info["bitsize"]) / 8) + # Create slice with the contents of the register. + reg_slice = reg_bank[reg_offset:reg_offset+reg_byte_size] + + reg_value = [] + # Wrap slice according to bytes_per_entry. + for entry in textwrap.wrap(reg_slice, 2 * bytes_per_entry): + # Invert the bytes order if target uses little-endian. + if byte_order == lldb.eByteOrderLittle: + entry = "".join(reversed([entry[i:i+2] for i in range(0, + len(entry),2)])) + reg_value.append("0x" + entry) + + return reg_value + + +class TestGdbRemoteGPacket(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def run_test_g_packet(self): + self.build() + self.prep_debug_monitor_and_inferior() + self.test_sequence.add_log_lines( + ["read packet: $g#67", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "register_bank"}}], + True) + self.connect_to_debug_monitor() + context = self.expect_gdbremote_sequence() + register_bank = context.get("register_bank") + self.assertTrue(register_bank[0] != 'E') + + self.test_sequence.add_log_lines( + ["read packet: $G" + register_bank + "#00", + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "G_reply"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertTrue(context.get("G_reply")[0] != 'E') + + @skipIfOutOfTreeDebugserver + @debugserver_test + @skipIfDarwinEmbedded + def test_g_packet_debugserver(self): + self.init_debugserver_test() + self.run_test_g_packet() + + @skipIf(archs=no_match(["x86_64"])) + def g_returns_correct_data(self, with_suffix): + procs = self.prep_debug_monitor_and_inferior() + + self.add_register_info_collection_packets() + if with_suffix: + self.add_thread_suffix_request_packets() + self.add_threadinfo_collection_packets() + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + # Gather register info. 
+ reg_infos = self.parse_register_info_packets(context) + self.assertIsNotNone(reg_infos) + self.add_lldb_register_index(reg_infos) + # Index register info entries by name. + reg_infos = {info['name']: info for info in reg_infos} + + # Gather thread info. + if with_suffix: + threads = self.parse_threadinfo_packets(context) + self.assertIsNotNone(threads) + thread_id = threads[0] + self.assertIsNotNone(thread_id) + else: + thread_id = None + + # Send vCont packet to resume the inferior. + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$", + "capture": {1: "hex_exit_code"}}, + ], + True) + + # Send g packet to retrieve the register bank + if thread_id: + g_request = "read packet: $g;thread:{:x}#00".format(thread_id) + else: + g_request = "read packet: $g#00" + self.test_sequence.add_log_lines( + [g_request, + {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$", + "capture": {1: "register_bank"}}], + True) + context = self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + reg_bank = context.get("register_bank") + self.assertTrue(reg_bank[0] != 'E') + + byte_order = self.get_target_byte_order() + get_reg_value = lambda reg_name : _extract_register_value( + reg_infos[reg_name], reg_bank, byte_order) + + self.assertEqual(['0x0102030405060708'], get_reg_value('r8')) + self.assertEqual(['0x1112131415161718'], get_reg_value('r9')) + self.assertEqual(['0x2122232425262728'], get_reg_value('r10')) + self.assertEqual(['0x3132333435363738'], get_reg_value('r11')) + self.assertEqual(['0x4142434445464748'], get_reg_value('r12')) + self.assertEqual(['0x5152535455565758'], get_reg_value('r13')) + self.assertEqual(['0x6162636465666768'], get_reg_value('r14')) + self.assertEqual(['0x7172737475767778'], get_reg_value('r15')) + + self.assertEqual( + ['0x020406080a0c0e01', '0x030507090b0d0f00'], get_reg_value('xmm8')) + self.assertEqual( + ['0x121416181a1c1e11', 
'0x131517191b1d1f10'], get_reg_value('xmm9')) + self.assertEqual( + ['0x222426282a2c2e21', '0x232527292b2d2f20'], get_reg_value('xmm10')) + self.assertEqual( + ['0x323436383a3c3e31', '0x333537393b3d3f30'], get_reg_value('xmm11')) + self.assertEqual( + ['0x424446484a4c4e41', '0x434547494b4d4f40'], get_reg_value('xmm12')) + self.assertEqual( + ['0x525456585a5c5e51', '0x535557595b5d5f50'], get_reg_value('xmm13')) + self.assertEqual( + ['0x626466686a6c6e61', '0x636567696b6d6f60'], get_reg_value('xmm14')) + self.assertEqual( + ['0x727476787a7c7e71', '0x737577797b7d7f70'], get_reg_value('xmm15')) + + @expectedFailureNetBSD + @llgs_test + def test_g_returns_correct_data_with_suffix_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.g_returns_correct_data(True) + + @expectedFailureNetBSD + @llgs_test + def test_g_returns_correct_data_no_suffix_llgs(self): + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + self.g_returns_correct_data(False) diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp new file mode 100644 index 00000000000..fca0c723bde --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/register-reading/main.cpp @@ -0,0 +1,54 @@ +#include <cstdint> + +int main() { +#if defined(__x86_64__) + struct alignas(16) xmm_t { + uint64_t a, b; + }; + uint64_t r8 = 0x0102030405060708; + uint64_t r9 = 0x1112131415161718; + uint64_t r10 = 0x2122232425262728; + uint64_t r11 = 0x3132333435363738; + uint64_t r12 = 0x4142434445464748; + uint64_t r13 = 0x5152535455565758; + uint64_t r14 = 0x6162636465666768; + uint64_t r15 = 0x7172737475767778; + + xmm_t xmm8 = {0x020406080A0C0E01, 0x030507090B0D0F00}; + xmm_t xmm9 = {0x121416181A1C1E11, 0x131517191B1D1F10}; + xmm_t xmm10 = {0x222426282A2C2E21, 0x232527292B2D2F20}; + xmm_t xmm11 = 
{0x323436383A3C3E31, 0x333537393B3D3F30}; + xmm_t xmm12 = {0x424446484A4C4E41, 0x434547494B4D4F40}; + xmm_t xmm13 = {0x525456585A5C5E51, 0x535557595B5D5F50}; + xmm_t xmm14 = {0x626466686A6C6E61, 0x636567696B6D6F60}; + xmm_t xmm15 = {0x727476787A7C7E71, 0x737577797B7D7F70}; + + asm volatile("movq %0, %%r8\n\t" + "movq %1, %%r9\n\t" + "movq %2, %%r10\n\t" + "movq %3, %%r11\n\t" + "movq %4, %%r12\n\t" + "movq %5, %%r13\n\t" + "movq %6, %%r14\n\t" + "movq %7, %%r15\n\t" + "\n\t" + "movaps %8, %%xmm8\n\t" + "movaps %9, %%xmm9\n\t" + "movaps %10, %%xmm10\n\t" + "movaps %11, %%xmm11\n\t" + "movaps %12, %%xmm12\n\t" + "movaps %13, %%xmm13\n\t" + "movaps %14, %%xmm14\n\t" + "movaps %15, %%xmm15\n\t" + "\n\t" + "int3" + : + : "g"(r8), "g"(r9), "g"(r10), "g"(r11), "g"(r12), "g"(r13), + "g"(r14), "g"(r15), "m"(xmm8), "m"(xmm9), "m"(xmm10), + "m"(xmm11), "m"(xmm12), "m"(xmm13), "m"(xmm14), "m"(xmm15) + : "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", + "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13", + "%xmm14", "%xmm15"); +#endif + return 0; +} diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile new file mode 100644 index 00000000000..99998b20bcb --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/Makefile @@ -0,0 +1,3 @@ +CXX_SOURCES := main.cpp + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py new file mode 100644 index 00000000000..72e0d94d4de --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/signal-filtering/TestGdbRemote_QPassSignals.py @@ -0,0 +1,116 @@ +# This test makes sure that lldb-server supports and properly handles 
# QPassSignals GDB protocol package.

import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil


class TestGdbRemote_QPassSignals(gdbremote_testcase.GdbRemoteTestCaseBase):

    mydir = TestBase.compute_mydir(__file__)

    def expect_signal(self, expected_signo):
        """Continue the inferior and verify it stops with signal expected_signo.

        Sends vCont;c and matches the stub's $T<signo> stop-reply packet,
        comparing the captured hex signal number against expected_signo.
        """
        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
                                          {"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
                                           "capture": {1: "hex_exit_code"}},
                                          ],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        hex_exit_code = context.get("hex_exit_code")
        self.assertIsNotNone(hex_exit_code)
        self.assertEqual(int(hex_exit_code, 16), expected_signo)

    def expect_exit_code(self, exit_code):
        """Continue the inferior and verify it exits with the given exit code."""
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W{0:02x}#00".format(exit_code)],
            True)
        # Fix: check the sequence result like expect_signal()/ignore_signals()
        # do; the original discarded it, so a failed match went unnoticed here.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

    def ignore_signals(self, signals):
        """Send a QPassSignals packet naming *signals* and expect an OK reply."""
        def signal_name_to_hex(signame):
            return format(lldbutil.get_signal_number(signame), 'x')
        signals_str = ";".join(map(signal_name_to_hex, signals))

        self.test_sequence.add_log_lines(["read packet: $QPassSignals:" +
                                          signals_str + " #00",
                                          "send packet: $OK#00"],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_q_pass_signals(self):
        """Ignored signals must not stop the inferior; all others must."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV",
                           "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        signals_to_ignore = ["SIGUSR1", "SIGUSR2"]
        self.ignore_signals(signals_to_ignore)
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        # The inferior (signal-filtering/main.cpp) exits with the count of
        # signals its handler received, i.e. the ones passed straight through.
        self.expect_exit_code(len(signals_to_ignore))

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_change_signals_at_runtime(self):
        """QPassSignals may be re-sent mid-session to change the ignore set."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                           "SIGALRM", "SIGHUP"]
        signals_to_ignore = ["SIGFPE", "SIGBUS", "SIGINT"]

        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
            # Switch the ignore set partway through the signal sequence.
            if signal_name == "SIGALRM":
                self.ignore_signals(signals_to_ignore)
        self.expect_exit_code(len(signals_to_ignore))

    @skipIfWindows  # no signal support
    @expectedFailureNetBSD
    @llgs_test
    def test_default_signals_behavior(self):
        """Without QPassSignals, every raised signal stops the inferior."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                           "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        self.expect_exit_code(0)

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_support_q_pass_signals(self):
        """qSupported must advertise QPassSignals+ on supporting platforms."""
        self.init_llgs_test()
        self.build()

        # Start up the stub and start/prep the inferior.
        self.set_inferior_startup_launch()
        self.prep_debug_monitor_and_inferior()
        self.add_qSupported_packets()

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Retrieve the qSupported features and check QPassSignals+
        supported_dict = self.parse_qSupported_response(context)
        self.assertEqual(supported_dict["QPassSignals"], "+")
+import codecs + +from six.moves import queue +from lldbsuite.support import seven + + +def _handle_output_packet_string(packet_contents): + if (not packet_contents) or (len(packet_contents) < 1): + return None + elif packet_contents[0] != "O": + return None + elif packet_contents == "OK": + return None + else: + return seven.unhexlify(packet_contents[1:]) + + +def _dump_queue(the_queue): + while not the_queue.empty(): + print(codecs.encode(the_queue.get(True), "string_escape")) + print("\n") + + +class PumpQueues(object): + + def __init__(self): + self._output_queue = queue.Queue() + self._packet_queue = queue.Queue() + + def output_queue(self): + return self._output_queue + + def packet_queue(self): + return self._packet_queue + + def verify_queues_empty(self): + # Warn if there is any content left in any of the queues. + # That would represent unmatched packets. + if not self.output_queue().empty(): + print("warning: output queue entries still exist:") + _dump_queue(self.output_queue()) + print("from here:") + traceback.print_stack() + + if not self.packet_queue().empty(): + print("warning: packet queue entries still exist:") + _dump_queue(self.packet_queue()) + print("from here:") + traceback.print_stack() + + +class SocketPacketPump(object): + """A threaded packet reader that partitions packets into two streams. + + All incoming $O packet content is accumulated with the current accumulation + state put into the OutputQueue. + + All other incoming packets are placed in the packet queue. + + A select thread can be started and stopped, and runs to place packet + content into the two queues. 
+ """ + + _GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}') + + def __init__(self, pump_socket, pump_queues, logger=None): + if not pump_socket: + raise Exception("pump_socket cannot be None") + + self._thread = None + self._stop_thread = False + self._socket = pump_socket + self._logger = logger + self._receive_buffer = "" + self._accumulated_output = "" + self._pump_queues = pump_queues + + def __enter__(self): + """Support the python 'with' statement. + + Start the pump thread.""" + self.start_pump_thread() + return self + + def __exit__(self, exit_type, value, the_traceback): + """Support the python 'with' statement. + + Shut down the pump thread.""" + self.stop_pump_thread() + + def start_pump_thread(self): + if self._thread: + raise Exception("pump thread is already running") + self._stop_thread = False + self._thread = threading.Thread(target=self._run_method) + self._thread.start() + + def stop_pump_thread(self): + self._stop_thread = True + if self._thread: + self._thread.join() + + def _process_new_bytes(self, new_bytes): + if not new_bytes: + return + if len(new_bytes) < 1: + return + + # Add new bytes to our accumulated unprocessed packet bytes. + self._receive_buffer += new_bytes + + # Parse fully-formed packets into individual packets. + has_more = len(self._receive_buffer) > 0 + while has_more: + if len(self._receive_buffer) <= 0: + has_more = False + # handle '+' ack + elif self._receive_buffer[0] == "+": + self._pump_queues.packet_queue().put("+") + self._receive_buffer = self._receive_buffer[1:] + if self._logger: + self._logger.debug( + "parsed packet from stub: +\n" + + "new receive_buffer: {}".format( + self._receive_buffer)) + else: + packet_match = self._GDB_REMOTE_PACKET_REGEX.match( + self._receive_buffer) + if packet_match: + # Our receive buffer matches a packet at the + # start of the receive buffer. 
+ new_output_content = _handle_output_packet_string( + packet_match.group(1)) + if new_output_content: + # This was an $O packet with new content. + self._accumulated_output += new_output_content + self._pump_queues.output_queue().put(self._accumulated_output) + else: + # Any packet other than $O. + self._pump_queues.packet_queue().put(packet_match.group(0)) + + # Remove the parsed packet from the receive + # buffer. + self._receive_buffer = self._receive_buffer[ + len(packet_match.group(0)):] + if self._logger: + self._logger.debug( + "parsed packet from stub: " + + packet_match.group(0)) + self._logger.debug( + "new receive_buffer: " + + self._receive_buffer) + else: + # We don't have enough in the receive bufferto make a full + # packet. Stop trying until we read more. + has_more = False + + def _run_method(self): + self._receive_buffer = "" + self._accumulated_output = "" + + if self._logger: + self._logger.info("socket pump starting") + + # Keep looping around until we're asked to stop the thread. + while not self._stop_thread: + can_read, _, _ = select.select([self._socket], [], [], 0) + if can_read and self._socket in can_read: + try: + new_bytes = seven.bitcast_to_string(self._socket.recv(4096)) + if self._logger and new_bytes and len(new_bytes) > 0: + self._logger.debug( + "pump received bytes: {}".format(new_bytes)) + except: + # Likely a closed socket. Done with the pump thread. 
+ if self._logger: + self._logger.debug( + "socket read failed, stopping pump read thread\n" + + traceback.format_exc(3)) + break + self._process_new_bytes(new_bytes) + + if self._logger: + self._logger.info("socket pump exiting") + + def get_accumulated_output(self): + return self._accumulated_output + + def get_receive_buffer(self): + return self._receive_buffer diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py new file mode 100644 index 00000000000..6f07b2f9e28 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/test/test_lldbgdbserverutils.py @@ -0,0 +1,62 @@ + + +import unittest2 +import re + +from lldbgdbserverutils import * + + +class TestLldbGdbServerUtils(unittest2.TestCase): + + def test_entry_exact_payload_match(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#9a") + + def test_entry_exact_payload_match_ignores_checksum(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + entry.assert_match(self, "$OK#00") + + def test_entry_creates_context(self): + entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a") + context = entry.assert_match(self, "$OK#9a") + self.assertIsNotNone(context) + + def test_entry_regex_matches(self): + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), + capture={ + 1: "thread_id"}) + context = entry.assert_match(self, "$QC980#00") + + def test_entry_regex_saves_match(self): + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"), + capture={ + 1: "thread_id"}) + context = entry.assert_match(self, "$QC980#00") + self.assertEqual(context["thread_id"], "980") + + def test_entry_regex_expect_captures_success(self): + context = {"thread_id": "980"} + entry = 
GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), + expect_captures={ + 2: "thread_id"}) + entry.assert_match(self, "$T11thread:980;", context=context) + + def test_entry_regex_expect_captures_raises_on_fail(self): + context = {"thread_id": "980"} + entry = GdbRemoteEntry( + is_send_to_remote=False, + regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"), + expect_captures={ + 2: "thread_id"}) + try: + entry.assert_match(self, "$T11thread:970;", context=context) + self.fail() + except AssertionError: + # okay + return None diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile new file mode 100644 index 00000000000..de4ec12b13c --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/Makefile @@ -0,0 +1,4 @@ +ENABLE_THREADS := YES +CXX_SOURCES := main.cpp + +include Makefile.rules diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py new file mode 100644 index 00000000000..9ec40c11742 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/TestGdbRemoteThreadName.py @@ -0,0 +1,41 @@ + +import gdbremote_testcase +from lldbsuite.test.decorators import * +from lldbsuite.test.lldbtest import * +from lldbsuite.test import lldbutil + + +class TestGdbRemoteThreadName(gdbremote_testcase.GdbRemoteTestCaseBase): + + mydir = TestBase.compute_mydir(__file__) + + def run_and_check_name(self, expected_name): + self.test_sequence.add_log_lines(["read packet: $vCont;c#a8", + {"direction": "send", + "regex": + r"^\$T([0-9a-fA-F]{2})([^#]+)#[0-9a-fA-F]{2}$", + "capture": { + 1: "signal", + 2: "key_vals_text"}}, + ], + True) + + context = 
self.expect_gdbremote_sequence() + self.assertIsNotNone(context) + + sigint = lldbutil.get_signal_number("SIGINT") + self.assertEqual(sigint, int(context.get("signal"), 16)) + kv_dict = self.parse_key_val_dict(context.get("key_vals_text")) + self.assertEqual(expected_name, kv_dict.get("name")) + + @skipIfWindows # the test is not updated for Windows. + @llgs_test + def test(self): + """ Make sure lldb-server can retrieve inferior thread name""" + self.init_llgs_test() + self.build() + self.set_inferior_startup_launch() + procs = self.prep_debug_monitor_and_inferior() + + self.run_and_check_name("hello world") + self.run_and_check_name("goodbye world") diff --git a/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp new file mode 100644 index 00000000000..86f0ecf76c7 --- /dev/null +++ b/gnu/llvm/lldb/packages/Python/lldbsuite/test/tools/lldb-server/thread-name/main.cpp @@ -0,0 +1,24 @@ +#include <pthread.h> +#include <signal.h> + +void set_thread_name(const char *name) { +#if defined(__APPLE__) + ::pthread_setname_np(name); +#elif defined(__FreeBSD__) + ::pthread_set_name_np(::pthread_self(), name); +#elif defined(__OpenBSD__) + ::pthread_set_name_np(::pthread_self(), name); +#elif defined(__linux__) + ::pthread_setname_np(::pthread_self(), name); +#elif defined(__NetBSD__) + ::pthread_setname_np(::pthread_self(), "%s", const_cast<char *>(name)); +#endif +} + +int main() { + set_thread_name("hello world"); + raise(SIGINT); + set_thread_name("goodbye world"); + raise(SIGINT); + return 0; +} |