forked from OSchip/llvm-project
Restore tests for lldb-server and lldb-vscode removed at rL366590
Summary: This was removed at rL366590 by accident.
Reviewers: xiaobai, jfb
Reviewed By: xiaobai
Subscribers: dexonsmith, srhines, krytarowski, jfb, lldb-commits
Tags: #lldb
Differential Revision: https://reviews.llvm.org/D65123
llvm-svn: 366766
This commit is contained in:
parent
2f5543aa72
commit
a61c247ce1
|
@ -0,0 +1 @@
|
|||
BasedOnStyle: LLVM
|
|
@ -0,0 +1,8 @@
|
|||
# Makefile for the lldb-server test inferior (main.cpp).
LEVEL = ../../make

# Expose C99 limit/format macros to C++ (required pre-C++11 for
# <stdint.h>/<inttypes.h> macros such as PRIx64).
override CFLAGS_EXTRAS += -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS
# Link with thread support.
ENABLE_THREADS := YES
CXX_SOURCES := main.cpp
# Skip generating a .dSYM bundle for the inferior.
MAKE_DSYM :=NO

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,123 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
import json
|
||||
|
||||
class TestAppleSimulatorOSType(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Check that a process spawned into an Apple simulator reports the
    simulator OS (ios/tvos/watchos) via the gdb-remote protocol, both in
    the qProcessInfo 'ostype' field and in the main image's
    min_version_os_name from jGetLoadedDynamicLibrariesInfos.
    """

    mydir = TestBase.compute_mydir(__file__)

    def check_simulator_ostype(self, sdk, platform, arch='x86_64'):
        """Build against `sdk`, spawn the binary into an available simulator
        whose runtime name contains `platform`, attach debugserver, and
        verify the reported ostype and min_version_os_name equal `platform`.

        Args:
            sdk: SDK name for `xcrun --show-sdk-path --sdk` (e.g.
                'iphonesimulator').
            platform: Expected ostype string (e.g. 'ios'); also used to
                select a matching simulator runtime.
            arch: Architecture to build for (default 'x86_64').
        """
        # Ask simctl for the full device list as JSON.
        sim_devices_str = subprocess.check_output(
            ['xcrun', 'simctl', 'list', '-j', 'devices']).decode("utf-8")
        sim_devices = json.loads(sim_devices_str)['devices']

        # Find an available simulator for the requested platform.  Depending
        # on the Xcode version, 'devices' is either a list of runtime dicts
        # or a dict keyed by runtime name; handle both shapes.
        deviceUDID = None
        for simulator in sim_devices:
            if isinstance(simulator, dict):
                runtime = simulator['name']
                devices = simulator['devices']
            else:
                runtime = simulator
                devices = sim_devices[simulator]
            if platform not in runtime.lower():
                continue
            for device in devices:
                # Older simctl reports an 'availability' string, newer ones
                # a boolean 'isAvailable'; accept either form.
                if 'availability' in device and device['availability'] != '(available)':
                    continue
                if 'isAvailable' in device and not device['isAvailable']:
                    continue
                deviceUDID = device['udid']
                break
            if deviceUDID is not None:
                break

        # Launch the process using simctl.
        self.assertIsNotNone(deviceUDID)
        exe_name = 'test_simulator_platform_{}'.format(platform)
        sdkroot = subprocess.check_output(['xcrun', '--show-sdk-path', '--sdk',
                                           sdk]).decode("utf-8")
        self.build(dictionary={'EXE': exe_name, 'SDKROOT': sdkroot.strip(),
                               'ARCH': arch})
        exe_path = self.getBuildArtifact(exe_name)
        sim_launcher = subprocess.Popen(['xcrun', 'simctl', 'spawn',
                                         deviceUDID, exe_path,
                                         'print-pid', 'sleep:10'],
                                        stderr=subprocess.PIPE)

        # Get the PID from the process output.
        pid = None
        while not pid:
            stderr = sim_launcher.stderr.readline().decode("utf-8")
            if stderr == '':
                # BUGFIX: readline() only returns '' at EOF, so the original
                # `continue` here busy-looped forever if the launcher exited
                # without printing a PID.  Fail the test instead.
                self.fail("simulator launcher exited without printing a PID")
            m = re.match(r"PID: (.*)", stderr)
            self.assertIsNotNone(m)
            pid = int(m.group(1))

        # Launch debug monitor attaching to the simulated process.
        self.init_debugserver_test()
        server = self.connect_to_debug_monitor(attach_pid=pid)

        # Setup packet sequences.
        self.add_no_ack_remote_stream()
        self.add_process_info_collection_packets()
        self.test_sequence.add_log_lines(
            ["read packet: " +
             "$jGetLoadedDynamicLibrariesInfos:{\"fetch_all_solibs\" : true}]#ce",
             {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "dylib_info_raw"}}],
            True)

        # Run the stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response.
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Check that ostype is correct.
        self.assertEqual(process_info['ostype'], platform)

        # Now for dylibs: find the image entry for our test binary.
        dylib_info_raw = context.get("dylib_info_raw")
        dylib_info = json.loads(self.decode_gdbremote_binary(dylib_info_raw))
        images = dylib_info['images']

        image_info = None
        for image in images:
            if image['pathname'] != exe_path:
                continue
            image_info = image
            break

        self.assertIsNotNone(image_info)
        # BUGFIX: the original asserted on the loop variable `image` (the
        # last image iterated) rather than the matched `image_info`.
        self.assertEqual(image_info['min_version_os_name'], platform)

    @apple_simulator_test('iphone')
    @debugserver_test
    @skipIfDarwinEmbedded
    def test_simulator_ostype_ios(self):
        self.check_simulator_ostype(sdk='iphonesimulator',
                                    platform='ios')

    @apple_simulator_test('appletv')
    @debugserver_test
    @skipIfDarwinEmbedded
    def test_simulator_ostype_tvos(self):
        self.check_simulator_ostype(sdk='appletvsimulator',
                                    platform='tvos')

    @apple_simulator_test('watch')
    @debugserver_test
    @skipIfDarwinEmbedded
    def test_simulator_ostype_watchos(self):
        self.check_simulator_ostype(sdk='watchsimulator',
                                    platform='watchos', arch='i386')
|
|
@ -0,0 +1,67 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Exercise attaching to an already-running inferior via the gdb-remote
    vAttach packet, against both debugserver and lldb-server (llgs)."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def attach_with_vAttach(self):
        """Attach to the launched inferior with vAttach and verify the stub
        reports a stop notification and the expected process id."""
        # Start the inferior, start the debug monitor, nothing is attached yet.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["sleep:60"])
        self.assertIsNotNone(procs)

        # Make sure the target process has been launched.
        inferior = procs.get("inferior")
        self.assertIsNotNone(inferior)
        self.assertTrue(inferior.pid > 0)
        self.assertTrue(
            lldbgdbserverutils.process_is_running(
                inferior.pid, True))

        # Add attach packets.
        self.test_sequence.add_log_lines([
            # Do the attach.  The pid is sent in hex ({:x}).
            "read packet: $vAttach;{:x}#00".format(inferior.pid),
            # Expect a stop notification from the attach.
            {"direction": "send",
             "regex": r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$",
             "capture": {1: "stop_signal_hex"}},
        ], True)
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the process id matches what we expected.
        pid_text = process_info.get('pid', None)
        self.assertIsNotNone(pid_text)
        # The pid field is reported as a hex string.
        reported_pid = int(pid_text, base=16)
        self.assertEqual(reported_pid, inferior.pid)

    @debugserver_test
    def test_attach_with_vAttach_debugserver(self):
        """Run the vAttach scenario against debugserver."""
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach_manually()
        self.attach_with_vAttach()

    @llgs_test
    def test_attach_with_vAttach_llgs(self):
        """Run the vAttach scenario against lldb-server (llgs)."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach_manually()
        self.attach_with_vAttach()
|
|
@ -0,0 +1,220 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteAuxvSupport(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Test the stub's qXfer:auxv:read support: correct size, plausible
    keys, and chunked reads matching a single large read."""

    mydir = TestBase.compute_mydir(__file__)

    # qSupported feature name that advertises auxv read support.
    AUXV_SUPPORT_FEATURE_NAME = "qXfer:auxv:read"

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def has_auxv_support(self):
        """Start the inferior, interrupt it once it is up, and return True
        when the stub's qSupported reply advertises qXfer:auxv:read with
        a '+' flag."""
        inferior_args = ["message:main entered", "sleep:5"]
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=inferior_args)

        # Don't do anything until we match the launched inferior main entry output.
        # Then immediately interrupt the process.
        # This prevents auxv data being asked for before it's ready and leaves
        # us in a stopped state.
        self.test_sequence.add_log_lines([
            # Start the inferior...
            "read packet: $c#63",
            # ... match output....
            {"type": "output_match", "regex": self.maybe_strict_output_regex(
                r"message:main entered\r\n")},
        ], True)
        # ... then interrupt.
        self.add_interrupt_packets()
        self.add_qSupported_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        features = self.parse_qSupported_response(context)
        return self.AUXV_SUPPORT_FEATURE_NAME in features and features[
            self.AUXV_SUPPORT_FEATURE_NAME] == "+"

    def get_raw_auxv_data(self):
        """Fetch the full auxv blob in one qXfer:auxv:read request.

        Returns (word_size, decoded_auxv_bytes); skips the test when the
        stub has no auxv support."""
        # Start up llgs and inferior, and check for auxv support.
        if not self.has_auxv_support():
            self.skipTest("auxv data not supported")

        # Grab pointer size for target.  We'll assume that is equivalent to an unsigned long on the target.
        # Auxv is specified in terms of pairs of unsigned longs.
        self.reset_test_sequence()
        self.add_process_info_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        proc_info = self.parse_process_info_response(context)
        self.assertIsNotNone(proc_info)
        self.assertTrue("ptrsize" in proc_info)
        word_size = int(proc_info["ptrsize"])

        OFFSET = 0
        LENGTH = 0x400

        # Grab the auxv data.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                "read packet: $qXfer:auxv:read::{:x},{:x}:#00".format(
                    OFFSET,
                    LENGTH),
                {
                    "direction": "send",
                    # First captured char is the response type ('l' means
                    # last/complete chunk); the rest is the binary payload.
                    "regex": re.compile(
                        r"^\$([^E])(.*)#[0-9a-fA-F]{2}$",
                        re.MULTILINE | re.DOTALL),
                    "capture": {
                        1: "response_type",
                        2: "content_raw"}}],
            True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Ensure we end up with all auxv data in one packet.
        # FIXME don't assume it all comes back in one packet.
        self.assertEqual(context.get("response_type"), "l")

        # Decode binary data.
        content_raw = context.get("content_raw")
        self.assertIsNotNone(content_raw)
        return (word_size, self.decode_gdbremote_binary(content_raw))

    def supports_auxv(self):
        """Assert that the stub advertises auxv support."""
        # When non-auxv platforms support llgs, skip the test on platforms
        # that don't support auxv.
        self.assertTrue(self.has_auxv_support())

    #
    # We skip the "supports_auxv" test on debugserver.  The rest of the tests
    # appropriately skip the auxv tests if the support flag is not present
    # in the qSupported response, so the debugserver test bits are still there
    # in case debugserver code one day does have auxv support and thus those
    # tests don't get skipped.
    #

    @llgs_test
    def test_supports_auxv_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.supports_auxv()

    def auxv_data_is_correct_size(self):
        """Check the auxv blob length is a whole number of entries."""
        (word_size, auxv_data) = self.get_raw_auxv_data()
        self.assertIsNotNone(auxv_data)

        # Ensure auxv data is a multiple of 2*word_size (there should be two
        # unsigned long fields per auxv entry).
        self.assertEqual(len(auxv_data) % (2 * word_size), 0)
        # print("auxv contains {} entries".format(len(auxv_data) / (2*word_size)))

    @debugserver_test
    def test_auxv_data_is_correct_size_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_data_is_correct_size()

    @llgs_test
    def test_auxv_data_is_correct_size_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_data_is_correct_size()

    def auxv_keys_look_valid(self):
        """Decode the auxv blob and check each key falls in a plausible
        range (1..1000)."""
        (word_size, auxv_data) = self.get_raw_auxv_data()
        self.assertIsNotNone(auxv_data)

        # Grab endian.
        self.reset_test_sequence()
        self.add_process_info_collection_packets()
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)
        endian = process_info.get("endian")
        self.assertIsNotNone(endian)

        auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
        self.assertIsNotNone(auxv_dict)

        # Verify keys look reasonable.
        for auxv_key in auxv_dict:
            self.assertTrue(auxv_key >= 1)
            self.assertTrue(auxv_key <= 1000)
        # print("auxv dict: {}".format(auxv_dict))

    @debugserver_test
    def test_auxv_keys_look_valid_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_keys_look_valid()

    @llgs_test
    def test_auxv_keys_look_valid_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_keys_look_valid()

    def auxv_chunked_reads_work(self):
        """Read the auxv data again in small (one entry per request)
        chunks and verify it matches the single large read."""
        # Verify that multiple smaller offset,length reads of auxv data
        # return the same data as a single larger read.

        # Grab the auxv data with a single large read here.
        (word_size, auxv_data) = self.get_raw_auxv_data()
        self.assertIsNotNone(auxv_data)

        # Grab endian.
        self.reset_test_sequence()
        self.add_process_info_collection_packets()
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)
        endian = process_info.get("endian")
        self.assertIsNotNone(endian)

        auxv_dict = self.build_auxv_dict(endian, word_size, auxv_data)
        self.assertIsNotNone(auxv_dict)

        # 2 * word_size == one auxv entry per chunked request.
        iterated_auxv_data = self.read_binary_data_in_chunks(
            "qXfer:auxv:read::", 2 * word_size)
        self.assertIsNotNone(iterated_auxv_data)

        auxv_dict_iterated = self.build_auxv_dict(
            endian, word_size, iterated_auxv_data)
        self.assertIsNotNone(auxv_dict_iterated)

        # Verify both types of data collection returned same content.
        self.assertEqual(auxv_dict_iterated, auxv_dict)

    @debugserver_test
    def test_auxv_chunked_reads_work_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_chunked_reads_work()

    @llgs_test
    def test_auxv_chunked_reads_work_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.auxv_chunked_reads_work()
|
|
@ -0,0 +1,127 @@
|
|||
from __future__ import print_function
|
||||
|
||||
# lldb test suite imports
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import TestBase
|
||||
|
||||
# gdb-remote-specific imports
|
||||
import lldbgdbserverutils
|
||||
from gdbremote_testcase import GdbRemoteTestCaseBase
|
||||
|
||||
|
||||
class TestGdbRemoteExitCode(GdbRemoteTestCaseBase):
    """Verify inferior launch via the gdb-remote A packet and that the
    stub reports the inferior's exit status in a W packet."""

    mydir = TestBase.compute_mydir(__file__)

    # Error reply the A (launch) packet returns on a failed launch.
    FAILED_LAUNCH_CODE = "E08"

    def get_launch_fail_reason(self):
        """Query qLaunchSuccess and return the failure reason text (the
        reply with its leading status character stripped)."""
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            ["read packet: $qLaunchSuccess#00"],
            True)
        self.test_sequence.add_log_lines(
            [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "launch_result"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        # Drop the first character of the reply (presumably the 'E' of an
        # error reply — confirm against the stub's qLaunchSuccess format).
        return context.get("launch_result")[1:]

    def start_inferior(self):
        """Launch the inferior with an A packet; on failure, fail the test
        with the stub-provided reason."""
        launch_args = self.install_and_create_launch_args()

        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.test_sequence.add_log_lines(
            ["read packet: %s" % lldbgdbserverutils.build_gdbremote_A_packet(
                launch_args)],
            True)
        self.test_sequence.add_log_lines(
            [{"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "A_result"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        launch_result = context.get("A_result")
        self.assertIsNotNone(launch_result)
        if launch_result == self.FAILED_LAUNCH_CODE:
            fail_reason = self.get_launch_fail_reason()
            self.fail("failed to launch inferior: " + fail_reason)

    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_start_inferior_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.start_inferior()

    @llgs_test
    def test_start_inferior_llgs(self):
        self.init_llgs_test()
        self.build()
        self.start_inferior()

    def inferior_exit_0(self):
        """Continue the inferior and expect a $W00 packet (exit code 0)."""
        launch_args = self.install_and_create_launch_args()

        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W00#00"],
            True)

        self.expect_gdbremote_sequence()

    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_inferior_exit_0_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.inferior_exit_0()

    @llgs_test
    def test_inferior_exit_0_llgs(self):
        self.init_llgs_test()
        self.build()
        self.inferior_exit_0()

    def inferior_exit_42(self):
        """Ask the inferior to exit with code 42 and expect the matching
        hex-encoded $W2a packet."""
        launch_args = self.install_and_create_launch_args()

        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)

        RETVAL = 42

        # build launch args
        launch_args += ["retval:%d" % RETVAL]

        self.add_no_ack_remote_stream()
        self.add_verified_launch_packets(launch_args)
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W{0:02x}#00".format(RETVAL)],
            True)

        self.expect_gdbremote_sequence()

    @debugserver_test
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_inferior_exit_42_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.inferior_exit_42()

    @llgs_test
    def test_inferior_exit_42_llgs(self):
        self.init_llgs_test()
        self.build()
        self.inferior_exit_42()
|
|
@ -0,0 +1,162 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteExpeditedRegisters(
        gdbremote_testcase.GdbRemoteTestCaseBase):
    """Check the registers a stub expedites in stop-reply packets: at
    least one present, no duplicates, and the generic pc/fp/sp registers
    included."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def gather_expedited_registers(self):
        """Continue the inferior, interrupt it, and return the expedited
        registers parsed from the resulting stop notification."""
        # Setup the stub and set the gdb remote command stream.
        procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"])
        self.test_sequence.add_log_lines([
            # Start up the inferior.
            "read packet: $c#63",
            # Immediately tell it to stop. We want to see what it reports.
            # chr(3) is the raw interrupt (^C) byte.
            "read packet: {}".format(chr(3)),
            {"direction": "send",
             "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
             "capture": {1: "stop_result",
                         2: "key_vals_text"}},
        ], True)

        # Run the gdb remote command stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Pull out expedited registers.
        key_vals_text = context.get("key_vals_text")
        self.assertIsNotNone(key_vals_text)

        expedited_registers = self.extract_registers_from_stop_notification(
            key_vals_text)
        self.assertIsNotNone(expedited_registers)

        return expedited_registers

    def stop_notification_contains_generic_register(
            self, generic_register_name):
        """Verify the stop notification expedites the named generic
        register (e.g. "pc", "fp", "sp")."""
        # Generate a stop reply, parse out expedited registers from stop
        # notification.
        expedited_registers = self.gather_expedited_registers()
        self.assertIsNotNone(expedited_registers)
        self.assertTrue(len(expedited_registers) > 0)

        # Gather target register infos.
        reg_infos = self.gather_register_infos()

        # Find the generic register.
        reg_info = self.find_generic_register_with_name(
            reg_infos, generic_register_name)
        self.assertIsNotNone(reg_info)

        # Ensure the expedited registers contained it.
        self.assertTrue(reg_info["lldb_register_index"] in expedited_registers)
        # print("{} reg_info:{}".format(generic_register_name, reg_info))

    def stop_notification_contains_any_registers(self):
        """Verify at least one register is expedited in the stop reply."""
        # Generate a stop reply, parse out expedited registers from stop
        # notification.
        expedited_registers = self.gather_expedited_registers()
        # Verify we have at least one expedited register.
        self.assertTrue(len(expedited_registers) > 0)

    @debugserver_test
    def test_stop_notification_contains_any_registers_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_any_registers()

    @llgs_test
    def test_stop_notification_contains_any_registers_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_any_registers()

    def stop_notification_contains_no_duplicate_registers(self):
        """Verify no register number appears more than once in the stop
        reply (duplicates surface as list values in the parsed dict)."""
        # Generate a stop reply, parse out expedited registers from stop
        # notification.
        expedited_registers = self.gather_expedited_registers()
        # Verify no expedited register was specified multiple times.
        for (reg_num, value) in list(expedited_registers.items()):
            if (isinstance(value, list)) and (len(value) > 0):
                self.fail(
                    "expedited register number {} specified more than once ({} times)".format(
                        reg_num, len(value)))

    @debugserver_test
    def test_stop_notification_contains_no_duplicate_registers_debugserver(
            self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_no_duplicate_registers()

    @llgs_test
    def test_stop_notification_contains_no_duplicate_registers_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_no_duplicate_registers()

    def stop_notification_contains_pc_register(self):
        self.stop_notification_contains_generic_register("pc")

    @debugserver_test
    def test_stop_notification_contains_pc_register_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_pc_register()

    @llgs_test
    def test_stop_notification_contains_pc_register_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_pc_register()

    # powerpc64 has no FP register
    # NOTE(review): this decorator sits on the helper rather than on the
    # test_* entry points; it relies on the skip exception propagating out
    # of the helper call — confirm it fires on powerpc64.
    @skipIf(triple='^powerpc64')
    def stop_notification_contains_fp_register(self):
        self.stop_notification_contains_generic_register("fp")

    @debugserver_test
    def test_stop_notification_contains_fp_register_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_fp_register()

    @llgs_test
    def test_stop_notification_contains_fp_register_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_fp_register()

    def stop_notification_contains_sp_register(self):
        self.stop_notification_contains_generic_register("sp")

    @debugserver_test
    def test_stop_notification_contains_sp_register_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_sp_register()

    @llgs_test
    def test_stop_notification_contains_sp_register_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_notification_contains_sp_register()
|
|
@ -0,0 +1,131 @@
|
|||
from __future__ import print_function
|
||||
|
||||
# lldb test suite imports
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import TestBase
|
||||
|
||||
# gdb-remote-specific imports
|
||||
import lldbgdbserverutils
|
||||
from gdbremote_testcase import GdbRemoteTestCaseBase
|
||||
|
||||
|
||||
class TestGdbRemoteHostInfo(GdbRemoteTestCaseBase):
    """Validate the gdb-remote qHostInfo packet: the stub must reply with
    key:value; pairs drawn from a known key set, and on Darwin must
    include a minimum required subset of keys."""

    mydir = TestBase.compute_mydir(__file__)

    # All keys a stub may legitimately report in a qHostInfo response.
    KNOWN_HOST_INFO_KEYS = set([
        "arch",
        "cputype",
        "cpusubtype",
        "distribution_id",
        "endian",
        "hostname",
        "ostype",
        "os_build",
        "os_kernel",
        "os_version",
        "ptrsize",
        "triple",
        "vendor",
        "watchpoint_exceptions_received",
        "default_packet_timeout",
    ])

    # Keys that must always be present when the host is Darwin.
    DARWIN_REQUIRED_HOST_INFO_KEYS = set([
        "cputype",
        "cpusubtype",
        "endian",
        "ostype",
        "ptrsize",
        "vendor",
        "watchpoint_exceptions_received"
    ])

    def add_host_info_collection_packets(self):
        """Queue a qHostInfo request and capture the raw reply payload."""
        self.test_sequence.add_log_lines(
            ["read packet: $qHostInfo#9b",
             {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "host_info_raw"}}],
            True)

    def parse_host_info_response(self, context):
        """Parse the captured qHostInfo payload into a dict and validate
        every key against KNOWN_HOST_INFO_KEYS.

        Returns the {key: value} dict."""
        # Ensure we have a host info response.
        self.assertIsNotNone(context)
        host_info_raw = context.get("host_info_raw")
        self.assertIsNotNone(host_info_raw)

        # Pull out key:value; pairs.
        host_info_dict = {match.group(1): match.group(2)
                          for match in re.finditer(r"([^:]+):([^;]+);",
                                                   host_info_raw)}

        # Diagnostic output kept on purpose: the full reply is useful when
        # a key-validation failure needs to be debugged from the test log.
        import pprint
        print("\nqHostInfo response:")
        pprint.pprint(host_info_dict)

        # Validate keys are known.  assertIn gives a clearer failure
        # message than assertTrue(key in ...); the redundant list() around
        # .items() was also dropped.
        for key, val in host_info_dict.items():
            self.assertIn(key, self.KNOWN_HOST_INFO_KEYS,
                          "unknown qHostInfo key: " + key)
            self.assertIsNotNone(val)

        # Return the key:val pairs.
        return host_info_dict

    def get_qHostInfo_response(self):
        """Connect to the stub, issue qHostInfo, and return the parsed,
        non-empty response dict."""
        # Launch the debug monitor stub, attaching to the inferior.
        server = self.connect_to_debug_monitor()
        self.assertIsNotNone(server)
        self.add_no_ack_remote_stream()

        # Request qHostInfo and get response
        self.add_host_info_collection_packets()
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Parse qHostInfo response.
        host_info = self.parse_host_info_response(context)
        self.assertIsNotNone(host_info)
        self.assertGreater(len(host_info), 0, "qHostInfo should have returned "
                           "at least one key:val pair.")
        return host_info

    def validate_darwin_minimum_host_info_keys(self, host_info_dict):
        """Fail unless every DARWIN_REQUIRED_HOST_INFO_KEYS entry appears
        in host_info_dict."""
        self.assertIsNotNone(host_info_dict)
        missing_keys = [key for key in self.DARWIN_REQUIRED_HOST_INFO_KEYS
                        if key not in host_info_dict]
        # assertEquals is a deprecated unittest alias; use assertEqual.
        self.assertEqual(0, len(missing_keys),
                         "qHostInfo is missing the following required "
                         "keys: " + str(missing_keys))

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_qHostInfo_returns_at_least_one_key_val_pair_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.get_qHostInfo_response()

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @llgs_test
    def test_qHostInfo_returns_at_least_one_key_val_pair_llgs(self):
        self.init_llgs_test()
        self.build()
        self.get_qHostInfo_response()

    @skipUnlessDarwin
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_qHostInfo_contains_darwin_required_keys_debugserver(self):
        self.init_debugserver_test()
        self.build()
        host_info_dict = self.get_qHostInfo_response()
        self.validate_darwin_minimum_host_info_keys(host_info_dict)

    @skipUnlessDarwin
    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @llgs_test
    def test_qHostInfo_contains_darwin_required_keys_llgs(self):
        self.init_llgs_test()
        self.build()
        host_info_dict = self.get_qHostInfo_response()
        self.validate_darwin_minimum_host_info_keys(host_info_dict)
|
|
@ -0,0 +1,59 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteKill(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Verify that the gdb-remote k (kill) packet terminates the
    inferior process."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfDarwinEmbedded  # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def attach_commandline_kill_after_initial_stop(self):
        """Send k after the initial stop and confirm the inferior dies."""
        procs = self.prep_debug_monitor_and_inferior()
        self.test_sequence.add_log_lines([
            "read packet: $k#6b",
            # Expect an X (process exited) notification.
            {"direction": "send", "regex": r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}"},
        ], True)

        if self.stub_sends_two_stop_notifications_on_kill:
            # Add an expectation for a second X result for stubs that send two
            # of these.
            self.test_sequence.add_log_lines([
                {"direction": "send", "regex": r"^\$X[0-9a-fA-F]+([^#]*)#[0-9A-Fa-f]{2}"},
            ], True)

        self.expect_gdbremote_sequence()

        # Wait a moment for completed and now-detached inferior process to
        # clear.
        time.sleep(1)

        if not lldb.remote_platform:
            # Process should be dead now. Reap results.
            poll_result = procs["inferior"].poll()
            self.assertIsNotNone(poll_result)

        # Where possible, verify at the system level that the process is not
        # running.
        self.assertFalse(
            lldbgdbserverutils.process_is_running(
                procs["inferior"].pid, False))

    @debugserver_test
    def test_attach_commandline_kill_after_initial_stop_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_kill_after_initial_stop()

    @llgs_test
    def test_attach_commandline_kill_after_initial_stop_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_kill_after_initial_stop()
|
|
@ -0,0 +1,44 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
from lldbsuite.support import seven
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteModuleInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Exercise the jModulesInfo packet against lldb-server.

    Sends a jModulesInfo query for the inferior's a.out (file path + triple
    taken from the qProcessInfo reply) and checks the JSON-ish response
    contains the expected module fields.
    """

    mydir = TestBase.compute_mydir(__file__)

    def module_info(self):
        procs = self.prep_debug_monitor_and_inferior()
        self.add_process_info_collection_packets()
        context = self.expect_gdbremote_sequence()
        info = self.parse_process_info_response(context)

        # NOTE: the trailing "]]" in the request and in the reply regex mirrors
        # the actual packet framing the stub emits; do not "fix" the bracket
        # balance here.
        self.test_sequence.add_log_lines([
            'read packet: $jModulesInfo:[{"file":"%s","triple":"%s"}]]#00' % (
                lldbutil.append_to_process_working_directory(self, "a.out"),
                seven.unhexlify(info["triple"])),
            {"direction": "send",
             "regex": r'^\$\[{(.*)}\]\]#[0-9A-Fa-f]{2}',
             "capture": {1: "spec"}},
        ], True)

        context = self.expect_gdbremote_sequence()
        spec = context.get("spec")
        # Fixed: these patterns previously used non-raw strings containing \d
        # and \w, which are invalid escape sequences in Python string literals
        # (DeprecationWarning, and a SyntaxWarning/error in newer Pythons).
        # Raw strings keep the regexes byte-identical.
        self.assertRegexpMatches(spec, r'"file_path":".*"')
        self.assertRegexpMatches(spec, r'"file_offset":\d+')
        self.assertRegexpMatches(spec, r'"file_size":\d+')
        self.assertRegexpMatches(spec, r'"triple":"\w*-\w*-.*"')
        self.assertRegexpMatches(spec, r'"uuid":"[A-Fa-f0-9]+"')

    @llgs_test
    def test_module_info(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.module_info()
|
|
@ -0,0 +1,211 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteProcessInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Exercise the qProcessInfo packet.

    Covers: sane pid reporting, pid matching an attached inferior, endian
    reporting, and platform-specific presence/absence of keys (cputype /
    cpusubtype on Darwin, triple on Linux).
    """

    mydir = TestBase.compute_mydir(__file__)

    def qProcessInfo_returns_running_process(self):
        # Shared scenario: launch inferior, query qProcessInfo, sanity-check pid.
        procs = self.prep_debug_monitor_and_inferior()
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the process id looks reasonable.
        pid_text = process_info.get("pid")
        self.assertIsNotNone(pid_text)
        pid = int(pid_text, base=16)
        self.assertNotEqual(0, pid)

        # If possible, verify that the process is running.
        self.assertTrue(lldbgdbserverutils.process_is_running(pid, True))

    @debugserver_test
    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qProcessInfo_returns_running_process_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.qProcessInfo_returns_running_process()

    @llgs_test
    def test_qProcessInfo_returns_running_process_llgs(self):
        self.init_llgs_test()
        self.build()
        self.qProcessInfo_returns_running_process()

    def attach_commandline_qProcessInfo_reports_correct_pid(self):
        # Shared scenario: attach to a known inferior and check qProcessInfo
        # reports exactly that pid.
        procs = self.prep_debug_monitor_and_inferior()
        self.assertIsNotNone(procs)
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence(timeout_seconds=8)
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the process id matches what we expected.
        pid_text = process_info.get('pid', None)
        self.assertIsNotNone(pid_text)
        reported_pid = int(pid_text, base=16)
        self.assertEqual(reported_pid, procs["inferior"].pid)

    @debugserver_test
    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_attach_commandline_qProcessInfo_reports_correct_pid_debugserver(
            self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_qProcessInfo_reports_correct_pid()

    @llgs_test
    def test_attach_commandline_qProcessInfo_reports_correct_pid_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_attach()
        self.attach_commandline_qProcessInfo_reports_correct_pid()

    def qProcessInfo_reports_valid_endian(self):
        # Shared scenario: the endian key must be one of the known values.
        procs = self.prep_debug_monitor_and_inferior()
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the process id looks reasonable.
        endian = process_info.get("endian")
        self.assertIsNotNone(endian)
        self.assertTrue(endian in ["little", "big", "pdp"])

    @debugserver_test
    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qProcessInfo_reports_valid_endian_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.qProcessInfo_reports_valid_endian()

    @llgs_test
    def test_qProcessInfo_reports_valid_endian_llgs(self):
        self.init_llgs_test()
        self.build()
        self.qProcessInfo_reports_valid_endian()

    def qProcessInfo_contains_keys(self, expected_key_set):
        # Shared scenario: assert every key in expected_key_set appears in the
        # qProcessInfo reply.
        procs = self.prep_debug_monitor_and_inferior()
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the expected keys are present and non-None within the process
        # info.
        missing_key_set = set()
        for expected_key in expected_key_set:
            if expected_key not in process_info:
                missing_key_set.add(expected_key)

        self.assertEqual(
            missing_key_set,
            set(),
            "the listed keys are missing in the qProcessInfo result")

    def qProcessInfo_does_not_contain_keys(self, absent_key_set):
        # Shared scenario: assert no key in absent_key_set appears in the
        # qProcessInfo reply.
        procs = self.prep_debug_monitor_and_inferior()
        self.add_process_info_collection_packets()

        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)

        # Ensure the unexpected keys are not present
        unexpected_key_set = set()
        for unexpected_key in absent_key_set:
            if unexpected_key in process_info:
                unexpected_key_set.add(unexpected_key)

        self.assertEqual(
            unexpected_key_set,
            set(),
            "the listed keys were present but unexpected in qProcessInfo result")

    @skipUnlessDarwin
    @debugserver_test
    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qProcessInfo_contains_cputype_cpusubtype_debugserver_darwin(self):
        self.init_debugserver_test()
        self.build()
        self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype']))

    @skipUnlessDarwin
    @llgs_test
    def test_qProcessInfo_contains_cputype_cpusubtype_llgs_darwin(self):
        self.init_llgs_test()
        self.build()
        self.qProcessInfo_contains_keys(set(['cputype', 'cpusubtype']))

    @skipUnlessPlatform(["linux"])
    @llgs_test
    def test_qProcessInfo_contains_triple_llgs_linux(self):
        self.init_llgs_test()
        self.build()
        self.qProcessInfo_contains_keys(set(['triple']))

    @skipUnlessDarwin
    @debugserver_test
    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def test_qProcessInfo_does_not_contain_triple_debugserver_darwin(self):
        self.init_debugserver_test()
        self.build()
        # We don't expect to see triple on darwin. If we do, we'll prefer triple
        # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup
        # for the remote Host and Process.
        self.qProcessInfo_does_not_contain_keys(set(['triple']))

    @skipUnlessDarwin
    @llgs_test
    def test_qProcessInfo_does_not_contain_triple_llgs_darwin(self):
        self.init_llgs_test()
        self.build()
        # We don't expect to see triple on darwin. If we do, we'll prefer triple
        # to cputype/cpusubtype and skip some darwin-based ProcessGDBRemote ArchSpec setup
        # for the remote Host and Process.
        self.qProcessInfo_does_not_contain_keys(set(['triple']))

    @skipUnlessPlatform(["linux"])
    @llgs_test
    def test_qProcessInfo_does_not_contain_cputype_cpusubtype_llgs_linux(self):
        self.init_llgs_test()
        self.build()
        self.qProcessInfo_does_not_contain_keys(set(['cputype', 'cpusubtype']))
|
|
@ -0,0 +1,128 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteRegisterState(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Test QSaveRegisterState/QRestoreRegisterState support."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    def grp_register_save_restore_works(self, with_suffix):
        # Shared scenario: save register state, flip every bit-flippable GPR,
        # restore, and verify the registers match their initial values.
        # with_suffix: when True, use the thread-suffix packet extension and
        # target the main thread explicitly; when False, omit the thread id.
        # Start up the process, use thread suffix, grab main thread id.
        inferior_args = ["message:main entered", "sleep:5"]
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=inferior_args)

        self.add_process_info_collection_packets()
        self.add_register_info_collection_packets()
        if with_suffix:
            self.add_thread_suffix_request_packets()
        self.add_threadinfo_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather process info.
        process_info = self.parse_process_info_response(context)
        endian = process_info.get("endian")
        self.assertIsNotNone(endian)

        # Gather register info.
        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)

        # Pull out the register infos that we think we can bit flip
        # successfully.
        gpr_reg_infos = [
            reg_info for reg_info in reg_infos if self.is_bit_flippable_register(reg_info)]
        self.assertTrue(len(gpr_reg_infos) > 0)

        # Gather thread info.
        if with_suffix:
            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)
            thread_id = threads[0]
            self.assertIsNotNone(thread_id)
            # print("Running on thread: 0x{:x}".format(thread_id))
        else:
            thread_id = None

        # Save register state.
        self.reset_test_sequence()
        self.add_QSaveRegisterState_packets(thread_id)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        (success, state_id) = self.parse_QSaveRegisterState_response(context)
        self.assertTrue(success)
        self.assertIsNotNone(state_id)
        # print("saved register state id: {}".format(state_id))

        # Remember initial register values.
        initial_reg_values = self.read_register_values(
            gpr_reg_infos, endian, thread_id=thread_id)
        # print("initial_reg_values: {}".format(initial_reg_values))

        # Flip gpr register values.
        (successful_writes, failed_writes) = self.flip_all_bits_in_each_register_value(
            gpr_reg_infos, endian, thread_id=thread_id)
        # print("successful writes: {}, failed writes: {}".format(successful_writes, failed_writes))
        self.assertTrue(successful_writes > 0)

        flipped_reg_values = self.read_register_values(
            gpr_reg_infos, endian, thread_id=thread_id)
        # print("flipped_reg_values: {}".format(flipped_reg_values))

        # Restore register values.
        self.reset_test_sequence()
        self.add_QRestoreRegisterState_packets(state_id, thread_id)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Verify registers match initial register values.
        final_reg_values = self.read_register_values(
            gpr_reg_infos, endian, thread_id=thread_id)
        # print("final_reg_values: {}".format(final_reg_values))
        self.assertIsNotNone(final_reg_values)
        self.assertEqual(final_reg_values, initial_reg_values)

    @debugserver_test
    def test_grp_register_save_restore_works_with_suffix_debugserver(self):
        USE_THREAD_SUFFIX = True
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.grp_register_save_restore_works(USE_THREAD_SUFFIX)

    @llgs_test
    def test_grp_register_save_restore_works_with_suffix_llgs(self):
        USE_THREAD_SUFFIX = True
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.grp_register_save_restore_works(USE_THREAD_SUFFIX)

    @debugserver_test
    def test_grp_register_save_restore_works_no_suffix_debugserver(self):
        USE_THREAD_SUFFIX = False
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.grp_register_save_restore_works(USE_THREAD_SUFFIX)

    @llgs_test
    def test_grp_register_save_restore_works_no_suffix_llgs(self):
        USE_THREAD_SUFFIX = False
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.grp_register_save_restore_works(USE_THREAD_SUFFIX)
|
|
@ -0,0 +1,41 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteSingleStep(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Verify the 's' packet steps exactly one instruction.

    The actual stepping logic lives in the base-class helper
    single_step_only_steps_one_instruction; this class just drives it for the
    debugserver and llgs stubs.
    """

    mydir = TestBase.compute_mydir(__file__)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_single_step_only_steps_one_instruction_with_s_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.single_step_only_steps_one_instruction(
            use_Hc_packet=True, step_instruction="s")

    @llgs_test
    @expectedFailureAndroid(
        bugnumber="llvm.org/pr24739",
        archs=[
            "arm",
            "aarch64"])
    @expectedFailureAll(
        oslist=["linux"],
        archs=[
            "arm",
            "aarch64"],
        bugnumber="llvm.org/pr24739")
    @skipIf(triple='^mips')
    def test_single_step_only_steps_one_instruction_with_s_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.single_step_only_steps_one_instruction(
            use_Hc_packet=True, step_instruction="s")
|
|
@ -0,0 +1,303 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import json
|
||||
import re
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
class TestGdbRemoteThreadsInStopReply(
        gdbremote_testcase.GdbRemoteTestCaseBase):
    """Test the QListThreadsInStopReply extension.

    When enabled, stop replies carry 'threads' (and 'thread-pcs') fields; the
    tests check those against q{f,s}ThreadInfo and jThreadsInfo.
    """

    mydir = TestBase.compute_mydir(__file__)

    # Packet exchange that turns the extension on for a session.
    ENABLE_THREADS_IN_STOP_REPLY_ENTRIES = [
        "read packet: $QListThreadsInStopReply#21",
        "send packet: $OK#00",
    ]

    def gather_stop_reply_fields(self, post_startup_log_lines, thread_count,
                                 field_names):
        # Launch an inferior with thread_count threads, interrupt it, and pull
        # the requested field_names out of the stop-reply key/value payload.
        # Returns a dict with those fields plus pc_register / little_endian.
        # Set up the inferior args.
        inferior_args = []
        for i in range(thread_count - 1):
            inferior_args.append("thread:new")
        inferior_args.append("sleep:10")
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=inferior_args)

        self.add_register_info_collection_packets()
        self.add_process_info_collection_packets()

        # Assumes test_sequence has anything added needed to setup the initial state.
        # (Like optionally enabling QThreadsInStopReply.)
        if post_startup_log_lines:
            self.test_sequence.add_log_lines(post_startup_log_lines, True)
        self.test_sequence.add_log_lines([
            "read packet: $c#63"
        ], True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        hw_info = self.parse_hw_info(context)

        # Give threads time to start up, then break.
        time.sleep(1)
        self.reset_test_sequence()
        # chr(3) is the interrupt (Ctrl-C) byte; expect a T?? stop reply.
        self.test_sequence.add_log_lines(
            [
                "read packet: {}".format(
                    chr(3)),
                {
                    "direction": "send",
                    "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
                    "capture": {
                        1: "stop_result",
                        2: "key_vals_text"}},
            ],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Wait until all threads have started.
        threads = self.wait_for_thread_count(thread_count, timeout_seconds=3)
        self.assertIsNotNone(threads)
        self.assertEqual(len(threads), thread_count)

        # Run, then stop the process, grab the stop reply content.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(["read packet: $c#63",
                                          "read packet: {}".format(chr(3)),
                                          {"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
                                           "capture": {1: "stop_result",
                                                       2: "key_vals_text"}},
                                          ],
                                         True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Parse the stop reply contents.
        key_vals_text = context.get("key_vals_text")
        self.assertIsNotNone(key_vals_text)
        kv_dict = self.parse_key_val_dict(key_vals_text)
        self.assertIsNotNone(kv_dict)

        result = dict();
        result["pc_register"] = hw_info["pc_register"]
        result["little_endian"] = hw_info["little_endian"]
        for key_field in field_names:
            result[key_field] = kv_dict.get(key_field)

        return result

    def gather_stop_reply_threads(self, post_startup_log_lines, thread_count):
        # Pull out threads from stop response.
        stop_reply_threads_text = self.gather_stop_reply_fields(
            post_startup_log_lines, thread_count, ["threads"])["threads"]
        if stop_reply_threads_text:
            # The field is a comma-separated list of hex thread ids.
            return [int(thread_id, 16)
                    for thread_id in stop_reply_threads_text.split(",")]
        else:
            return []

    def gather_stop_reply_pcs(self, post_startup_log_lines, thread_count):
        # Like gather_stop_reply_threads, but also maps each thread id to the
        # pc reported in the stop reply's thread-pcs field.
        results = self.gather_stop_reply_fields( post_startup_log_lines,
                thread_count, ["threads", "thread-pcs"])
        if not results:
            return []

        threads_text = results["threads"]
        pcs_text = results["thread-pcs"]
        thread_ids = threads_text.split(",")
        pcs = pcs_text.split(",")
        self.assertTrue(len(thread_ids) == len(pcs))

        thread_pcs = dict()
        for i in range(0, len(pcs)):
            thread_pcs[int(thread_ids[i], 16)] = pcs[i]

        result = dict()
        result["thread_pcs"] = thread_pcs
        result["pc_register"] = results["pc_register"]
        result["little_endian"] = results["little_endian"]
        return result

    def switch_endian(self, egg):
        # Reverse a hex string two characters (one byte) at a time, i.e.
        # convert between little- and big-endian byte order.
        return "".join(reversed(re.findall("..", egg)))

    def parse_hw_info(self, context):
        # Extract the pc register's lldb index and the target's endianness
        # from the collected process/register info packets.
        self.assertIsNotNone(context)
        process_info = self.parse_process_info_response(context)
        endian = process_info.get("endian")
        reg_info = self.parse_register_info_packets(context)
        (pc_lldb_reg_index, pc_reg_info) = self.find_pc_reg_info(reg_info)

        hw_info = dict()
        hw_info["pc_register"] = pc_lldb_reg_index
        hw_info["little_endian"] = (endian == "little")
        return hw_info

    def gather_threads_info_pcs(self, pc_register, little_endian):
        # Query jThreadsInfo and return {tid: pc-hex-string}, normalizing the
        # pc byte order to big-endian for comparison.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                "read packet: $jThreadsInfo#c1",
                {
                    "direction": "send",
                    "regex": r"^\$(.*)#[0-9a-fA-F]{2}$",
                    "capture": {
                        1: "threads_info"}},
            ],
            True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        threads_info = context.get("threads_info")
        register = str(pc_register)
        # The jThreadsInfo response is not valid JSON data, so we have to
        # clean it up first.
        jthreads_info = json.loads(re.sub(r"}]", "}", threads_info))
        thread_pcs = dict()
        for thread_info in jthreads_info:
            tid = thread_info["tid"]
            pc = thread_info["registers"][register]
            thread_pcs[tid] = self.switch_endian(pc) if little_endian else pc

        return thread_pcs

    def QListThreadsInStopReply_supported(self):
        # Shared scenario: the stub must reply OK to QListThreadsInStopReply.
        procs = self.prep_debug_monitor_and_inferior()
        self.test_sequence.add_log_lines(
            self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_QListThreadsInStopReply_supported_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.QListThreadsInStopReply_supported()

    @llgs_test
    def test_QListThreadsInStopReply_supported_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.QListThreadsInStopReply_supported()

    def stop_reply_reports_multiple_threads(self, thread_count):
        # Gather threads from stop notification when QThreadsInStopReply is
        # enabled.
        stop_reply_threads = self.gather_stop_reply_threads(
            self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
        self.assertEqual(len(stop_reply_threads), thread_count)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_stop_reply_reports_multiple_threads_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_reports_multiple_threads(5)

    @llgs_test
    def test_stop_reply_reports_multiple_threads_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_reports_multiple_threads(5)

    def no_QListThreadsInStopReply_supplies_no_threads(self, thread_count):
        # Gather threads from stop notification when QThreadsInStopReply is not
        # enabled.
        stop_reply_threads = self.gather_stop_reply_threads(None, thread_count)
        self.assertEqual(len(stop_reply_threads), 0)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_no_QListThreadsInStopReply_supplies_no_threads_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.no_QListThreadsInStopReply_supplies_no_threads(5)

    @llgs_test
    def test_no_QListThreadsInStopReply_supplies_no_threads_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.no_QListThreadsInStopReply_supplies_no_threads(5)

    def stop_reply_reports_correct_threads(self, thread_count):
        # Gather threads from stop notification when QThreadsInStopReply is
        # enabled.
        stop_reply_threads = self.gather_stop_reply_threads(
            self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
        self.assertEqual(len(stop_reply_threads), thread_count)

        # Gather threads from q{f,s}ThreadInfo.
        self.reset_test_sequence()
        self.add_threadinfo_collection_packets()

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        threads = self.parse_threadinfo_packets(context)
        self.assertIsNotNone(threads)
        self.assertEqual(len(threads), thread_count)

        # Ensure each thread in q{f,s}ThreadInfo appears in stop reply threads
        for tid in threads:
            self.assertTrue(tid in stop_reply_threads)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_stop_reply_reports_correct_threads_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_reports_correct_threads(5)

    @llgs_test
    def test_stop_reply_reports_correct_threads_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_reports_correct_threads(5)

    def stop_reply_contains_thread_pcs(self, thread_count):
        # Cross-check the stop reply's thread-pcs against jThreadsInfo pcs.
        results = self.gather_stop_reply_pcs(
            self.ENABLE_THREADS_IN_STOP_REPLY_ENTRIES, thread_count)
        stop_reply_pcs = results["thread_pcs"]
        pc_register = results["pc_register"]
        little_endian = results["little_endian"]
        self.assertEqual(len(stop_reply_pcs), thread_count)

        threads_info_pcs = self.gather_threads_info_pcs(pc_register,
                                                        little_endian)

        self.assertEqual(len(threads_info_pcs), thread_count)
        for thread_id in stop_reply_pcs:
            self.assertTrue(thread_id in threads_info_pcs)
            self.assertTrue(int(stop_reply_pcs[thread_id], 16)
                            == int(threads_info_pcs[thread_id], 16))

    @llgs_test
    def test_stop_reply_contains_thread_pcs_llgs(self):
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_contains_thread_pcs(5)

    @skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
    @debugserver_test
    def test_stop_reply_contains_thread_pcs_debugserver(self):
        self.init_debugserver_test()
        self.build()
        self.set_inferior_startup_launch()
        self.stop_reply_contains_thread_pcs(5)
|
|
@ -0,0 +1,182 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import sys
|
||||
|
||||
import unittest2
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemote_qThreadStopInfo(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
THREAD_COUNT = 5
|
||||
|
||||
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
@skipIfDarwinEmbedded # <rdar://problem/27005337>
# NOTE(review): the skip decorator above is applied twice, and this is a
# helper rather than a test_* method — the duplication looks redundant;
# confirm whether the skips were meant for the test wrappers instead.
def gather_stop_replies_via_qThreadStopInfo(self, thread_count):
    # Launch an inferior with thread_count threads, interrupt it, then issue
    # qThreadStopInfo for each thread. Returns (stop_replies, thread_dicts):
    # {tid: stop-signal-int} and {tid: parsed key/value dict}.
    # Set up the inferior args.
    inferior_args = []
    for i in range(thread_count - 1):
        inferior_args.append("thread:new")
    inferior_args.append("sleep:10")
    procs = self.prep_debug_monitor_and_inferior(
        inferior_args=inferior_args)

    # Assumes test_sequence has anything added needed to setup the initial state.
    # (Like optionally enabling QThreadsInStopReply.)
    self.test_sequence.add_log_lines([
        "read packet: $c#63"
    ], True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Give threads time to start up, then break.
    time.sleep(1)
    self.reset_test_sequence()
    # chr(3) is the interrupt (Ctrl-C) byte; expect a T?? stop reply.
    self.test_sequence.add_log_lines(
        [
            "read packet: {}".format(
                chr(3)),
            {
                "direction": "send",
                "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
                "capture": {
                    1: "stop_result",
                    2: "key_vals_text"}},
        ],
        True)
    context = self.expect_gdbremote_sequence()
    self.assertIsNotNone(context)

    # Wait until all threads have started.
    threads = self.wait_for_thread_count(thread_count, timeout_seconds=3)
    self.assertIsNotNone(threads)
    self.assertEqual(len(threads), thread_count)

    # Grab stop reply for each thread via qThreadStopInfo{tid:hex}.
    stop_replies = {}
    thread_dicts = {}
    for thread in threads:
        # Run the qThreadStopInfo command.
        self.reset_test_sequence()
        self.test_sequence.add_log_lines(
            [
                "read packet: $qThreadStopInfo{:x}#00".format(thread),
                {
                    "direction": "send",
                    "regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
                    "capture": {
                        1: "stop_result",
                        2: "key_vals_text"}},
            ],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Parse stop reply contents.
        key_vals_text = context.get("key_vals_text")
        self.assertIsNotNone(key_vals_text)
        kv_dict = self.parse_key_val_dict(key_vals_text)
        self.assertIsNotNone(kv_dict)

        # Verify there is a thread and that it matches the expected thread
        # id.
        kv_thread = kv_dict.get("thread")
        self.assertIsNotNone(kv_thread)
        kv_thread_id = int(kv_thread, 16)
        self.assertEqual(kv_thread_id, thread)

        # Grab the stop id reported.
        stop_result_text = context.get("stop_result")
        self.assertIsNotNone(stop_result_text)
        stop_replies[kv_thread_id] = int(stop_result_text, 16)

        # Hang on to the key-val dictionary for the thread.
        thread_dicts[kv_thread_id] = kv_dict

    return (stop_replies, thread_dicts)
|
||||
|
||||
def qThreadStopInfo_works_for_multiple_threads(self, thread_count):
    # Shared scenario: every thread must produce a qThreadStopInfo reply.
    (stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count)
    self.assertEqual(len(stop_replies), thread_count)

@debugserver_test
def test_qThreadStopInfo_works_for_multiple_threads_debugserver(self):
    self.init_debugserver_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT)

@llgs_test
def test_qThreadStopInfo_works_for_multiple_threads_llgs(self):
    self.init_llgs_test()
    self.build()
    self.set_inferior_startup_launch()
    self.qThreadStopInfo_works_for_multiple_threads(self.THREAD_COUNT)
|
||||
|
||||
def qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(
|
||||
self, thread_count):
|
||||
(stop_replies, _) = self.gather_stop_replies_via_qThreadStopInfo(thread_count)
|
||||
self.assertIsNotNone(stop_replies)
|
||||
|
||||
no_stop_reason_count = sum(
|
||||
1 for stop_reason in list(
|
||||
stop_replies.values()) if stop_reason == 0)
|
||||
with_stop_reason_count = sum(
|
||||
1 for stop_reason in list(
|
||||
stop_replies.values()) if stop_reason != 0)
|
||||
|
||||
# All but one thread should report no stop reason.
|
||||
self.assertEqual(no_stop_reason_count, thread_count - 1)
|
||||
|
||||
# Only one thread should should indicate a stop reason.
|
||||
self.assertEqual(with_stop_reason_count, 1)
|
||||
|
||||
@debugserver_test
|
||||
def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_debugserver(
|
||||
self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(
|
||||
self.THREAD_COUNT)
|
||||
|
||||
@llgs_test
|
||||
def test_qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt_llgs(
|
||||
self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.qThreadStopInfo_only_reports_one_thread_stop_reason_during_interrupt(
|
||||
self.THREAD_COUNT)
|
||||
|
||||
def qThreadStopInfo_has_valid_thread_names(
|
||||
self, thread_count, expected_thread_name):
|
||||
(_, thread_dicts) = self.gather_stop_replies_via_qThreadStopInfo(thread_count)
|
||||
self.assertIsNotNone(thread_dicts)
|
||||
|
||||
for thread_dict in list(thread_dicts.values()):
|
||||
name = thread_dict.get("name")
|
||||
self.assertIsNotNone(name)
|
||||
self.assertEqual(name, expected_thread_name)
|
||||
|
||||
@unittest2.skip("MacOSX doesn't have a default thread name")
|
||||
@debugserver_test
|
||||
def test_qThreadStopInfo_has_valid_thread_names_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out")
|
||||
|
||||
# test requires OS with set, equal thread names by default.
|
||||
@skipUnlessPlatform(["linux"])
|
||||
@llgs_test
|
||||
def test_qThreadStopInfo_has_valid_thread_names_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.qThreadStopInfo_has_valid_thread_names(self.THREAD_COUNT, "a.out")
|
|
@ -0,0 +1,159 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemote_vCont(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
|
||||
def vCont_supports_mode(self, mode, inferior_args=None):
|
||||
# Setup the stub and set the gdb remote command stream.
|
||||
procs = self.prep_debug_monitor_and_inferior(
|
||||
inferior_args=inferior_args)
|
||||
self.add_vCont_query_packets()
|
||||
|
||||
# Run the gdb remote command stream.
|
||||
context = self.expect_gdbremote_sequence()
|
||||
self.assertIsNotNone(context)
|
||||
|
||||
# Pull out supported modes.
|
||||
supported_vCont_modes = self.parse_vCont_query_response(context)
|
||||
self.assertIsNotNone(supported_vCont_modes)
|
||||
|
||||
# Verify we support the given mode.
|
||||
self.assertTrue(mode in supported_vCont_modes)
|
||||
|
||||
def vCont_supports_c(self):
|
||||
self.vCont_supports_mode("c")
|
||||
|
||||
def vCont_supports_C(self):
|
||||
self.vCont_supports_mode("C")
|
||||
|
||||
def vCont_supports_s(self):
|
||||
self.vCont_supports_mode("s")
|
||||
|
||||
def vCont_supports_S(self):
|
||||
self.vCont_supports_mode("S")
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_vCont_supports_c_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.vCont_supports_c()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@llgs_test
|
||||
def test_vCont_supports_c_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.vCont_supports_c()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_vCont_supports_C_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.vCont_supports_C()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@llgs_test
|
||||
def test_vCont_supports_C_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.vCont_supports_C()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_vCont_supports_s_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.vCont_supports_s()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@llgs_test
|
||||
def test_vCont_supports_s_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.vCont_supports_s()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_vCont_supports_S_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.vCont_supports_S()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@llgs_test
|
||||
def test_vCont_supports_S_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.vCont_supports_S()
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_debugserver(
|
||||
self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.single_step_only_steps_one_instruction(
|
||||
use_Hc_packet=True, step_instruction="vCont;s")
|
||||
|
||||
@llgs_test
|
||||
@expectedFailureAndroid(
|
||||
bugnumber="llvm.org/pr24739",
|
||||
archs=[
|
||||
"arm",
|
||||
"aarch64"])
|
||||
@expectedFailureAll(
|
||||
oslist=["linux"],
|
||||
archs=[
|
||||
"arm",
|
||||
"aarch64"],
|
||||
bugnumber="llvm.org/pr24739")
|
||||
@skipIf(triple='^mips')
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
def test_single_step_only_steps_one_instruction_with_Hc_vCont_s_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.single_step_only_steps_one_instruction(
|
||||
use_Hc_packet=True, step_instruction="vCont;s")
|
||||
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
@debugserver_test
|
||||
def test_single_step_only_steps_one_instruction_with_vCont_s_thread_debugserver(
|
||||
self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.single_step_only_steps_one_instruction(
|
||||
use_Hc_packet=False, step_instruction="vCont;s:{thread}")
|
||||
|
||||
@llgs_test
|
||||
@expectedFailureAndroid(
|
||||
bugnumber="llvm.org/pr24739",
|
||||
archs=[
|
||||
"arm",
|
||||
"aarch64"])
|
||||
@expectedFailureAll(
|
||||
oslist=["linux"],
|
||||
archs=[
|
||||
"arm",
|
||||
"aarch64"],
|
||||
bugnumber="llvm.org/pr24739")
|
||||
@skipIf(triple='^mips')
|
||||
@expectedFailureAll(oslist=["ios", "tvos", "watchos", "bridgeos"], bugnumber="rdar://27005337")
|
||||
def test_single_step_only_steps_one_instruction_with_vCont_s_thread_llgs(
|
||||
self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.set_inferior_startup_launch()
|
||||
self.single_step_only_steps_one_instruction(
|
||||
use_Hc_packet=False, step_instruction="vCont;s:{thread}")
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,98 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
import re
|
||||
import select
|
||||
import socket
|
||||
import time
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestStubReverseConnect(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
|
||||
_DEFAULT_TIMEOUT = 20
|
||||
|
||||
def setUp(self):
|
||||
# Set up the test.
|
||||
gdbremote_testcase.GdbRemoteTestCaseBase.setUp(self)
|
||||
|
||||
# Create a listener on a local port.
|
||||
self.listener_socket = self.create_listener_socket()
|
||||
self.assertIsNotNone(self.listener_socket)
|
||||
self.listener_port = self.listener_socket.getsockname()[1]
|
||||
|
||||
def create_listener_socket(self, timeout_seconds=_DEFAULT_TIMEOUT):
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
self.assertIsNotNone(sock)
|
||||
|
||||
sock.settimeout(timeout_seconds)
|
||||
sock.bind(("127.0.0.1", 0))
|
||||
sock.listen(1)
|
||||
|
||||
def tear_down_listener():
|
||||
try:
|
||||
sock.shutdown(socket.SHUT_RDWR)
|
||||
except:
|
||||
# ignore
|
||||
None
|
||||
|
||||
self.addTearDownHook(tear_down_listener)
|
||||
return sock
|
||||
|
||||
def reverse_connect_works(self):
|
||||
# Indicate stub startup should do a reverse connect.
|
||||
appended_stub_args = ["--reverse-connect"]
|
||||
if self.debug_monitor_extra_args:
|
||||
self.debug_monitor_extra_args += appended_stub_args
|
||||
else:
|
||||
self.debug_monitor_extra_args = appended_stub_args
|
||||
|
||||
self.stub_hostname = "127.0.0.1"
|
||||
self.port = self.listener_port
|
||||
|
||||
triple = self.dbg.GetSelectedPlatform().GetTriple()
|
||||
if re.match(".*-.*-.*-android", triple):
|
||||
self.forward_adb_port(
|
||||
self.port,
|
||||
self.port,
|
||||
"reverse",
|
||||
self.stub_device)
|
||||
|
||||
# Start the stub.
|
||||
server = self.launch_debug_monitor(logfile=sys.stdout)
|
||||
self.assertIsNotNone(server)
|
||||
self.assertTrue(
|
||||
lldbgdbserverutils.process_is_running(
|
||||
server.pid, True))
|
||||
|
||||
# Listen for the stub's connection to us.
|
||||
(stub_socket, address) = self.listener_socket.accept()
|
||||
self.assertIsNotNone(stub_socket)
|
||||
self.assertIsNotNone(address)
|
||||
print("connected to stub {} on {}".format(
|
||||
address, stub_socket.getsockname()))
|
||||
|
||||
# Verify we can do the handshake. If that works, we'll call it good.
|
||||
self.do_handshake(stub_socket, timeout_seconds=self._DEFAULT_TIMEOUT)
|
||||
|
||||
# Clean up.
|
||||
stub_socket.shutdown(socket.SHUT_RDWR)
|
||||
|
||||
@debugserver_test
|
||||
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
|
||||
def test_reverse_connect_works_debugserver(self):
|
||||
self.init_debugserver_test(use_named_pipe=False)
|
||||
self.set_inferior_startup_launch()
|
||||
self.reverse_connect_works()
|
||||
|
||||
@llgs_test
|
||||
@skipIfRemote # reverse connect is not a supported use case for now
|
||||
def test_reverse_connect_works_llgs(self):
|
||||
self.init_llgs_test(use_named_pipe=False)
|
||||
self.set_inferior_startup_launch()
|
||||
self.reverse_connect_works()
|
|
@ -0,0 +1,86 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import lldbgdbserverutils
|
||||
import os
|
||||
import select
|
||||
import tempfile
|
||||
import time
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestStubSetSIDTestCase(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
|
||||
def get_stub_sid(self, extra_stub_args=None):
|
||||
# Launch debugserver
|
||||
if extra_stub_args:
|
||||
self.debug_monitor_extra_args += extra_stub_args
|
||||
|
||||
server = self.launch_debug_monitor()
|
||||
self.assertIsNotNone(server)
|
||||
self.assertTrue(
|
||||
lldbgdbserverutils.process_is_running(
|
||||
server.pid, True))
|
||||
|
||||
# Get the process id for the stub.
|
||||
return os.getsid(server.pid)
|
||||
|
||||
def sid_is_same_without_setsid(self):
|
||||
stub_sid = self.get_stub_sid()
|
||||
self.assertEqual(stub_sid, os.getsid(0))
|
||||
|
||||
def sid_is_different_with_setsid(self):
|
||||
stub_sid = self.get_stub_sid(["--setsid"])
|
||||
self.assertNotEqual(stub_sid, os.getsid(0))
|
||||
|
||||
def sid_is_different_with_S(self):
|
||||
stub_sid = self.get_stub_sid(["-S"])
|
||||
self.assertNotEqual(stub_sid, os.getsid(0))
|
||||
|
||||
@debugserver_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
def test_sid_is_same_without_setsid_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_same_without_setsid()
|
||||
|
||||
@llgs_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
@expectedFailureAll(oslist=['freebsd'])
|
||||
def test_sid_is_same_without_setsid_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_same_without_setsid()
|
||||
|
||||
@debugserver_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
def test_sid_is_different_with_setsid_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_different_with_setsid()
|
||||
|
||||
@llgs_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
def test_sid_is_different_with_setsid_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_different_with_setsid()
|
||||
|
||||
@debugserver_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
def test_sid_is_different_with_S_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_different_with_S()
|
||||
|
||||
@llgs_test
|
||||
@skipIfRemote # --setsid not used on remote platform and currently it is also impossible to get the sid of lldb-platform running on a remote target
|
||||
def test_sid_is_different_with_S_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.set_inferior_startup_launch()
|
||||
self.sid_is_different_with_S()
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,8 @@
|
|||
LEVEL = ../../../make
|
||||
|
||||
CFLAGS_EXTRAS += -D__STDC_LIMIT_MACROS -D__STDC_FORMAT_MACROS -std=c++11
|
||||
# LD_EXTRAS := -lpthread
|
||||
CXX_SOURCES := main.cpp
|
||||
MAKE_DSYM :=NO
|
||||
|
||||
include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,46 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import signal
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteAbort(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
|
||||
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
|
||||
def inferior_abort_received(self):
|
||||
procs = self.prep_debug_monitor_and_inferior(inferior_args=["abort"])
|
||||
self.assertIsNotNone(procs)
|
||||
|
||||
self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
|
||||
{"direction": "send",
|
||||
"regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
|
||||
"capture": {1: "hex_exit_code"}},
|
||||
],
|
||||
True)
|
||||
|
||||
context = self.expect_gdbremote_sequence()
|
||||
self.assertIsNotNone(context)
|
||||
|
||||
hex_exit_code = context.get("hex_exit_code")
|
||||
self.assertIsNotNone(hex_exit_code)
|
||||
self.assertEqual(int(hex_exit_code, 16),
|
||||
lldbutil.get_signal_number('SIGABRT'))
|
||||
|
||||
@debugserver_test
|
||||
def test_inferior_abort_received_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.inferior_abort_received()
|
||||
|
||||
@llgs_test
|
||||
# std::abort() on <= API 16 raises SIGSEGV - b.android.com/179836
|
||||
@expectedFailureAndroid(api_levels=list(range(16 + 1)))
|
||||
def test_inferior_abort_received_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.inferior_abort_received()
|
|
@ -0,0 +1,46 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import signal
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteSegFault(gdbremote_testcase.GdbRemoteTestCaseBase):
|
||||
mydir = TestBase.compute_mydir(__file__)
|
||||
|
||||
GDB_REMOTE_STOP_CODE_BAD_ACCESS = 0x91
|
||||
|
||||
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
|
||||
def inferior_seg_fault_received(self, expected_signo):
|
||||
procs = self.prep_debug_monitor_and_inferior(
|
||||
inferior_args=["segfault"])
|
||||
self.assertIsNotNone(procs)
|
||||
|
||||
self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
|
||||
{"direction": "send",
|
||||
"regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
|
||||
"capture": {1: "hex_exit_code"}},
|
||||
],
|
||||
True)
|
||||
|
||||
context = self.expect_gdbremote_sequence()
|
||||
self.assertIsNotNone(context)
|
||||
|
||||
hex_exit_code = context.get("hex_exit_code")
|
||||
self.assertIsNotNone(hex_exit_code)
|
||||
self.assertEqual(int(hex_exit_code, 16), expected_signo)
|
||||
|
||||
@debugserver_test
|
||||
def test_inferior_seg_fault_received_debugserver(self):
|
||||
self.init_debugserver_test()
|
||||
self.build()
|
||||
self.inferior_seg_fault_received(self.GDB_REMOTE_STOP_CODE_BAD_ACCESS)
|
||||
|
||||
@llgs_test
|
||||
def test_inferior_seg_fault_received_llgs(self):
|
||||
self.init_llgs_test()
|
||||
self.build()
|
||||
self.inferior_seg_fault_received(lldbutil.get_signal_number('SIGSEGV'))
|
|
@ -0,0 +1,31 @@
|
|||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
|
||||
namespace {
|
||||
const char *const SEGFAULT_COMMAND = "segfault";
|
||||
const char *const ABORT_COMMAND = "abort";
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
if (argc < 2) {
|
||||
std::cout << "expected at least one command provided on the command line"
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
// Process command line args.
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
const char *const command = argv[i];
|
||||
if (std::strstr(command, SEGFAULT_COMMAND)) {
|
||||
// Perform a null pointer access.
|
||||
int *const null_int_ptr = nullptr;
|
||||
*null_int_ptr = 0xDEAD;
|
||||
} else if (std::strstr(command, ABORT_COMMAND)) {
|
||||
std::abort();
|
||||
} else {
|
||||
std::cout << "Unsupported command: " << command << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -0,0 +1,945 @@
|
|||
"""Module for supporting unit testing of the lldb-server debug monitor exe.
|
||||
"""
|
||||
|
||||
from __future__ import division, print_function
|
||||
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import platform
|
||||
import re
|
||||
import six
|
||||
import socket_packet_pump
|
||||
import subprocess
|
||||
import time
|
||||
from lldbsuite.test.lldbtest import *
|
||||
|
||||
from six.moves import queue
|
||||
|
||||
|
||||
def _get_debug_monitor_from_lldb(lldb_exe, debug_monitor_basename):
|
||||
"""Return the debug monitor exe path given the lldb exe path.
|
||||
|
||||
This method attempts to construct a valid debug monitor exe name
|
||||
from a given lldb exe name. It will return None if the synthesized
|
||||
debug monitor name is not found to exist.
|
||||
|
||||
The debug monitor exe path is synthesized by taking the directory
|
||||
of the lldb exe, and replacing the portion of the base name that
|
||||
matches "lldb" (case insensitive) and replacing with the value of
|
||||
debug_monitor_basename.
|
||||
|
||||
Args:
|
||||
lldb_exe: the path to an lldb executable.
|
||||
|
||||
debug_monitor_basename: the base name portion of the debug monitor
|
||||
that will replace 'lldb'.
|
||||
|
||||
Returns:
|
||||
A path to the debug monitor exe if it is found to exist; otherwise,
|
||||
returns None.
|
||||
|
||||
"""
|
||||
if not lldb_exe:
|
||||
return None
|
||||
|
||||
exe_dir = os.path.dirname(lldb_exe)
|
||||
exe_base = os.path.basename(lldb_exe)
|
||||
|
||||
# we'll rebuild the filename by replacing lldb with
|
||||
# the debug monitor basename, keeping any prefix or suffix in place.
|
||||
regex = re.compile(r"lldb", re.IGNORECASE)
|
||||
new_base = regex.sub(debug_monitor_basename, exe_base)
|
||||
|
||||
debug_monitor_exe = os.path.join(exe_dir, new_base)
|
||||
if os.path.exists(debug_monitor_exe):
|
||||
return debug_monitor_exe
|
||||
|
||||
new_base = regex.sub(
|
||||
'LLDB.framework/Versions/A/Resources/' +
|
||||
debug_monitor_basename,
|
||||
exe_base)
|
||||
debug_monitor_exe = os.path.join(exe_dir, new_base)
|
||||
if os.path.exists(debug_monitor_exe):
|
||||
return debug_monitor_exe
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def get_lldb_server_exe():
|
||||
"""Return the lldb-server exe path.
|
||||
|
||||
Returns:
|
||||
A path to the lldb-server exe if it is found to exist; otherwise,
|
||||
returns None.
|
||||
"""
|
||||
if "LLDB_DEBUGSERVER_PATH" in os.environ:
|
||||
return os.environ["LLDB_DEBUGSERVER_PATH"]
|
||||
|
||||
return _get_debug_monitor_from_lldb(
|
||||
lldbtest_config.lldbExec, "lldb-server")
|
||||
|
||||
|
||||
def get_debugserver_exe():
|
||||
"""Return the debugserver exe path.
|
||||
|
||||
Returns:
|
||||
A path to the debugserver exe if it is found to exist; otherwise,
|
||||
returns None.
|
||||
"""
|
||||
if "LLDB_DEBUGSERVER_PATH" in os.environ:
|
||||
return os.environ["LLDB_DEBUGSERVER_PATH"]
|
||||
|
||||
return _get_debug_monitor_from_lldb(
|
||||
lldbtest_config.lldbExec, "debugserver")
|
||||
|
||||
_LOG_LINE_REGEX = re.compile(r'^(lldb-server|debugserver)\s+<\s*(\d+)>' +
|
||||
'\s+(read|send)\s+packet:\s+(.+)$')
|
||||
|
||||
|
||||
def _is_packet_lldb_gdbserver_input(packet_type, llgs_input_is_read):
|
||||
"""Return whether a given packet is input for lldb-gdbserver.
|
||||
|
||||
Args:
|
||||
packet_type: a string indicating 'send' or 'receive', from a
|
||||
gdbremote packet protocol log.
|
||||
|
||||
llgs_input_is_read: true if lldb-gdbserver input (content sent to
|
||||
lldb-gdbserver) is listed as 'read' or 'send' in the packet
|
||||
log entry.
|
||||
|
||||
Returns:
|
||||
True if the packet should be considered input for lldb-gdbserver; False
|
||||
otherwise.
|
||||
"""
|
||||
if packet_type == 'read':
|
||||
# when llgs is the read side, then a read packet is meant for
|
||||
# input to llgs (when captured from the llgs/debugserver exe).
|
||||
return llgs_input_is_read
|
||||
elif packet_type == 'send':
|
||||
# when llgs is the send side, then a send packet is meant to
|
||||
# be input to llgs (when captured from the lldb exe).
|
||||
return not llgs_input_is_read
|
||||
else:
|
||||
# don't understand what type of packet this is
|
||||
raise "Unknown packet type: {}".format(packet_type)
|
||||
|
||||
|
||||
def handle_O_packet(context, packet_contents, logger):
|
||||
"""Handle O packets."""
|
||||
if (not packet_contents) or (len(packet_contents) < 1):
|
||||
return False
|
||||
elif packet_contents[0] != "O":
|
||||
return False
|
||||
elif packet_contents == "OK":
|
||||
return False
|
||||
|
||||
new_text = gdbremote_hex_decode_string(packet_contents[1:])
|
||||
context["O_content"] += new_text
|
||||
context["O_count"] += 1
|
||||
|
||||
if logger:
|
||||
logger.debug(
|
||||
"text: new \"{}\", cumulative: \"{}\"".format(
|
||||
new_text, context["O_content"]))
|
||||
|
||||
return True
|
||||
|
||||
_STRIP_CHECKSUM_REGEX = re.compile(r'#[0-9a-fA-F]{2}$')
|
||||
_STRIP_COMMAND_PREFIX_REGEX = re.compile(r"^\$")
|
||||
_STRIP_COMMAND_PREFIX_M_REGEX = re.compile(r"^\$m")
|
||||
|
||||
|
||||
def assert_packets_equal(asserter, actual_packet, expected_packet):
|
||||
# strip off the checksum digits of the packet. When we're in
|
||||
# no-ack mode, the # checksum is ignored, and should not be cause
|
||||
# for a mismatched packet.
|
||||
actual_stripped = _STRIP_CHECKSUM_REGEX.sub('', actual_packet)
|
||||
expected_stripped = _STRIP_CHECKSUM_REGEX.sub('', expected_packet)
|
||||
asserter.assertEqual(actual_stripped, expected_stripped)
|
||||
|
||||
|
||||
def expect_lldb_gdbserver_replay(
|
||||
asserter,
|
||||
sock,
|
||||
test_sequence,
|
||||
pump_queues,
|
||||
timeout_seconds,
|
||||
logger=None):
|
||||
"""Replay socket communication with lldb-gdbserver and verify responses.
|
||||
|
||||
Args:
|
||||
asserter: the object providing assertEqual(first, second, msg=None), e.g. TestCase instance.
|
||||
|
||||
sock: the TCP socket connected to the lldb-gdbserver exe.
|
||||
|
||||
test_sequence: a GdbRemoteTestSequence instance that describes
|
||||
the messages sent to the gdb remote and the responses
|
||||
expected from it.
|
||||
|
||||
timeout_seconds: any response taking more than this number of
|
||||
seconds will cause an exception to be raised.
|
||||
|
||||
logger: a Python logger instance.
|
||||
|
||||
Returns:
|
||||
The context dictionary from running the given gdbremote
|
||||
protocol sequence. This will contain any of the capture
|
||||
elements specified to any GdbRemoteEntry instances in
|
||||
test_sequence.
|
||||
|
||||
The context will also contain an entry, context["O_content"]
|
||||
which contains the text from the inferior received via $O
|
||||
packets. $O packets should not attempt to be matched
|
||||
directly since they are not entirely deterministic as to
|
||||
how many arrive and how much text is in each one.
|
||||
|
||||
context["O_count"] will contain an integer of the number of
|
||||
O packets received.
|
||||
"""
|
||||
|
||||
# Ensure we have some work to do.
|
||||
if len(test_sequence.entries) < 1:
|
||||
return {}
|
||||
|
||||
context = {"O_count": 0, "O_content": ""}
|
||||
with socket_packet_pump.SocketPacketPump(sock, pump_queues, logger) as pump:
|
||||
# Grab the first sequence entry.
|
||||
sequence_entry = test_sequence.entries.pop(0)
|
||||
|
||||
# While we have an active sequence entry, send messages
|
||||
# destined for the stub and collect/match/process responses
|
||||
# expected from the stub.
|
||||
while sequence_entry:
|
||||
if sequence_entry.is_send_to_remote():
|
||||
# This is an entry to send to the remote debug monitor.
|
||||
send_packet = sequence_entry.get_send_packet()
|
||||
if logger:
|
||||
if len(send_packet) == 1 and send_packet[0] == chr(3):
|
||||
packet_desc = "^C"
|
||||
else:
|
||||
packet_desc = send_packet
|
||||
logger.info(
|
||||
"sending packet to remote: {}".format(packet_desc))
|
||||
sock.sendall(send_packet.encode())
|
||||
else:
|
||||
# This is an entry expecting to receive content from the remote
|
||||
# debug monitor.
|
||||
|
||||
# We'll pull from (and wait on) the queue appropriate for the type of matcher.
|
||||
# We keep separate queues for process output (coming from non-deterministic
|
||||
# $O packet division) and for all other packets.
|
||||
if sequence_entry.is_output_matcher():
|
||||
try:
|
||||
# Grab next entry from the output queue.
|
||||
content = pump_queues.output_queue().get(True, timeout_seconds)
|
||||
except queue.Empty:
|
||||
if logger:
|
||||
logger.warning(
|
||||
"timeout waiting for stub output (accumulated output:{})".format(
|
||||
pump.get_accumulated_output()))
|
||||
raise Exception(
|
||||
"timed out while waiting for output match (accumulated output: {})".format(
|
||||
pump.get_accumulated_output()))
|
||||
else:
|
||||
try:
|
||||
content = pump_queues.packet_queue().get(True, timeout_seconds)
|
||||
except queue.Empty:
|
||||
if logger:
|
||||
logger.warning(
|
||||
"timeout waiting for packet match (receive buffer: {})".format(
|
||||
pump.get_receive_buffer()))
|
||||
raise Exception(
|
||||
"timed out while waiting for packet match (receive buffer: {})".format(
|
||||
pump.get_receive_buffer()))
|
||||
|
||||
# Give the sequence entry the opportunity to match the content.
|
||||
# Output matchers might match or pass after more output accumulates.
|
||||
# Other packet types generally must match.
|
||||
asserter.assertIsNotNone(content)
|
||||
context = sequence_entry.assert_match(
|
||||
asserter, content, context=context)
|
||||
|
||||
# Move on to next sequence entry as needed. Some sequence entries support executing multiple
|
||||
# times in different states (for looping over query/response
|
||||
# packets).
|
||||
if sequence_entry.is_consumed():
|
||||
if len(test_sequence.entries) > 0:
|
||||
sequence_entry = test_sequence.entries.pop(0)
|
||||
else:
|
||||
sequence_entry = None
|
||||
|
||||
# Fill in the O_content entries.
|
||||
context["O_count"] = 1
|
||||
context["O_content"] = pump.get_accumulated_output()
|
||||
|
||||
return context
|
||||
|
||||
|
||||
def gdbremote_hex_encode_string(str):
|
||||
output = ''
|
||||
for c in str:
|
||||
output += '{0:02x}'.format(ord(c))
|
||||
return output
|
||||
|
||||
|
||||
def gdbremote_hex_decode_string(str):
|
||||
return str.decode("hex")
|
||||
|
||||
|
||||
def gdbremote_packet_encode_string(str):
|
||||
checksum = 0
|
||||
for c in str:
|
||||
checksum += ord(c)
|
||||
return '$' + str + '#{0:02x}'.format(checksum % 256)
|
||||
|
||||
|
||||
def build_gdbremote_A_packet(args_list):
|
||||
"""Given a list of args, create a properly-formed $A packet containing each arg.
|
||||
"""
|
||||
payload = "A"
|
||||
|
||||
# build the arg content
|
||||
arg_index = 0
|
||||
for arg in args_list:
|
||||
# Comma-separate the args.
|
||||
if arg_index > 0:
|
||||
payload += ','
|
||||
|
||||
# Hex-encode the arg.
|
||||
hex_arg = gdbremote_hex_encode_string(arg)
|
||||
|
||||
# Build the A entry.
|
||||
payload += "{},{},{}".format(len(hex_arg), arg_index, hex_arg)
|
||||
|
||||
# Next arg index, please.
|
||||
arg_index += 1
|
||||
|
||||
# return the packetized payload
|
||||
return gdbremote_packet_encode_string(payload)
|
||||
|
||||
|
||||
def parse_reg_info_response(response_packet):
    """Parse a qRegisterInfo response into a {key: value} dict."""
    if not response_packet:
        raise Exception("response_packet cannot be None")

    # Strip off prefix $ and suffix #xx if present.
    response_packet = _STRIP_COMMAND_PREFIX_REGEX.sub("", response_packet)
    response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet)

    # Each non-empty semicolon-separated chunk is a "key:value" pair.
    return dict(chunk.split(':') for chunk in response_packet.split(';')
                if chunk)
|
||||
|
||||
|
||||
def parse_threadinfo_response(response_packet):
    """Parse a qfThreadInfo/qsThreadInfo response into a list of thread ids."""
    if not response_packet:
        raise Exception("response_packet cannot be None")

    # Strip off prefix $m and suffix #xx if present.
    response_packet = _STRIP_COMMAND_PREFIX_M_REGEX.sub("", response_packet)
    response_packet = _STRIP_CHECKSUM_REGEX.sub("", response_packet)

    # The remaining payload is a comma-separated list of hex thread ids.
    return [int(chunk, 16)
            for chunk in response_packet.split(",")
            if chunk]
|
||||
|
||||
|
||||
def unpack_endian_binary_string(endian, value_string):
    """Unpack a gdb-remote binary (post-unescaped, i.e. not escaped) response to an unsigned int given endianness of the inferior."""
    if not endian:
        raise Exception("endian cannot be None")
    if not value_string or len(value_string) < 1:
        raise Exception("value_string cannot be None or empty")

    if endian == 'little':
        # Least-significant byte first: shift each byte by 8 * its index.
        total = 0
        for byte_index, ch in enumerate(value_string):
            total += ord(ch) << (8 * byte_index)
        return total
    elif endian == 'big':
        # Most-significant byte first: fold left, shifting in each byte.
        total = 0
        for ch in value_string:
            total = (total << 8) + ord(ch)
        return total
    else:
        # pdp is valid but need to add parse code once needed.
        raise Exception("unsupported endian:{}".format(endian))
|
||||
|
||||
|
||||
def unpack_register_hex_unsigned(endian, value_string):
    """Unpack a gdb-remote $p-style response to an unsigned int given endianness of inferior."""
    if not endian:
        raise Exception("endian cannot be None")
    if not value_string or len(value_string) < 1:
        raise Exception("value_string cannot be None or empty")

    if endian == 'little':
        # Two hex digits per byte, least-significant byte first; a pair
        # at character offset i contributes at bit offset 4 * i.
        result = 0
        for offset in range(0, len(value_string), 2):
            result += int(value_string[offset:offset + 2], 16) << (4 * offset)
        return result
    elif endian == 'big':
        # Big-endian hex is just the number written out normally.
        return int(value_string, 16)
    else:
        # pdp is valid but need to add parse code once needed.
        raise Exception("unsupported endian:{}".format(endian))
|
||||
|
||||
|
||||
def pack_register_hex(endian, value, byte_size=None):
    """Pack an unsigned int into a gdb-remote register-value hex string.

    value is emitted two hex digits per byte in the requested byte order;
    when byte_size is given, the result is zero-padded out to that many
    bytes. A value of 0 with no byte_size yields an empty string.
    """
    if not endian:
        raise Exception("endian cannot be None")

    if endian == 'little':
        # Emit bytes least-significant first.
        encoded = ""
        remaining = value
        while remaining != 0:
            encoded += "{:02x}".format(remaining & 0xff)
            remaining >>= 8
        if byte_size:
            # Zero-fill on the right/end (the MSB side in little-endian).
            encoded += "00" * (byte_size - len(encoded) // 2)
        return encoded

    elif endian == 'big':
        # Emit bytes most-significant first by prepending each low byte.
        encoded = ""
        remaining = value
        while remaining != 0:
            encoded = "{:02x}".format(remaining & 0xff) + encoded
            remaining >>= 8
        if byte_size:
            # Zero-fill on the left/front (the MSB side in big-endian).
            encoded = ("00" * (byte_size - len(encoded) // 2)) + encoded
        return encoded

    else:
        # pdp is valid but need to add parse code once needed.
        raise Exception("unsupported endian:{}".format(endian))
|
||||
|
||||
|
||||
class GdbRemoteEntryBase(object):
    """Base class for one step in a GdbRemoteTestSequence playback."""

    def is_output_matcher(self):
        # Only entries that match against accumulated $O output (see
        # MatchRemoteOutputEntry) return True; the default is a packet entry.
        return False
|
||||
|
||||
|
||||
class GdbRemoteEntry(GdbRemoteEntryBase):
    """A single send-or-expect step in a gdb-remote playback sequence.

    See __init__ for the two matching modes (exact payload vs. regex
    with optional captures).
    """

    def __init__(
            self,
            is_send_to_remote=True,
            exact_payload=None,
            regex=None,
            capture=None,
            expect_captures=None):
        """Create an entry representing one piece of the I/O to/from a gdb remote debug monitor.

        Args:

            is_send_to_remote: True if this entry is a message to be
                sent to the gdbremote debug monitor; False if this
                entry represents text to be matched against the reply
                from the gdbremote debug monitor.

            exact_payload: if not None, then this packet is an exact
                send (when sending to the remote) or an exact match of
                the response from the gdbremote. The checksums are
                ignored on exact match requests since negotiation of
                no-ack makes the checksum content essentially
                undefined.

            regex: currently only valid for receives from gdbremote.
                When specified (and only if exact_payload is None),
                indicates the gdbremote response must match the given
                regex. Match groups in the regex can be used for two
                different purposes: saving the match (see capture
                arg), or validating that a match group matches a
                previously established value (see expect_captures). It
                is perfectly valid to have just a regex arg and to
                specify neither capture or expect_captures args. This
                arg only makes sense if exact_payload is not
                specified.

            capture: if specified, is a dictionary of regex match
                group indices (should start with 1) to variable names
                that will store the capture group indicated by the
                index. For example, {1:"thread_id"} will store capture
                group 1's content in the context dictionary where
                "thread_id" is the key and the match group value is
                the value. The value stored off can be used later in a
                expect_captures expression. This arg only makes sense
                when regex is specified.

            expect_captures: if specified, is a dictionary of regex
                match group indices (should start with 1) to variable
                names, where the match group should match the value
                existing in the context at the given variable name.
                For example, {2:"thread_id"} indicates that the second
                match group must match the value stored under the
                context's previously stored "thread_id" key. This arg
                only makes sense when regex is specified.
        """
        self._is_send_to_remote = is_send_to_remote
        self.exact_payload = exact_payload
        self.regex = regex
        self.capture = capture
        self.expect_captures = expect_captures

    def is_send_to_remote(self):
        # True when this entry should be transmitted to the stub rather
        # than matched against its output.
        return self._is_send_to_remote

    def is_consumed(self):
        # For now, all packets are consumed after first use.
        return True

    def get_send_packet(self):
        # Return the exact payload to transmit; only meaningful for
        # send-to-remote entries created with exact_payload.
        if not self.is_send_to_remote():
            raise Exception(
                "get_send_packet() called on GdbRemoteEntry that is not a send-to-remote packet")
        if not self.exact_payload:
            raise Exception(
                "get_send_packet() called on GdbRemoteEntry but it doesn't have an exact payload")
        return self.exact_payload

    def _assert_exact_payload_match(self, asserter, actual_packet):
        # Delegates to the module-level helper; checksums are ignored there.
        assert_packets_equal(asserter, actual_packet, self.exact_payload)
        return None

    def _assert_regex_match(self, asserter, actual_packet, context):
        # Ensure the actual packet matches from the start of the actual packet.
        match = self.regex.match(actual_packet)
        if not match:
            asserter.fail(
                "regex '{}' failed to match against content '{}'".format(
                    self.regex.pattern, actual_packet))

        if self.capture:
            # Handle captures.
            for group_index, var_name in list(self.capture.items()):
                capture_text = match.group(group_index)
                # It is okay for capture text to be None - which it will be if it is a group that can match nothing.
                # The user must be okay with it since the regex itself matched
                # above.
                context[var_name] = capture_text

        if self.expect_captures:
            # Handle comparing matched groups to context dictionary entries.
            for group_index, var_name in list(self.expect_captures.items()):
                capture_text = match.group(group_index)
                if not capture_text:
                    raise Exception(
                        "No content to expect for group index {}".format(group_index))
                asserter.assertEqual(capture_text, context[var_name])

        return context

    def assert_match(self, asserter, actual_packet, context=None):
        # This only makes sense for matching lines coming from the
        # remote debug monitor.
        if self.is_send_to_remote():
            raise Exception(
                "Attempted to match a packet being sent to the remote debug monitor, doesn't make sense.")

        # Create a new context if needed.
        if not context:
            context = {}

        # If this is an exact payload, ensure they match exactly,
        # ignoring the packet checksum which is optional for no-ack
        # mode.
        if self.exact_payload:
            self._assert_exact_payload_match(asserter, actual_packet)
            return context
        elif self.regex:
            return self._assert_regex_match(asserter, actual_packet, context)
        else:
            raise Exception(
                "Don't know how to match a remote-sent packet when exact_payload isn't specified.")
|
||||
|
||||
|
||||
class MultiResponseGdbRemoteEntry(GdbRemoteEntryBase):
    """Represents a query/response style packet.

    Assumes the first item is sent to the gdb remote.
    An end sequence regex indicates the end of the query/response
    packet sequence. All responses up through (but not including) the
    end response are stored in a context variable.

    Settings accepted from params:

        next_query or query: required. The typical query packet without the $ prefix or #xx suffix.
            If there is a special first packet to start the iteration query, see the
            first_query key.

        first_query: optional. If the first query requires a special query command, specify
            it with this key. Do not specify the $ prefix or #xx suffix.

        append_iteration_suffix: defaults to False. Specify True if the 0-based iteration
            index should be appended as a suffix to the command. e.g. qRegisterInfo with
            this key set true will generate query packets of qRegisterInfo0, qRegisterInfo1,
            etc.

        end_regex: required. Specifies a compiled regex object that will match the full text
            of any response that signals an end to the iteration. It must include the
            initial $ and ending #xx and must match the whole packet.

        save_key: required. Specifies the key within the context where an array will be stored.
            Each packet received from the gdb remote that does not match the end_regex will get
            appended to the array stored within the context at that key.

        runaway_response_count: optional. Defaults to 10000. If this many responses are retrieved,
            assume there is something wrong with either the response collection or the ending
            detection regex and throw an exception.
    """

    def __init__(self, params):
        self._next_query = params.get("next_query", params.get("query"))
        if not self._next_query:
            # Bug fix: the original raised a bare string, which is a
            # TypeError on Python 3 (string exceptions were removed).
            raise Exception(
                "either next_query or query key must be specified for MultiResponseGdbRemoteEntry")

        self._first_query = params.get("first_query", self._next_query)
        self._append_iteration_suffix = params.get(
            "append_iteration_suffix", False)
        self._iteration = 0
        self._end_regex = params["end_regex"]
        self._save_key = params["save_key"]
        self._runaway_response_count = params.get(
            "runaway_response_count", 10000)
        # The entry alternates between send and receive states, starting
        # with a send.
        self._is_send_to_remote = True
        self._end_matched = False

    def is_send_to_remote(self):
        return self._is_send_to_remote

    def get_send_packet(self):
        """Return the next query, framed as a packet, and flip to receive state."""
        if not self.is_send_to_remote():
            raise Exception(
                "get_send_packet() called on MultiResponseGdbRemoteEntry that is not in the send state")
        if self._end_matched:
            raise Exception(
                "get_send_packet() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.")

        # Choose the first or next query for the base payload.
        if self._iteration == 0 and self._first_query:
            payload = self._first_query
        else:
            payload = self._next_query

        # Append the suffix as needed.
        if self._append_iteration_suffix:
            payload += "%x" % self._iteration

        # Keep track of the iteration.
        self._iteration += 1

        # Now that we've given the query packet, flip the mode to
        # receive/match.
        self._is_send_to_remote = False

        # Return the result, converted to packet form.
        return gdbremote_packet_encode_string(payload)

    def is_consumed(self):
        # Consumed once a response matching end_regex has been seen.
        return self._end_matched

    def assert_match(self, asserter, actual_packet, context=None):
        """Record a non-terminal response, or mark the sequence finished."""
        # This only makes sense for matching lines coming from the remote debug
        # monitor.
        if self.is_send_to_remote():
            raise Exception(
                "assert_match() called on MultiResponseGdbRemoteEntry but state is set to send a query packet.")

        if self._end_matched:
            raise Exception(
                "assert_match() called on MultiResponseGdbRemoteEntry but end of query/response sequence has already been seen.")

        # Set up a context as needed.
        if not context:
            context = {}

        # Check if the packet matches the end condition.
        match = self._end_regex.match(actual_packet)
        if match:
            # We're done iterating.
            self._end_matched = True
            return context

        # Not done iterating - save the packet.
        context[self._save_key] = context.get(self._save_key, [])
        context[self._save_key].append(actual_packet)

        # Check for a runaway response cycle.
        if len(context[self._save_key]) >= self._runaway_response_count:
            raise Exception(
                "runaway query/response cycle detected: %d responses captured so far. Last response: %s" %
                (len(context[self._save_key]), context[self._save_key][-1]))

        # Flip the mode to send for generating the query.
        self._is_send_to_remote = True
        return context
|
||||
|
||||
|
||||
class MatchRemoteOutputEntry(GdbRemoteEntryBase):
    """Waits for output from the debug monitor to match a regex or time out.

    This entry type tries to match each time new gdb remote output is accumulated
    using a provided regex. If the output does not match the regex within the
    given timeframe, the command fails the playback session. If the regex does
    match, any capture fields are recorded in the context.

    Settings accepted from params:

        regex: required. Specifies a compiled regex object that must either succeed
            with re.match or re.search (see regex_mode below) within the given timeout
            (see timeout_seconds below) or cause the playback to fail.

        regex_mode: optional. Available values: "match" or "search". If "match", the entire
            stub output as collected so far must match the regex. If search, then the regex
            must match starting somewhere within the output text accumulated thus far.
            Default: "match" (i.e. the regex must match the entirety of the accumulated output
            buffer, so unexpected text will generally fail the match).

        capture: optional. If specified, is a dictionary of regex match group indices (should start
            with 1) to variable names that will store the capture group indicated by the
            index. For example, {1:"thread_id"} will store capture group 1's content in the
            context dictionary where "thread_id" is the key and the match group value is
            the value. The value stored off can be used later in a expect_captures expression.
            This arg only makes sense when regex is specified.
    """

    def __init__(self, regex=None, regex_mode="match", capture=None):
        self._regex = regex
        self._regex_mode = regex_mode
        self._capture = capture
        # Becomes True once the regex has matched accumulated output.
        self._matched = False

        # Validate arguments eagerly so misconfigured entries fail at
        # construction rather than mid-playback.
        if not self._regex:
            raise Exception("regex cannot be None")

        if not self._regex_mode in ["match", "search"]:
            raise Exception(
                "unsupported regex mode \"{}\": must be \"match\" or \"search\"".format(
                    self._regex_mode))

    def is_output_matcher(self):
        # This entry matches against accumulated $O output, not packets.
        return True

    def is_send_to_remote(self):
        # This is always a "wait for remote" command.
        return False

    def is_consumed(self):
        return self._matched

    def assert_match(self, asserter, accumulated_output, context):
        """Try to match accumulated output; record captures on success.

        Returns the (possibly updated) context. A non-match is not an
        error here - the caller retries as more output accumulates.
        """
        # Validate args.
        if not accumulated_output:
            raise Exception("accumulated_output cannot be none")
        if not context:
            raise Exception("context cannot be none")

        # Validate that we haven't already matched.
        if self._matched:
            raise Exception(
                "invalid state - already matched, attempting to match again")

        # If we don't have any content yet, we don't match.
        if len(accumulated_output) < 1:
            return context

        # Check if we match
        if self._regex_mode == "match":
            match = self._regex.match(accumulated_output)
        elif self._regex_mode == "search":
            match = self._regex.search(accumulated_output)
        else:
            raise Exception(
                "Unexpected regex mode: {}".format(
                    self._regex_mode))

        # If we don't match, wait to try again after next $O content, or time
        # out.
        if not match:
            # print("re pattern \"{}\" did not match against \"{}\"".format(self._regex.pattern, accumulated_output))
            return context

        # We do match.
        self._matched = True
        # print("re pattern \"{}\" matched against \"{}\"".format(self._regex.pattern, accumulated_output))

        # Collect up any captures into the context.
        if self._capture:
            # Handle captures.
            for group_index, var_name in list(self._capture.items()):
                capture_text = match.group(group_index)
                if not capture_text:
                    raise Exception(
                        "No content for group index {}".format(group_index))
                context[var_name] = capture_text

        return context
|
||||
|
||||
|
||||
class GdbRemoteTestSequence(object):
    """An ordered list of playback entries built from log lines and dicts."""

    # Matches gdb-remote log lines of the form "... read packet: <payload>"
    # or "... send packet: <payload>"; group 1 is the direction, group 2 the
    # packet text.
    _LOG_LINE_REGEX = re.compile(r'^.*(read|send)\s+packet:\s+(.+)$')

    def __init__(self, logger):
        self.entries = []
        self.logger = logger

    def add_log_lines(self, log_lines, remote_input_is_read):
        """Convert log lines/dict specs into entries appended to self.entries.

        Strings are parsed as gdb-remote log lines; dicts select an entry
        type via their "type" key ("regex_capture" by default,
        "multi_response", or "output_match").
        """
        for line in log_lines:
            if isinstance(line, str):
                # Handle log line import
                # if self.logger:
                # self.logger.debug("processing log line: {}".format(line))
                match = self._LOG_LINE_REGEX.match(line)
                if match:
                    playback_packet = match.group(2)
                    direction = match.group(1)
                    if _is_packet_lldb_gdbserver_input(
                            direction, remote_input_is_read):
                        # Handle as something to send to the remote debug monitor.
                        # if self.logger:
                        # self.logger.info("processed packet to send to remote: {}".format(playback_packet))
                        self.entries.append(
                            GdbRemoteEntry(
                                is_send_to_remote=True,
                                exact_payload=playback_packet))
                    else:
                        # Log line represents content to be expected from the remote debug monitor.
                        # if self.logger:
                        # self.logger.info("receiving packet from llgs, should match: {}".format(playback_packet))
                        self.entries.append(
                            GdbRemoteEntry(
                                is_send_to_remote=False,
                                exact_payload=playback_packet))
                else:
                    raise Exception(
                        "failed to interpret log line: {}".format(line))
            elif isinstance(line, dict):
                entry_type = line.get("type", "regex_capture")
                if entry_type == "regex_capture":
                    # Handle more explicit control over details via dictionary.
                    direction = line.get("direction", None)
                    regex = line.get("regex", None)
                    capture = line.get("capture", None)
                    expect_captures = line.get("expect_captures", None)

                    # Compile the regex.
                    if regex and (isinstance(regex, str)):
                        regex = re.compile(regex)

                    if _is_packet_lldb_gdbserver_input(
                            direction, remote_input_is_read):
                        # Handle as something to send to the remote debug monitor.
                        # if self.logger:
                        # self.logger.info("processed dict sequence to send to remote")
                        self.entries.append(
                            GdbRemoteEntry(
                                is_send_to_remote=True,
                                regex=regex,
                                capture=capture,
                                expect_captures=expect_captures))
                    else:
                        # Log line represents content to be expected from the remote debug monitor.
                        # if self.logger:
                        # self.logger.info("processed dict sequence to match receiving from remote")
                        self.entries.append(
                            GdbRemoteEntry(
                                is_send_to_remote=False,
                                regex=regex,
                                capture=capture,
                                expect_captures=expect_captures))
                elif entry_type == "multi_response":
                    self.entries.append(MultiResponseGdbRemoteEntry(line))
                elif entry_type == "output_match":

                    regex = line.get("regex", None)
                    # Compile the regex.
                    if regex and (isinstance(regex, str)):
                        # DOTALL so the pattern can span multiple output lines.
                        regex = re.compile(regex, re.DOTALL)

                    regex_mode = line.get("regex_mode", "match")
                    capture = line.get("capture", None)
                    self.entries.append(
                        MatchRemoteOutputEntry(
                            regex=regex,
                            regex_mode=regex_mode,
                            capture=capture))
                else:
                    raise Exception("unknown entry type \"%s\"" % entry_type)
|
||||
|
||||
|
||||
def process_is_running(pid, unknown_value=True):
    """If possible, validate that the given pid represents a running process on the local system.

    Args:

        pid: an OS-specific representation of a process id. Should be an integral value.

        unknown_value: value used when we cannot determine how to check running local
            processes on the OS.

    Returns:

        If we can figure out how to check running process ids on the given OS:
        return True if the process is running, or False otherwise.

        If we don't know how to check running process ids on the given OS:
        return the value provided by the unknown_value arg.
    """
    if not isinstance(pid, six.integer_types):
        raise Exception(
            "pid must be an integral type (actual type: %s)" % str(
                type(pid)))

    if lldb.remote_platform:
        # No way to enumerate running processes on a remote platform.
        return unknown_value

    if platform.system() not in ['Darwin', 'Linux', 'FreeBSD', 'NetBSD']:
        # Don't know how to enumerate processes on this OS; report the
        # caller-provided "don't know" value.
        return unknown_value

    # Ask ps for the pid column; the first row is the header, the rest are
    # pids (one per line, possibly with a trailing empty string).
    ps_output = subprocess.check_output(
        "ps ax | awk '{ print $1; }'", shell=True).decode("utf-8")
    running_pids = {int(text_pid)
                    for text_pid in ps_output.split('\n')[1:]
                    if text_pid != ''}

    return pid in running_pids
|
||||
|
||||
if __name__ == '__main__':
    # Quick self-check when run as a script: report whether an
    # lldb-server executable can be located on this system.
    EXE_PATH = get_lldb_server_exe()
    if EXE_PATH:
        print("lldb-server path detected: {}".format(EXE_PATH))
    else:
        print("lldb-server could not be found")
|
|
@ -0,0 +1,370 @@
|
|||
//===-- main.cpp ------------------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <errno.h>
|
||||
#include <inttypes.h>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
#if !defined(_WIN32)
|
||||
#include <pthread.h>
|
||||
#include <signal.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
#include <setjmp.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <thread>
|
||||
#include <time.h>
|
||||
#include <vector>
|
||||
|
||||
#if defined(__APPLE__)
|
||||
__OSX_AVAILABLE_STARTING(__MAC_10_6, __IPHONE_3_2)
|
||||
int pthread_threadid_np(pthread_t, __uint64_t *);
|
||||
#elif defined(__linux__)
|
||||
#include <sys/syscall.h>
|
||||
#elif defined(__NetBSD__)
|
||||
#include <lwp.h>
|
||||
#elif defined(_WIN32)
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
// Command/argument prefixes this test inferior understands on its
// command line (see the argv parsing loop in main).
static const char *const RETVAL_PREFIX = "retval:";
static const char *const SLEEP_PREFIX = "sleep:";
static const char *const STDERR_PREFIX = "stderr:";
static const char *const SET_MESSAGE_PREFIX = "set-message:";
static const char *const PRINT_MESSAGE_COMMAND = "print-message:";
static const char *const GET_DATA_ADDRESS_PREFIX = "get-data-address-hex:";
static const char *const GET_STACK_ADDRESS_COMMAND = "get-stack-address-hex:";
static const char *const GET_HEAP_ADDRESS_COMMAND = "get-heap-address-hex:";

static const char *const GET_CODE_ADDRESS_PREFIX = "get-code-address-hex:";
static const char *const CALL_FUNCTION_PREFIX = "call-function:";

// Thread subcommands, given as "thread:<command>".
static const char *const THREAD_PREFIX = "thread:";
static const char *const THREAD_COMMAND_NEW = "new";
static const char *const THREAD_COMMAND_PRINT_IDS = "print-ids";
static const char *const THREAD_COMMAND_SEGFAULT = "segfault";

static const char *const PRINT_PID_COMMAND = "print-pid";

// When true, each created thread prints its thread id (read by thread_func).
static bool g_print_thread_ids = false;
// Serializes stdout/stderr writes across threads.
static std::mutex g_print_mutex;
// When true, created threads deliberately raise a SIGSEGV (read by thread_func).
static bool g_threads_do_segfault = false;

// setjmp/longjmp state used to recover from the intentional segfault
// (see thread_func and signal_handler); the mutex serializes segfaulting
// threads so only one uses the jump buffer at a time.
static std::mutex g_jump_buffer_mutex;
static jmp_buf g_jump_buffer;
static bool g_is_segfaulting = false;

// Message buffer; presumably filled by the "set-message:" argument handler
// (not visible in this chunk) - confirm against the rest of main.
static char g_message[256];

// Volatile chars toggled back and forth by swap_chars().
static volatile char g_c1 = '0';
static volatile char g_c2 = '1';
|
||||
|
||||
// Print this process's pid to stderr in the form "PID: <n>" so the
// test harness can read it back.
static void print_pid() {
#if defined(_WIN32)
  fprintf(stderr, "PID: %d\n", ::GetCurrentProcessId());
#else
  fprintf(stderr, "PID: %d\n", getpid());
#endif
}
|
||||
|
||||
// Print the current thread's id (in hex, no trailing newline) to stdout.
static void print_thread_id() {
  // Put in the right magic here for your platform to spit out the thread id (tid)
  // that debugserver/lldb-gdbserver would see as a TID. Otherwise, let the else
  // clause print out the unsupported text so that the unit test knows to skip
  // verifying thread ids.
#if defined(__APPLE__)
  __uint64_t tid = 0;
  pthread_threadid_np(pthread_self(), &tid);
  printf("%" PRIx64, tid);
#elif defined(__linux__)
  // This is a call to gettid() via syscall.
  printf("%" PRIx64, static_cast<uint64_t>(syscall(__NR_gettid)));
#elif defined(__NetBSD__)
  // Technically lwpid_t is 32-bit signed integer
  printf("%" PRIx64, static_cast<uint64_t>(_lwp_self()));
#elif defined(_WIN32)
  printf("%" PRIx64, static_cast<uint64_t>(::GetCurrentThreadId()));
#else
  printf("{no-tid-support}");
#endif
}
|
||||
|
||||
// Signal handler installed for SIGALRM, SIGUSR1 and SIGSEGV (see main).
// It announces the signal and receiving thread id, recovers from the
// test's intentional segfault via longjmp when one is in progress, and
// re-installs itself. No-op on Windows, which lacks these signals.
static void signal_handler(int signo) {
#if defined(_WIN32)
  // No signal support on Windows.
#else
  const char *signal_name = nullptr;
  switch (signo) {
  case SIGUSR1:
    signal_name = "SIGUSR1";
    break;
  case SIGSEGV:
    signal_name = "SIGSEGV";
    break;
  default:
    signal_name = nullptr;
  }

  // Print notice that we received the signal on a given thread.
  {
    std::lock_guard<std::mutex> lock(g_print_mutex);
    if (signal_name)
      printf("received %s on thread id: ", signal_name);
    else
      printf("received signo %d (%s) on thread id: ", signo, strsignal(signo));
    print_thread_id();
    printf("\n");
  }

  // Reset the signal handler if we're one of the expected signal handlers.
  switch (signo) {
  case SIGSEGV:
    if (g_is_segfaulting) {
      // Fix up the pointer we're writing to. This needs to happen if nothing
      // intercepts the SIGSEGV (i.e. if somebody runs this from the command
      // line).
      longjmp(g_jump_buffer, 1);
    }
    break;
  case SIGUSR1:
    if (g_is_segfaulting) {
      // Fix up the pointer we're writing to. This is used to test gdb remote
      // signal delivery. A SIGSEGV will be raised when the thread is created,
      // switched out for a SIGUSR1, and then this code still needs to fix the
      // seg fault. (i.e. if somebody runs this from the command line).
      longjmp(g_jump_buffer, 1);
    }
    break;
  }

  // Reset the signal handler.
  sig_t sig_result = signal(signo, signal_handler);
  if (sig_result == SIG_ERR) {
    fprintf(stderr, "failed to set signal handler: errno=%d\n", errno);
    exit(1);
  }
#endif
}
|
||||
|
||||
// Toggle the two volatile globals g_c1/g_c2 and back again. The writes
// are kept as four separate statements on volatile chars - presumably so
// a debugger watchpoint can observe each store; confirm against the
// tests that drive this inferior.
static void swap_chars() {
  g_c1 = '1';
  g_c2 = '0';

  g_c1 = '0';
  g_c2 = '1';
}
|
||||
|
||||
// Print a greeting to stdout, serialized with the other threads' output.
static void hello() {
  std::lock_guard<std::mutex> lock(g_print_mutex);
  printf("hello, world\n");
}
|
||||
|
||||
// Entry point for each thread the inferior spawns: optionally prints its
// thread id (g_print_thread_ids), optionally raises and recovers from an
// intentional SIGSEGV (g_threads_do_segfault), then sleeps so the thread
// stays alive for the debugger to inspect.
static void *thread_func(void *arg) {
  // 1-based index of this thread, in creation order.
  static std::atomic<int> s_thread_index(1);
  const int this_thread_index = s_thread_index++;
  if (g_print_thread_ids) {
    std::lock_guard<std::mutex> lock(g_print_mutex);
    printf("thread %d id: ", this_thread_index);
    print_thread_id();
    printf("\n");
  }

  if (g_threads_do_segfault) {
    // Sleep for a number of seconds based on the thread index.
    // TODO add ability to send commands to test exe so we can
    // handle timing more precisely. This is clunky. All we're
    // trying to do is add predictability as to the timing of
    // signal generation by created threads.
    int sleep_seconds = 2 * (this_thread_index - 1);
    std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds));

    // Test creating a SEGV.
    {
      // Serialize segfaulting threads: g_jump_buffer is shared state.
      std::lock_guard<std::mutex> lock(g_jump_buffer_mutex);
      g_is_segfaulting = true;
      int *bad_p = nullptr;
      if (setjmp(g_jump_buffer) == 0) {
        // Force a seg fault signal on this thread.
        *bad_p = 0;
      } else {
        // Tell the system we're no longer seg faulting.
        // Used by the SIGUSR1 signal handler that we inject
        // in place of the SIGSEGV so it only tries to
        // recover from the SIGSEGV if this seg fault code
        // was in play.
        g_is_segfaulting = false;
      }
    }

    {
      std::lock_guard<std::mutex> lock(g_print_mutex);
      printf("thread ");
      print_thread_id();
      printf(": past SIGSEGV\n");
    }
  }

  // Park the thread so it remains visible to the debugger.
  int sleep_seconds_remaining = 60;
  std::this_thread::sleep_for(std::chrono::seconds(sleep_seconds_remaining));

  return nullptr;
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
lldb_enable_attach();
|
||||
|
||||
std::vector<std::thread> threads;
|
||||
std::unique_ptr<uint8_t[]> heap_array_up;
|
||||
int return_value = 0;
|
||||
|
||||
#if !defined(_WIN32)
|
||||
// Set the signal handler.
|
||||
sig_t sig_result = signal(SIGALRM, signal_handler);
|
||||
if (sig_result == SIG_ERR) {
|
||||
fprintf(stderr, "failed to set SIGALRM signal handler: errno=%d\n", errno);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
sig_result = signal(SIGUSR1, signal_handler);
|
||||
if (sig_result == SIG_ERR) {
|
||||
fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
sig_result = signal(SIGSEGV, signal_handler);
|
||||
if (sig_result == SIG_ERR) {
|
||||
fprintf(stderr, "failed to set SIGUSR1 handler: errno=%d\n", errno);
|
||||
exit(1);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Process command line args.
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
if (std::strstr(argv[i], STDERR_PREFIX)) {
|
||||
// Treat remainder as text to go to stderr.
|
||||
fprintf(stderr, "%s\n", (argv[i] + strlen(STDERR_PREFIX)));
|
||||
} else if (std::strstr(argv[i], RETVAL_PREFIX)) {
|
||||
// Treat as the return value for the program.
|
||||
return_value = std::atoi(argv[i] + strlen(RETVAL_PREFIX));
|
||||
} else if (std::strstr(argv[i], SLEEP_PREFIX)) {
|
||||
// Treat as the amount of time to have this process sleep (in seconds).
|
||||
int sleep_seconds_remaining = std::atoi(argv[i] + strlen(SLEEP_PREFIX));
|
||||
|
||||
// Loop around, sleeping until all sleep time is used up. Note that
|
||||
// signals will cause sleep to end early with the number of seconds
|
||||
// remaining.
|
||||
std::this_thread::sleep_for(
|
||||
std::chrono::seconds(sleep_seconds_remaining));
|
||||
|
||||
} else if (std::strstr(argv[i], SET_MESSAGE_PREFIX)) {
|
||||
// Copy the contents after "set-message:" to the g_message buffer.
|
||||
// Used for reading inferior memory and verifying contents match
|
||||
// expectations.
|
||||
strncpy(g_message, argv[i] + strlen(SET_MESSAGE_PREFIX),
|
||||
sizeof(g_message));
|
||||
|
||||
// Ensure we're null terminated.
|
||||
g_message[sizeof(g_message) - 1] = '\0';
|
||||
|
||||
} else if (std::strstr(argv[i], PRINT_MESSAGE_COMMAND)) {
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("message: %s\n", g_message);
|
||||
} else if (std::strstr(argv[i], GET_DATA_ADDRESS_PREFIX)) {
|
||||
volatile void *data_p = nullptr;
|
||||
|
||||
if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_message"))
|
||||
data_p = &g_message[0];
|
||||
else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c1"))
|
||||
data_p = &g_c1;
|
||||
else if (std::strstr(argv[i] + strlen(GET_DATA_ADDRESS_PREFIX), "g_c2"))
|
||||
data_p = &g_c2;
|
||||
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("data address: %p\n", data_p);
|
||||
} else if (std::strstr(argv[i], GET_HEAP_ADDRESS_COMMAND)) {
|
||||
// Create a byte array if not already present.
|
||||
if (!heap_array_up)
|
||||
heap_array_up.reset(new uint8_t[32]);
|
||||
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("heap address: %p\n", heap_array_up.get());
|
||||
|
||||
} else if (std::strstr(argv[i], GET_STACK_ADDRESS_COMMAND)) {
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("stack address: %p\n", &return_value);
|
||||
} else if (std::strstr(argv[i], GET_CODE_ADDRESS_PREFIX)) {
|
||||
void (*func_p)() = nullptr;
|
||||
|
||||
if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX), "hello"))
|
||||
func_p = hello;
|
||||
else if (std::strstr(argv[i] + strlen(GET_CODE_ADDRESS_PREFIX),
|
||||
"swap_chars"))
|
||||
func_p = swap_chars;
|
||||
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("code address: %p\n", func_p);
|
||||
} else if (std::strstr(argv[i], CALL_FUNCTION_PREFIX)) {
|
||||
void (*func_p)() = nullptr;
|
||||
|
||||
// Defaut to providing the address of main.
|
||||
if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX), "hello") == 0)
|
||||
func_p = hello;
|
||||
else if (std::strcmp(argv[i] + strlen(CALL_FUNCTION_PREFIX),
|
||||
"swap_chars") == 0)
|
||||
func_p = swap_chars;
|
||||
else {
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("unknown function: %s\n",
|
||||
argv[i] + strlen(CALL_FUNCTION_PREFIX));
|
||||
}
|
||||
if (func_p)
|
||||
func_p();
|
||||
} else if (std::strstr(argv[i], THREAD_PREFIX)) {
|
||||
// Check if we're creating a new thread.
|
||||
if (std::strstr(argv[i] + strlen(THREAD_PREFIX), THREAD_COMMAND_NEW)) {
|
||||
threads.push_back(std::thread(thread_func, nullptr));
|
||||
} else if (std::strstr(argv[i] + strlen(THREAD_PREFIX),
|
||||
THREAD_COMMAND_PRINT_IDS)) {
|
||||
// Turn on thread id announcing.
|
||||
g_print_thread_ids = true;
|
||||
|
||||
// And announce us.
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(g_print_mutex);
|
||||
printf("thread 0 id: ");
|
||||
print_thread_id();
|
||||
printf("\n");
|
||||
}
|
||||
} else if (std::strstr(argv[i] + strlen(THREAD_PREFIX),
|
||||
THREAD_COMMAND_SEGFAULT)) {
|
||||
g_threads_do_segfault = true;
|
||||
} else {
|
||||
// At this point we don't do anything else with threads.
|
||||
// Later use thread index and send command to thread.
|
||||
}
|
||||
} else if (std::strstr(argv[i], PRINT_PID_COMMAND)) {
|
||||
print_pid();
|
||||
} else {
|
||||
// Treat the argument as text for stdout.
|
||||
printf("%s\n", argv[i]);
|
||||
}
|
||||
}
|
||||
|
||||
// If we launched any threads, join them
|
||||
for (std::vector<std::thread>::iterator it = threads.begin();
|
||||
it != threads.end(); ++it)
|
||||
it->join();
|
||||
|
||||
return return_value;
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build rules for the test inferior; delegates to the shared lldb test Makefile.
LEVEL = ../../../make

CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,96 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import time
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestPlatformProcessConnect(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Test connecting to a process started through 'platform process connect'.

    Launches lldb-server in platform mode on the remote target, connects a
    fresh debugger to it, and verifies the inferior stops in main() with the
    expected argc.
    """
    mydir = TestBase.compute_mydir(__file__)

    @llgs_test
    @no_debug_info_test
    @skipIf(remote=False)
    @expectedFailureAll(hostoslist=["windows"], triple='.*-android')
    def test_platform_process_connect(self):
        self.build()
        self.init_llgs_test(False)

        working_dir = lldb.remote_platform.GetWorkingDirectory()
        src = lldb.SBFileSpec(self.getBuildArtifact("a.out"))
        dest = lldb.SBFileSpec(os.path.join(working_dir, "a.out"))
        err = lldb.remote_platform.Put(src, dest)
        if err.Fail():
            # Fix: this previously referenced undefined names 'f' and 'wd',
            # which raised NameError instead of the intended RuntimeError.
            raise RuntimeError(
                "Unable copy '%s' to '%s'.\n>>> %s" %
                (src, dest, err.GetCString()))

        # Derive the listen URL from the configured platform URL.  Unix-domain
        # protocols listen on a per-run socket path; TCP listens on any port.
        m = re.search("^(.*)://([^:/]*)", configuration.lldb_platform_url)
        protocol = m.group(1)
        hostname = m.group(2)
        unix_protocol = protocol.startswith("unix-")
        if unix_protocol:
            p = re.search("^(.*)-connect", protocol)
            path = lldbutil.join_remote_paths(configuration.lldb_platform_working_dir,
                    self.getBuildDirBasename(), "platform-%d.sock" % int(time.time()))
            listen_url = "%s://%s" % (p.group(1), path)
        else:
            listen_url = "*:0"

        # lldb-server writes the actual socket id/port to this file once it
        # is listening.
        port_file = "%s/port" % working_dir
        commandline_args = [
            "platform",
            "--listen",
            listen_url,
            "--socket-file",
            port_file,
            "--",
            "%s/a.out" %
            working_dir,
            "foo"]
        self.spawnSubprocess(
            self.debug_monitor_exe,
            commandline_args,
            install_remote=False)
        self.addTearDownHook(self.cleanupSubprocesses)

        socket_id = lldbutil.wait_for_file_on_target(self, port_file)

        # Use a fresh debugger so state from the test harness's debugger
        # cannot leak into the connection under test.
        new_debugger = lldb.SBDebugger.Create()
        new_debugger.SetAsync(False)

        def del_debugger(new_debugger=new_debugger):
            del new_debugger
        self.addTearDownHook(del_debugger)

        new_platform = lldb.SBPlatform(lldb.remote_platform.GetName())
        new_debugger.SetSelectedPlatform(new_platform)
        new_interpreter = new_debugger.GetCommandInterpreter()

        if unix_protocol:
            connect_url = "%s://%s%s" % (protocol, hostname, socket_id)
        else:
            connect_url = "%s://%s:%s" % (protocol, hostname, socket_id)

        command = "platform connect %s" % (connect_url)
        result = lldb.SBCommandReturnObject()
        new_interpreter.HandleCommand(command, result)
        self.assertTrue(
            result.Succeeded(),
            "platform process connect failed: %s" %
            result.GetOutput())

        target = new_debugger.GetSelectedTarget()
        process = target.GetProcess()
        thread = process.GetThreadAtIndex(0)

        # Run to main and verify the inferior received both arguments
        # (a.out + "foo" -> argc == 2).
        breakpoint = target.BreakpointCreateByName("main")
        process.Continue()

        frame = thread.GetFrameAtIndex(0)
        self.assertEqual(frame.GetFunction().GetName(), "main")
        self.assertEqual(frame.FindVariable("argc").GetValueAsSigned(), 2)
        process.Continue()
|
|
@ -0,0 +1,6 @@
|
|||
#include <cstdio>
|
||||
|
||||
// Minimal inferior for the platform-process-connect test: echoes argc and
// exits with the first byte of argv[0].  The odd exit code is intentional —
// it gives the test a process-specific value to observe.
int main(int argc, char **argv) {
  printf("argc: %d\n", argc);
  return argv[0][0];
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build rules for the test inferior; delegates to the shared lldb test Makefile.
LEVEL = ../../../make

CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,153 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import gdbremote_testcase
|
||||
import textwrap
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
def _extract_register_value(reg_info, reg_bank, byte_order, bytes_per_entry=8):
    """Pull one register out of a hex-encoded register bank.

    reg_info supplies the register's "offset" and "bitsize" (in bytes/bits
    respectively); reg_bank is the full hex string returned by the $g packet.
    Returns a list of "0x..." strings, one per bytes_per_entry-sized chunk,
    byte-swapped when the target is little-endian.
    """
    # Offsets/sizes are in bytes; the bank is hex text, so double them.
    start = int(reg_info["offset"]) * 2
    width = int(2 * int(reg_info["bitsize"]) / 8)
    hex_payload = reg_bank[start:start + width]

    chunks = []
    for chunk in textwrap.wrap(hex_payload, 2 * bytes_per_entry):
        if byte_order == lldb.eByteOrderLittle:
            # Reverse the byte (pair-of-hex-digit) order for little-endian.
            byte_pairs = [chunk[i:i + 2] for i in range(0, len(chunk), 2)]
            chunk = "".join(reversed(byte_pairs))
        chunks.append("0x" + chunk)

    return chunks
|
||||
|
||||
|
||||
class TestGdbRemoteGPacket(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Exercise the gdb-remote $g/$G (read/write all registers) packets."""

    mydir = TestBase.compute_mydir(__file__)

    def run_test_g_packet(self):
        """Read the register bank with $g, write it back with $G, and check
        neither reply is an error ('E...')."""
        self.build()
        self.prep_debug_monitor_and_inferior()
        self.test_sequence.add_log_lines(
            ["read packet: $g#67",
             {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "register_bank"}}],
            True)
        self.connect_to_debug_monitor()
        context = self.expect_gdbremote_sequence()
        register_bank = context.get("register_bank")
        # An error reply starts with 'E'.
        self.assertTrue(register_bank[0] != 'E')

        # Echo the same bank back via $G; the stub should accept it.
        self.test_sequence.add_log_lines(
            ["read packet: $G" + register_bank + "#00",
             {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "G_reply"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertTrue(context.get("G_reply")[0] != 'E')

    @skipIfOutOfTreeDebugserver
    @debugserver_test
    @skipIfDarwinEmbedded
    def test_g_packet_debugserver(self):
        """Run the $g/$G round-trip against debugserver."""
        self.init_debugserver_test()
        self.run_test_g_packet()

    @skipIf(archs=no_match(["x86_64"]))
    def g_returns_correct_data(self, with_suffix):
        """Verify $g returns the exact register values the inferior set.

        The inferior (main.cpp) loads known constants into r8-r15 and
        xmm8-xmm15 and hits int3; this method reads them back and compares.
        with_suffix selects the thread-suffix form of the $g request.
        """
        procs = self.prep_debug_monitor_and_inferior()

        self.add_register_info_collection_packets()
        if with_suffix:
            self.add_thread_suffix_request_packets()
        self.add_threadinfo_collection_packets()
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Gather register info.
        reg_infos = self.parse_register_info_packets(context)
        self.assertIsNotNone(reg_infos)
        self.add_lldb_register_index(reg_infos)
        # Index register info entries by name.
        reg_infos = {info['name']: info for info in reg_infos}

        # Gather thread info.
        if with_suffix:
            threads = self.parse_threadinfo_packets(context)
            self.assertIsNotNone(threads)
            thread_id = threads[0]
            self.assertIsNotNone(thread_id)
        else:
            thread_id = None

        # Send vCont packet to resume the inferior (it stops at int3).
        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
                                          {"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
                                           "capture": {1: "hex_exit_code"}},
                                          ],
                                         True)

        # Send g packet to retrieve the register bank
        if thread_id:
            g_request = "read packet: $g;thread:{:x}#00".format(thread_id)
        else:
            g_request = "read packet: $g#00"
        self.test_sequence.add_log_lines(
            [g_request,
             {"direction": "send", "regex": r"^\$(.+)#[0-9a-fA-F]{2}$",
              "capture": {1: "register_bank"}}],
            True)
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        reg_bank = context.get("register_bank")
        self.assertTrue(reg_bank[0] != 'E')

        byte_order = self.get_target_byte_order()
        get_reg_value = lambda reg_name : _extract_register_value(
            reg_infos[reg_name], reg_bank, byte_order)

        # Expected values mirror the constants set in the inferior's main().
        self.assertEqual(['0x0102030405060708'], get_reg_value('r8'))
        self.assertEqual(['0x1112131415161718'], get_reg_value('r9'))
        self.assertEqual(['0x2122232425262728'], get_reg_value('r10'))
        self.assertEqual(['0x3132333435363738'], get_reg_value('r11'))
        self.assertEqual(['0x4142434445464748'], get_reg_value('r12'))
        self.assertEqual(['0x5152535455565758'], get_reg_value('r13'))
        self.assertEqual(['0x6162636465666768'], get_reg_value('r14'))
        self.assertEqual(['0x7172737475767778'], get_reg_value('r15'))

        self.assertEqual(
            ['0x020406080a0c0e01', '0x030507090b0d0f00'], get_reg_value('xmm8'))
        self.assertEqual(
            ['0x121416181a1c1e11', '0x131517191b1d1f10'], get_reg_value('xmm9'))
        self.assertEqual(
            ['0x222426282a2c2e21', '0x232527292b2d2f20'], get_reg_value('xmm10'))
        self.assertEqual(
            ['0x323436383a3c3e31', '0x333537393b3d3f30'], get_reg_value('xmm11'))
        self.assertEqual(
            ['0x424446484a4c4e41', '0x434547494b4d4f40'], get_reg_value('xmm12'))
        self.assertEqual(
            ['0x525456585a5c5e51', '0x535557595b5d5f50'], get_reg_value('xmm13'))
        self.assertEqual(
            ['0x626466686a6c6e61', '0x636567696b6d6f60'], get_reg_value('xmm14'))
        self.assertEqual(
            ['0x727476787a7c7e71', '0x737577797b7d7f70'], get_reg_value('xmm15'))

    @llgs_test
    def test_g_returns_correct_data_with_suffix_llgs(self):
        """lldb-server: $g with thread suffix returns the expected values."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.g_returns_correct_data(True)

    @llgs_test
    def test_g_returns_correct_data_no_suffix_llgs(self):
        """lldb-server: plain $g returns the expected values."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.g_returns_correct_data(False)
|
|
@ -0,0 +1,54 @@
|
|||
#include <cstdint>
|
||||
|
||||
// 128-bit, 16-byte-aligned value: two 64-bit lanes.  The alignment matters —
// main() loads these with movaps, which requires 16-byte-aligned operands.
struct alignas(16) xmm_t {
  uint64_t a, b;
};
|
||||
|
||||
// Load distinctive known constants into r8-r15 and xmm8-xmm15, then trap
// with int3 so the attached test can read the registers back via the
// gdb-remote $g packet and compare against these values.
int main() {
  uint64_t r8 = 0x0102030405060708;
  uint64_t r9 = 0x1112131415161718;
  uint64_t r10 = 0x2122232425262728;
  uint64_t r11 = 0x3132333435363738;
  uint64_t r12 = 0x4142434445464748;
  uint64_t r13 = 0x5152535455565758;
  uint64_t r14 = 0x6162636465666768;
  uint64_t r15 = 0x7172737475767778;

  xmm_t xmm8 = {0x020406080A0C0E01, 0x030507090B0D0F00};
  xmm_t xmm9 = {0x121416181A1C1E11, 0x131517191B1D1F10};
  xmm_t xmm10 = {0x222426282A2C2E21, 0x232527292B2D2F20};
  xmm_t xmm11 = {0x323436383A3C3E31, 0x333537393B3D3F30};
  xmm_t xmm12 = {0x424446484A4C4E41, 0x434547494B4D4F40};
  xmm_t xmm13 = {0x525456585A5C5E51, 0x535557595B5D5F50};
  xmm_t xmm14 = {0x626466686A6C6E61, 0x636567696B6D6F60};
  xmm_t xmm15 = {0x727476787A7C7E71, 0x737577797B7D7F70};

  // Inputs %0-%7 are the GPR values, %8-%15 the xmm memory operands.
  // The clobber list tells the compiler every register we overwrite.
  asm volatile("movq %0, %%r8\n\t"
               "movq %1, %%r9\n\t"
               "movq %2, %%r10\n\t"
               "movq %3, %%r11\n\t"
               "movq %4, %%r12\n\t"
               "movq %5, %%r13\n\t"
               "movq %6, %%r14\n\t"
               "movq %7, %%r15\n\t"
               "\n\t"
               "movaps %8, %%xmm8\n\t"
               "movaps %9, %%xmm9\n\t"
               "movaps %10, %%xmm10\n\t"
               "movaps %11, %%xmm11\n\t"
               "movaps %12, %%xmm12\n\t"
               "movaps %13, %%xmm13\n\t"
               "movaps %14, %%xmm14\n\t"
               "movaps %15, %%xmm15\n\t"
               "\n\t"
               "int3"
               :
               : "g"(r8), "g"(r9), "g"(r10), "g"(r11), "g"(r12), "g"(r13),
                 "g"(r14), "g"(r15), "m"(xmm8), "m"(xmm9), "m"(xmm10),
                 "m"(xmm11), "m"(xmm12), "m"(xmm13), "m"(xmm14), "m"(xmm15)
               : "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
                 "%xmm8", "%xmm9", "%xmm10", "%xmm11", "%xmm12", "%xmm13",
                 "%xmm14", "%xmm15");

  return 0;
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build rules for the test inferior; delegates to the shared lldb test Makefile.
LEVEL = ../../../make

CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,115 @@
|
|||
# This test makes sure that lldb-server supports and properly handles
|
||||
# QPassSignals GDB protocol package.
|
||||
from __future__ import print_function
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
class TestGdbRemote_QPassSignals(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Verify lldb-server's handling of the QPassSignals packet.

    QPassSignals tells the stub which signals to pass straight to the
    inferior without stopping.  The inferior (main.cpp) raises a fixed
    sequence of signals and exits with the count of signals it received.
    """

    mydir = TestBase.compute_mydir(__file__)

    def expect_signal(self, expected_signo):
        """Continue and assert the next stop is signal expected_signo."""
        self.test_sequence.add_log_lines(["read packet: $vCont;c#a8",
                                          {"direction": "send",
                                           "regex": r"^\$T([0-9a-fA-F]{2}).*#[0-9a-fA-F]{2}$",
                                           "capture": {1: "hex_exit_code"}},
                                          ],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        hex_exit_code = context.get("hex_exit_code")
        self.assertIsNotNone(hex_exit_code)
        self.assertEqual(int(hex_exit_code, 16), expected_signo)

    def expect_exit_code(self, exit_code):
        """Continue and assert the inferior exits ($W) with exit_code."""
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             "send packet: $W{0:02x}#00".format(exit_code)],
            True)
        self.expect_gdbremote_sequence()


    def ignore_signals(self, signals):
        """Send QPassSignals for the named signals and expect OK."""
        def signal_name_to_hex(signame):
            # The packet encodes signal numbers as bare hex.
            return format(lldbutil.get_signal_number(signame), 'x')
        signals_str = ";".join(map(signal_name_to_hex, signals))

        self.test_sequence.add_log_lines(["read packet: $QPassSignals:"
                                          + signals_str + " #00",
                                          "send packet: $OK#00"],
                                         True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_q_pass_signals(self):
        """Ignored signals are delivered to the inferior without stopping."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV",
                            "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        signals_to_ignore = ["SIGUSR1", "SIGUSR2"]
        self.ignore_signals(signals_to_ignore)
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        # The inferior's exit code is the number of signals it handled —
        # exactly the ones the stub passed through.
        self.expect_exit_code(len(signals_to_ignore))

    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_change_signals_at_runtime(self):
        """QPassSignals sent mid-run takes effect for subsequent signals."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                            "SIGALRM", "SIGHUP"]
        signals_to_ignore = ["SIGFPE", "SIGBUS", "SIGINT"]

        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
            # Start ignoring partway through the inferior's signal sequence.
            if signal_name == "SIGALRM":
                self.ignore_signals(signals_to_ignore)
        self.expect_exit_code(len(signals_to_ignore))

    @llgs_test
    def test_default_signals_behavior(self):
        """Without QPassSignals, every signal stops the inferior."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        expected_signals = ["SIGSEGV", "SIGUSR1", "SIGUSR2",
                            "SIGALRM", "SIGFPE", "SIGBUS", "SIGINT", "SIGHUP"]
        for signal_name in expected_signals:
            signo = lldbutil.get_signal_number(signal_name)
            self.expect_signal(signo)
        self.expect_exit_code(0)


    @llgs_test
    @skipUnlessPlatform(["linux", "android"])
    def test_support_q_pass_signals(self):
        """qSupported must advertise QPassSignals+."""
        self.init_llgs_test()
        self.build()

        # Start up the stub and start/prep the inferior.
        self.set_inferior_startup_launch()
        procs = self.prep_debug_monitor_and_inferior()
        self.add_qSupported_packets()

        # Run the packet stream.
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # Retrieve the qSupported features and check QPassSignals+
        supported_dict = self.parse_qSupported_response(context)
        self.assertEqual(supported_dict["QPassSignals"], "+")
|
|
@ -0,0 +1,36 @@
|
|||
//===-- main.cpp ------------------------------------------------*- C++ -*-===//
|
||||
//
|
||||
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
||||
// See https://llvm.org/LICENSE.txt for license information.
|
||||
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include <signal.h>
|
||||
#include <stdio.h>
|
||||
#include <vector>
|
||||
|
||||
static int signal_counter = 0;
|
||||
|
||||
// Signal handler: count and report each delivered signal.
// NOTE(review): printf is not async-signal-safe; this is tolerable here only
// because the signals are raised synchronously via raise() in this program.
static void count_signal(int signo) {
  ++signal_counter;
  printf("Signal %d\n", signo);
}
|
||||
|
||||
// Install count_signal for a fixed set of signals, then raise each one in
// order.  The QPassSignals test observes which of these stop at the stub
// versus reach this handler.
static void raise_signals() {
  std::vector<int> signals(
      {SIGSEGV, SIGUSR1, SIGUSR2, SIGALRM, SIGFPE, SIGBUS, SIGINT, SIGHUP});

  for (int signal_num : signals) {
    signal(signal_num, count_signal);
  }

  for (int signal_num : signals) {
    raise(signal_num);
  }
}
|
||||
|
||||
// Exit code == number of signals actually delivered to the process; the
// test asserts this matches the number the stub was told to pass through.
int main() {
  raise_signals();
  return signal_counter;
}
|
|
@ -0,0 +1,198 @@
|
|||
|
||||
from __future__ import print_function
|
||||
|
||||
|
||||
import re
|
||||
import select
|
||||
import threading
|
||||
import traceback
|
||||
import codecs
|
||||
|
||||
from six.moves import queue
|
||||
from lldbsuite.support import seven
|
||||
|
||||
|
||||
def _handle_output_packet_string(packet_contents):
|
||||
if (not packet_contents) or (len(packet_contents) < 1):
|
||||
return None
|
||||
elif packet_contents[0] != "O":
|
||||
return None
|
||||
elif packet_contents == "OK":
|
||||
return None
|
||||
else:
|
||||
return seven.unhexlify(packet_contents[1:])
|
||||
|
||||
|
||||
def _dump_queue(the_queue):
    """Drain the_queue, printing each entry with escapes made visible.

    NOTE(review): "string_escape" is a Python 2-only codec; under Python 3
    this raises LookupError — confirm whether this debug helper is still
    exercised on Python 3 before relying on it.
    """
    while not the_queue.empty():
        # get(True) blocks, but empty() was just checked; single-consumer use.
        print(codecs.encode(the_queue.get(True), "string_escape"))
        print("\n")
|
||||
|
||||
class PumpQueues(object):
    """Holds the two queues a SocketPacketPump fills: accumulated $O output
    and all other parsed packets."""

    def __init__(self):
        self._output_queue = queue.Queue()
        self._packet_queue = queue.Queue()

    def output_queue(self):
        """Queue of accumulated inferior-output snapshots ($O packets)."""
        return self._output_queue

    def packet_queue(self):
        """Queue of all non-$O packets (and '+' acks)."""
        return self._packet_queue

    def verify_queues_empty(self):
        """Warn (with a stack trace) about any unconsumed queue entries —
        leftovers represent packets the test never matched."""
        for label, q in (("output", self.output_queue()),
                         ("packet", self.packet_queue())):
            if q.empty():
                continue
            print("warning: %s queue entries still exist:" % label)
            _dump_queue(q)
            print("from here:")
            traceback.print_stack()
|
||||
|
||||
|
||||
class SocketPacketPump(object):
    """A threaded packet reader that partitions packets into two streams.

    All incoming $O packet content is accumulated with the current accumulation
    state put into the OutputQueue.

    All other incoming packets are placed in the packet queue.

    A select thread can be started and stopped, and runs to place packet
    content into the two queues.
    """

    # Matches one complete framed packet: '$' payload '#' two-hex checksum.
    _GDB_REMOTE_PACKET_REGEX = re.compile(r'^\$([^\#]*)#[0-9a-fA-F]{2}')

    def __init__(self, pump_socket, pump_queues, logger=None):
        if not pump_socket:
            raise Exception("pump_socket cannot be None")

        self._thread = None
        self._stop_thread = False
        self._socket = pump_socket
        self._logger = logger
        # Raw bytes received but not yet parsed into complete packets.
        self._receive_buffer = ""
        # Concatenation of all $O payloads seen so far.
        self._accumulated_output = ""
        self._pump_queues = pump_queues

    def __enter__(self):
        """Support the python 'with' statement.

        Start the pump thread."""
        self.start_pump_thread()
        return self

    def __exit__(self, exit_type, value, the_traceback):
        """Support the python 'with' statement.

        Shut down the pump thread."""
        self.stop_pump_thread()

    def start_pump_thread(self):
        """Start the background reader thread; raises if already running."""
        if self._thread:
            raise Exception("pump thread is already running")
        self._stop_thread = False
        self._thread = threading.Thread(target=self._run_method)
        self._thread.start()

    def stop_pump_thread(self):
        """Signal the reader thread to stop and wait for it to exit."""
        self._stop_thread = True
        if self._thread:
            self._thread.join()

    def _process_new_bytes(self, new_bytes):
        """Append new_bytes to the buffer and dispatch every complete packet.

        $O payloads extend the accumulated-output snapshot pushed onto the
        output queue; everything else (including '+' acks) goes onto the
        packet queue.  Partial packets stay buffered for the next call.
        """
        if not new_bytes:
            return
        if len(new_bytes) < 1:
            return

        # Add new bytes to our accumulated unprocessed packet bytes.
        self._receive_buffer += new_bytes

        # Parse fully-formed packets into individual packets.
        has_more = len(self._receive_buffer) > 0
        while has_more:
            if len(self._receive_buffer) <= 0:
                has_more = False
            # handle '+' ack
            elif self._receive_buffer[0] == "+":
                self._pump_queues.packet_queue().put("+")
                self._receive_buffer = self._receive_buffer[1:]
                if self._logger:
                    self._logger.debug(
                        "parsed packet from stub: +\n" +
                        "new receive_buffer: {}".format(
                            self._receive_buffer))
            else:
                packet_match = self._GDB_REMOTE_PACKET_REGEX.match(
                    self._receive_buffer)
                if packet_match:
                    # Our receive buffer matches a packet at the
                    # start of the receive buffer.
                    new_output_content = _handle_output_packet_string(
                        packet_match.group(1))
                    if new_output_content:
                        # This was an $O packet with new content.
                        self._accumulated_output += new_output_content
                        self._pump_queues.output_queue().put(self._accumulated_output)
                    else:
                        # Any packet other than $O.
                        self._pump_queues.packet_queue().put(packet_match.group(0))

                    # Remove the parsed packet from the receive
                    # buffer.
                    self._receive_buffer = self._receive_buffer[
                        len(packet_match.group(0)):]
                    if self._logger:
                        self._logger.debug(
                            "parsed packet from stub: " +
                            packet_match.group(0))
                        self._logger.debug(
                            "new receive_buffer: " +
                            self._receive_buffer)
                else:
                    # We don't have enough in the receive buffer to make a full
                    # packet. Stop trying until we read more.
                    has_more = False

    def _run_method(self):
        """Reader-thread main loop: poll the socket via select() and feed
        received bytes to _process_new_bytes until asked to stop or the
        socket read fails (typically because it was closed)."""
        self._receive_buffer = ""
        self._accumulated_output = ""

        if self._logger:
            self._logger.info("socket pump starting")

        # Keep looping around until we're asked to stop the thread.
        while not self._stop_thread:
            # Zero timeout: poll, so _stop_thread is rechecked promptly.
            can_read, _, _ = select.select([self._socket], [], [], 0)
            if can_read and self._socket in can_read:
                try:
                    new_bytes = seven.bitcast_to_string(self._socket.recv(4096))
                    if self._logger and new_bytes and len(new_bytes) > 0:
                        self._logger.debug(
                            "pump received bytes: {}".format(new_bytes))
                except:
                    # Likely a closed socket. Done with the pump thread.
                    if self._logger:
                        self._logger.debug(
                            "socket read failed, stopping pump read thread\n" +
                            traceback.format_exc(3))
                    break
                self._process_new_bytes(new_bytes)

        if self._logger:
            self._logger.info("socket pump exiting")

    def get_accumulated_output(self):
        """All $O output decoded so far (not thread-synchronized)."""
        return self._accumulated_output

    def get_receive_buffer(self):
        """Unparsed bytes still awaiting a complete packet."""
        return self._receive_buffer
|
|
@ -0,0 +1,65 @@
|
|||
from __future__ import print_function
|
||||
|
||||
|
||||
import unittest2
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
|
||||
from lldbgdbserverutils import *
|
||||
|
||||
|
||||
class TestLldbGdbServerUtils(unittest2.TestCase):
    """Unit tests for GdbRemoteEntry packet matching in lldbgdbserverutils."""

    def test_entry_exact_payload_match(self):
        """An exact_payload entry matches an identical packet."""
        entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a")
        entry.assert_match(self, "$OK#9a")

    def test_entry_exact_payload_match_ignores_checksum(self):
        """Checksum bytes are not compared during exact-payload matching."""
        entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a")
        entry.assert_match(self, "$OK#00")

    def test_entry_creates_context(self):
        """assert_match returns a (non-None) context dict."""
        entry = GdbRemoteEntry(is_send_to_remote=False, exact_payload="$OK#9a")
        context = entry.assert_match(self, "$OK#9a")
        self.assertIsNotNone(context)

    def test_entry_regex_matches(self):
        """A regex entry matches a conforming packet."""
        entry = GdbRemoteEntry(
            is_send_to_remote=False,
            regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"),
            capture={
                1: "thread_id"})
        context = entry.assert_match(self, "$QC980#00")

    def test_entry_regex_saves_match(self):
        """Captured regex groups are stored in the context under their key."""
        entry = GdbRemoteEntry(
            is_send_to_remote=False,
            regex=re.compile(r"^\$QC([0-9a-fA-F]+)#"),
            capture={
                1: "thread_id"})
        context = entry.assert_match(self, "$QC980#00")
        self.assertEqual(context["thread_id"], "980")

    def test_entry_regex_expect_captures_success(self):
        """expect_captures passes when the capture equals the context value."""
        context = {"thread_id": "980"}
        entry = GdbRemoteEntry(
            is_send_to_remote=False,
            regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"),
            expect_captures={
                2: "thread_id"})
        entry.assert_match(self, "$T11thread:980;", context=context)

    def test_entry_regex_expect_captures_raises_on_fail(self):
        """expect_captures raises AssertionError on a mismatched capture."""
        context = {"thread_id": "980"}
        entry = GdbRemoteEntry(
            is_send_to_remote=False,
            regex=re.compile(r"^\$T([0-9a-fA-F]{2})thread:([0-9a-fA-F]+)"),
            expect_captures={
                2: "thread_id"})
        try:
            entry.assert_match(self, "$T11thread:970;", context=context)
            self.fail()
        except AssertionError:
            # okay
            return None
|
|
@ -0,0 +1,6 @@
|
|||
# Build rules for the threaded test inferior; delegates to the shared lldb
# test Makefile.  ENABLE_THREADS links against the platform thread library.
LEVEL = ../../../make

ENABLE_THREADS := YES
CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,41 @@
|
|||
from __future__ import print_function
|
||||
|
||||
import gdbremote_testcase
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
|
||||
|
||||
class TestGdbRemoteThreadName(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Check that lldb-server reports the inferior's thread name in its
    stop replies."""

    mydir = TestBase.compute_mydir(__file__)

    def run_and_check_name(self, expected_name):
        """Resume the inferior, wait for the next stop reply, and verify
        the "name" key/value it carries equals expected_name."""
        self.test_sequence.add_log_lines(
            ["read packet: $vCont;c#a8",
             {"direction": "send",
              "regex": r"^\$T([0-9a-fA-F]{2})([^#]+)#[0-9a-fA-F]{2}$",
              "capture": {1: "signal", 2: "key_vals_text"}},
             ],
            True)

        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)

        # The inferior stops itself with raise(SIGINT); confirm that is
        # the signal the stop reply reports.
        sigint = lldbutil.get_signal_number("SIGINT")
        self.assertEqual(sigint, int(context.get("signal"), 16))
        kv_dict = self.parse_key_val_dict(context.get("key_vals_text"))
        self.assertEqual(expected_name, kv_dict.get("name"))

    @llgs_test
    def test(self):
        """Make sure lldb-server can retrieve the inferior thread name."""
        self.init_llgs_test()
        self.build()
        self.set_inferior_startup_launch()
        self.prep_debug_monitor_and_inferior()

        # The inferior renames its thread between the two SIGINT stops.
        self.run_and_check_name("hello world")
        self.run_and_check_name("goodbye world")
|
|
@ -0,0 +1,22 @@
|
|||
#include <pthread.h>
#include <signal.h>

// Set the calling thread's name using the platform-specific pthread API.
// Silently a no-op on platforms not listed below.
void set_thread_name(const char *name) {
#if defined(__APPLE__)
  // Darwin's variant names only the calling thread and takes no thread arg.
  ::pthread_setname_np(name);
#elif defined(__FreeBSD__)
  ::pthread_set_name_np(::pthread_self(), name);
#elif defined(__linux__)
  ::pthread_setname_np(::pthread_self(), name);
#elif defined(__NetBSD__)
  // NetBSD takes a printf-style format string plus arguments.
  ::pthread_setname_np(::pthread_self(), "%s", name);
#endif
}

// Name the thread, then raise SIGINT so the debugger under test can
// inspect the name at each stop; rename and repeat once.
int main() {
  set_thread_name("hello world");
  raise(SIGINT);
  set_thread_name("goodbye world");
  raise(SIGINT);
  return 0;
}
|
|
@ -0,0 +1 @@
|
|||
lldb-vscode
|
|
@ -0,0 +1,5 @@
|
|||
# Build the C attach inferior via the shared LLDB test make rules.
LEVEL = ../../../make

C_SOURCES := main.c

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,193 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
|
||||
|
||||
def spawn_and_wait(program, delay):
    """Launch *program* after an optional *delay* (seconds) and block until
    it exits.

    Runs on a background thread so the waitFor-attach test has a freshly
    launched process to attach to.
    """
    if delay:
        time.sleep(delay)
    process = subprocess.Popen([program],
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    # Bug fix: communicate() drains stdout/stderr while waiting, so a
    # chatty inferior cannot deadlock on a full pipe the way a plain
    # wait() with PIPE stdio could.
    process.communicate()
|
||||
|
||||
|
||||
class TestVSCode_attach(lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests the lldb-vscode "attach" request: by pid, by name, waitFor,
    and with user-supplied command lists."""

    mydir = TestBase.compute_mydir(__file__)

    def set_and_hit_breakpoint(self, continueToExit=True):
        """Set the source breakpoint tagged '// breakpoint 1' in main.c,
        run to it, and optionally continue until the inferior exits."""
        source = 'main.c'
        breakpoint1_line = line_number(source, '// breakpoint 1')
        lines = [breakpoint1_line]
        # Set breakpoint in the thread function so we can step the threads
        breakpoint_ids = self.set_source_breakpoints(source, lines)
        self.assertEqual(len(breakpoint_ids), len(lines),
                         "expect correct number of breakpoints")
        self.continue_to_breakpoints(breakpoint_ids)
        if continueToExit:
            self.continue_to_exit()

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @skipIfNetBSD  # Hangs on NetBSD as well
    @no_debug_info_test
    def test_by_pid(self):
        '''
            Tests attaching to a process by process ID.
        '''
        self.build_and_create_debug_adaptor()
        program = self.getBuildArtifact("a.out")
        self.process = subprocess.Popen([program],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        self.attach(pid=self.process.pid)
        self.set_and_hit_breakpoint(continueToExit=True)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @skipIfNetBSD  # Hangs on NetBSD as well
    @no_debug_info_test
    def test_by_name(self):
        '''
            Tests attaching to a process by process name.
        '''
        self.build_and_create_debug_adaptor()
        orig_program = self.getBuildArtifact("a.out")
        # Since we are going to attach by process name, we need a unique
        # process name that has minimal chance to match a process that is
        # already running. tempfile.mktemp() is deprecated and racy, so use
        # mkstemp() to atomically create a uniquely named file, then copy
        # our executable over it. Running this copy ensures we don't get
        # the error "more than one process matches 'a.out'".
        fd, program = tempfile.mkstemp()
        os.close(fd)
        shutil.copyfile(orig_program, program)
        shutil.copymode(orig_program, program)

        def cleanup():
            if os.path.exists(program):
                os.unlink(program)
        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        self.process = subprocess.Popen([program],
                                        stdin=subprocess.PIPE,
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
        # Wait for a bit to ensure the process is launched, but not for so long
        # that the process has already finished by the time we attach.
        time.sleep(3)
        self.attach(program=program)
        self.set_and_hit_breakpoint(continueToExit=True)

    @skipUnlessDarwin
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @skipIfNetBSD  # Hangs on NetBSD as well
    @no_debug_info_test
    def test_by_name_waitFor(self):
        '''
            Tests attaching to a process by process name and waiting for the
            next instance of a process to be launched, ignoring all current
            ones.
        '''
        self.build_and_create_debug_adaptor()
        program = self.getBuildArtifact("a.out")
        # Launch the inferior on a background thread after a short delay so
        # the waitFor attach below has something new to latch onto.
        self.spawn_thread = threading.Thread(target=spawn_and_wait,
                                             args=(program, 1.0,))
        self.spawn_thread.start()
        self.attach(program=program, waitFor=True)
        self.set_and_hit_breakpoint(continueToExit=True)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @skipIfNetBSD  # Hangs on NetBSD as well
    @no_debug_info_test
    def test_commands(self):
        '''
            Tests the "initCommands", "preRunCommands", "stopCommands",
            "exitCommands", and "attachCommands" that can be passed during
            attach.

            "initCommands" are a list of LLDB commands that get executed
            before the target is created.
            "preRunCommands" are a list of LLDB commands that get executed
            after the target has been created and before the launch.
            "stopCommands" are a list of LLDB commands that get executed each
            time the program stops.
            "exitCommands" are a list of LLDB commands that get executed when
            the process exits.
            "attachCommands" are a list of LLDB commands that get executed and
            must have a valid process in the selected target in LLDB after
            they are done executing. This allows custom commands to create any
            kind of debug session.
        '''
        self.build_and_create_debug_adaptor()
        program = self.getBuildArtifact("a.out")
        # Here we just create a target and launch the process as a way to test
        # if we are able to use attach commands to create any kind of a target
        # and use it for debugging
        attachCommands = [
            'target create -d "%s"' % (program),
            'process launch -- arg1'
        ]
        initCommands = ['target list', 'platform list']
        preRunCommands = ['image list a.out', 'image dump sections a.out']
        stopCommands = ['frame variable', 'bt']
        exitCommands = ['expr 2+3', 'expr 3+4']
        self.attach(program=program,
                    attachCommands=attachCommands,
                    initCommands=initCommands,
                    preRunCommands=preRunCommands,
                    stopCommands=stopCommands,
                    exitCommands=exitCommands)

        # Get output from the console. This should contain both the
        # "initCommands" and the "preRunCommands".
        output = self.get_console()
        # Verify all "initCommands" were found in console output
        self.verify_commands('initCommands', output, initCommands)
        # Verify all "preRunCommands" were found in console output
        self.verify_commands('preRunCommands', output, preRunCommands)

        functions = ['main']
        breakpoint_ids = self.set_function_breakpoints(functions)
        self.assertEqual(len(breakpoint_ids), len(functions),
                        "expect one breakpoint")
        self.continue_to_breakpoints(breakpoint_ids)
        output = self.get_console(timeout=1.0)
        self.verify_commands('stopCommands', output, stopCommands)

        # Continue after launch and hit the "pause()" call and stop the target.
        # Get output from the console. This should contain both the
        # "stopCommands" that were run after we stop.
        self.vscode.request_continue()
        time.sleep(0.5)
        self.vscode.request_pause()
        self.vscode.wait_for_stopped()
        output = self.get_console(timeout=1.0)
        self.verify_commands('stopCommands', output, stopCommands)

        # Continue until the program exits
        self.continue_to_exit()
        # Get output from the console. This should contain both the
        # "exitCommands" that were run after the second breakpoint was hit
        output = self.get_console(timeout=1.0)
        self.verify_commands('exitCommands', output, exitCommands)
|
|
@ -0,0 +1,11 @@
|
|||
#include <stdio.h>
#include <unistd.h>

int main(int argc, char const *argv[])
{
  /* No declaration is visible here -- presumably provided/injected by the
     LLDB test harness build (Makefile.rules); TODO confirm. It marks the
     process as attachable on platforms that restrict ptrace. */
  lldb_enable_attach();

  /* Print the pid so the test can attach by it, then linger long enough
     for the attach to happen before exiting. */
  printf("pid = %i\n", getpid());
  sleep(10);
  return 0; // breakpoint 1
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build the C++ breakpoint inferior via the shared LLDB test make rules.
LEVEL = ../../../make

CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,211 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import pprint
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
class TestVSCode_setBreakpoints(lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests the lldb-vscode "setBreakpoints" request."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_set_and_clear(self):
        '''Tests setting and clearing source file and line breakpoints.
        This packet is a bit tricky on the debug adaptor side since there
        is no "clearBreakpoints" packet. Source file and line breakpoints
        are set by sending a "setBreakpoints" packet with a source file
        specified and zero or more source lines. If breakpoints have been
        set in the source file before, any existing breakpoints must remain
        set, any new breakpoints must be created, and any breakpoints that
        were in previous requests but are not in the current request must
        be removed. This function tests that setting and clearing works
        correctly. It doesn't test hitting breakpoints or per-breakpoint
        functionality such as 'conditions' and 'hitCondition' settings.'''
        source_basename = 'main.cpp'
        source_path = os.path.join(os.getcwd(), source_basename)
        first_line = line_number('main.cpp', 'break 12')
        second_line = line_number('main.cpp', 'break 13')
        third_line = line_number('main.cpp', 'break 14')
        lines = [first_line, second_line, third_line]

        # Visual Studio Code Debug Adaptors have no way to specify the file
        # without launching or attaching to a process, so we must start a
        # process in order to be able to set breakpoints.
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)

        # Set three breakpoints and verify they were created correctly.
        resp = self.vscode.request_setBreakpoints(source_path, lines)
        line_to_id = {}
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))
            for bp in bps:
                bp_line = bp['line']
                # Remember each breakpoint's id so later requests can prove
                # the adaptor kept (rather than re-created) it.
                line_to_id[bp_line] = bp['id']
                self.assertTrue(bp_line in lines,
                                "line expected in lines array")
                self.assertTrue(bp['verified'],
                                "expect breakpoint verified")

        # There is no breakpoint delete packet; clients just send another
        # setBreakpoints packet for the same source file with fewer lines.
        # Remove the second line entry and call setBreakpoints again. Any
        # breakpoint set before must keep the same "id" (it wasn't cleared
        # and re-created), and the removed line must actually be gone.
        lines.remove(second_line)
        # Set two breakpoints and verify the earlier ones are still set.
        resp = self.vscode.request_setBreakpoints(source_path, lines)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))
            for bp in bps:
                bp_line = bp['line']
                # Same breakpoint ID means LLDB kept the breakpoint alive.
                self.assertTrue(line_to_id[bp_line] == bp['id'],
                                "verify previous breakpoints stayed the same")
                self.assertTrue(bp_line in lines,
                                "line expected in lines array")
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

        # Now fetch the full list of breakpoints from the target and verify
        # only two remain. The response above covered two breakpoints, but
        # we must make sure the third isn't still set in the target.
        resp = self.vscode.request_testGetTargetBreakpoints()
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))
            for bp in bps:
                bp_line = bp['line']
                self.assertTrue(line_to_id[bp_line] == bp['id'],
                                "verify previous breakpoints stayed the same")
                self.assertTrue(bp_line in lines,
                                "line expected in lines array")
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

        # Clear all breakpoints for the source file by passing an empty
        # lines array.
        lines = []
        resp = self.vscode.request_setBreakpoints(source_path, lines)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))

        # Verify with the target that all breakpoints have been cleared.
        resp = self.vscode.request_testGetTargetBreakpoints()
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))

        # Set a breakpoint again in the same source file and verify it was
        # added.
        lines = [second_line]
        resp = self.vscode.request_setBreakpoints(source_path, lines)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))
            for bp in bps:
                self.assertTrue(bp['line'] in lines,
                                "line expected in lines array")
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

        # Ask the target once more to make sure only this one breakpoint
        # remains set.
        resp = self.vscode.request_testGetTargetBreakpoints()
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(lines),
                            "expect %u source breakpoints" % (len(lines)))
            for bp in bps:
                self.assertTrue(bp['line'] in lines,
                                "line expected in lines array")
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_functionality(self):
        '''Tests hitting breakpoints and the functionality of a single
        breakpoint, like 'conditions' and 'hitCondition' settings.'''
        source_basename = 'main.cpp'
        source_path = os.path.join(os.getcwd(), source_basename)
        loop_line = line_number('main.cpp', '// break loop')

        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        # Set a breakpoint at the loop line with no condition and no
        # hitCondition.
        bp_ids = self.set_source_breakpoints(source_path, [loop_line])
        self.assertTrue(len(bp_ids) == 1, "expect one breakpoint")
        self.vscode.request_continue()

        # Verify we hit the breakpoint we just set.
        self.verify_breakpoint_hit(bp_ids)

        # The loop counter must be zero on the first hit.
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 0, 'i != 0 after hitting breakpoint')

        # Update the condition on our breakpoint.
        new_bp_ids = self.set_source_breakpoints(source_path, [loop_line],
                                                 condition="i==4")
        self.assertTrue(bp_ids == new_bp_ids,
                        "existing breakpoint should have its condition "
                        "updated")

        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 4,
                        'i != 4 showing conditional works')

        new_bp_ids = self.set_source_breakpoints(source_path, [loop_line],
                                                 hitCondition="2")

        self.assertTrue(bp_ids == new_bp_ids,
                        "existing breakpoint should have its condition "
                        "updated")

        # Continue with a hitCondition of 2 and expect it to skip 1 value.
        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 6,
                        'i != 6 showing hitCondition works')

        # Continue after hitting our hitCondition and make sure it only
        # goes up by 1.
        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 7,
                        'i != 7 showing post hitCondition hits every time')
|
|
@ -0,0 +1,52 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import pprint
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
class TestVSCode_setExceptionBreakpoints(
        lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests the lldb-vscode "setExceptionBreakpoints" request."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @expectedFailureNetBSD
    @no_debug_info_test
    def test_functionality(self):
        '''Tests setting and clearing exception breakpoints.
        This packet is a bit tricky on the debug adaptor side since there
        is no "clear exception breakpoints" packet. Exception breakpoints
        are set by sending a "setExceptionBreakpoints" packet with zero or
        more exception filters. If exception breakpoints have been set
        before, any existing breakpoints must remain set, any new
        breakpoints must be created, and any breakpoints that were in
        previous requests but not in the current one must be removed. This
        test covers that setting and clearing; it doesn't test hitting
        breakpoints or per-breakpoint functionality such as 'conditions'
        and 'hitCondition' settings.
        '''
        # Visual Studio Code Debug Adaptors have no way to specify the file
        # without launching or attaching to a process, so we must start a
        # process in order to be able to set breakpoints.
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)

        # Enable both C++ throw and catch filters, then make sure the
        # inferior stops at each in turn.
        filters = ['cpp_throw', 'cpp_catch']
        resp = self.vscode.request_setExceptionBreakpoints(filters)
        if resp:
            self.assertTrue(resp['success'])

        self.continue_to_exception_breakpoint('C++ Throw')
        self.continue_to_exception_breakpoint('C++ Catch')
|
|
@ -0,0 +1,166 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import pprint
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
class TestVSCode_setFunctionBreakpoints(
        lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests the lldb-vscode "setFunctionBreakpoints" request."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_set_and_clear(self):
        '''Tests setting and clearing function breakpoints.
        This packet is a bit tricky on the debug adaptor side since there
        is no "clear function breakpoints" packet. Function breakpoints
        are set by sending a "setFunctionBreakpoints" packet with zero or
        more function names. If function breakpoints have been set before,
        any existing breakpoints must remain set, any new breakpoints must
        be created, and any breakpoints that were in previous requests but
        are not in the current request must be removed. This test covers
        that setting and clearing; it doesn't test hitting breakpoints or
        per-breakpoint functionality such as 'conditions' and
        'hitCondition' settings.
        '''
        # Visual Studio Code Debug Adaptors have no way to specify the file
        # without launching or attaching to a process, so we must start a
        # process in order to be able to set breakpoints.
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        bp_id_12 = None
        functions = ['twelve']
        # Set a function breakpoint at 'twelve'.
        resp = self.vscode.request_setFunctionBreakpoints(functions)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))
            for bp in bps:
                # Remember the id so later requests can prove the
                # breakpoint survived unchanged.
                bp_id_12 = bp['id']
                self.assertTrue(bp['verified'],
                                "expect breakpoint verified")

        # Add an extra name and make sure we have two breakpoints after this.
        functions.append('thirteen')
        resp = self.vscode.request_setFunctionBreakpoints(functions)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))
            for bp in bps:
                self.assertTrue(bp['verified'],
                                "expect breakpoint verified")

        # There is no breakpoint delete packet; clients just send another
        # setFunctionBreakpoints packet with the different function names.
        functions.remove('thirteen')
        resp = self.vscode.request_setFunctionBreakpoints(functions)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))
            for bp in bps:
                # Same ID means LLDB kept the 'twelve' breakpoint alive.
                self.assertTrue(bp['id'] == bp_id_12,
                                'verify "twelve" breakpoint ID is same')
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

        # Fetch the full list of breakpoints from the target and verify only
        # one remains. The response above covered one breakpoint, but we
        # must make sure the second isn't still set in the target.
        resp = self.vscode.request_testGetTargetBreakpoints()
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))
            for bp in bps:
                self.assertTrue(bp['id'] == bp_id_12,
                                'verify "twelve" breakpoint ID is same')
                self.assertTrue(bp['verified'],
                                "expect breakpoint still verified")

        # Clear all function breakpoints by passing an empty name list.
        functions = []
        resp = self.vscode.request_setFunctionBreakpoints(functions)
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))

        # Verify with the target that all breakpoints have been cleared.
        resp = self.vscode.request_testGetTargetBreakpoints()
        if resp:
            bps = resp['body']['breakpoints']
            self.assertTrue(len(bps) == len(functions),
                            "expect %u source breakpoints" % (len(functions)))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_functionality(self):
        '''Tests hitting breakpoints and the functionality of a single
        breakpoint, like 'conditions' and 'hitCondition' settings.'''

        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        # Set a breakpoint on "twelve" with no condition and no hitCondition.
        functions = ['twelve']
        bp_ids = self.set_function_breakpoints(functions)

        self.assertTrue(len(bp_ids) == len(functions),
                        "expect one breakpoint")

        # Verify we hit the breakpoint we just set.
        self.continue_to_breakpoints(bp_ids)

        # The loop counter must be zero on the first hit.
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 0, 'i != 0 after hitting breakpoint')

        # Update the condition on our breakpoint.
        new_bp_ids = self.set_function_breakpoints(functions,
                                                   condition="i==4")
        self.assertTrue(bp_ids == new_bp_ids,
                        "existing breakpoint should have its condition "
                        "updated")

        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 4,
                        'i != 4 showing conditional works')
        new_bp_ids = self.set_function_breakpoints(functions,
                                                   hitCondition="2")

        self.assertTrue(bp_ids == new_bp_ids,
                        "existing breakpoint should have its condition "
                        "updated")

        # Continue with a hitCondition of 2 and expect it to skip 1 value.
        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 6,
                        'i != 6 showing hitCondition works')

        # Continue after hitting our hitCondition and make sure it only
        # goes up by 1.
        self.continue_to_breakpoints(bp_ids)
        i = int(self.vscode.get_local_variable_value('i'))
        self.assertTrue(i == 7,
                        'i != 7 showing post hitCondition hits every time')
|
|
@ -0,0 +1,27 @@
|
|||
#include <stdio.h>
#include <stdexcept>

// NOTE: the "// break NN" and "// break loop" comments below are
// load-bearing: the Python tests locate breakpoint lines with
// line_number(), so do not move or edit them.

int twelve(int i) {
  return 12 + i; // break 12
}

int thirteen(int i) {
  return 13 + i; // break 13
}

namespace a {
int fourteen(int i) {
  return 14 + i; // break 14
}
}
int main(int argc, char const *argv[]) {
  // Loop breakpoint target; 'x' is intentionally unused -- the tests only
  // inspect the loop counter 'i' at each stop.
  for (int i=0; i<10; ++i) {
    int x = twelve(i) + thirteen(i) + a::fourteen(i); // break loop
  }
  // Exercise C++ throw and catch for the exception-breakpoint test.
  try {
    throw std::invalid_argument( "throwing exception for testing" );
  } catch (...) {
    puts("caught exception...");
  }
  return 0;
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build the C launch-test inferior via the shared LLDB test make rules.
LEVEL = ../../../make

C_SOURCES := main.c

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,345 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import pprint
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
class TestVSCode_launch(lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests for the lldb-vscode "launch" request and its options (args, env,
    cwd, stdio control, shell expansion, and user-supplied LLDB commands)."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_default(self):
        '''
            Tests the default launch of a simple program. No arguments,
            environment, or anything else is specified.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        self.continue_to_exit()
        # Now get the STDOUT and verify our program argument is correct
        output = self.get_stdout()
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        # The inferior prints 'arg[0] = "<program>"' first.
        self.assertTrue(program in lines[0],
                        "make sure program path is in first argument")

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_stopOnEntry(self):
        '''
            Tests the default launch of a simple program that stops at the
            entry point instead of continuing.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program, stopOnEntry=True)
        self.set_function_breakpoints(['main'])
        stopped_events = self.continue_to_next_stop()
        # If stopOnEntry worked, the first stop happens before "main" is
        # reached, so no stop reason may be 'breakpoint'.
        for stopped_event in stopped_events:
            if 'body' in stopped_event:
                body = stopped_event['body']
                if 'reason' in body:
                    reason = body['reason']
                    self.assertTrue(
                        reason != 'breakpoint',
                        'verify stop isn\'t "main" breakpoint')

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @expectedFailureNetBSD
    @no_debug_info_test
    def test_cwd(self):
        '''
            Tests the default launch of a simple program with a current working
            directory.
        '''
        program = self.getBuildArtifact("a.out")
        program_parent_dir = os.path.split(os.path.split(program)[0])[0]
        self.build_and_launch(program,
                              cwd=program_parent_dir)
        self.continue_to_exit()
        # Now get the STDOUT and verify our program argument is correct
        output = self.get_stdout()
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        found = False
        # The inferior prints 'cwd = "<dir>"'; check it matches the cwd we
        # asked the adaptor to launch with.
        for line in lines:
            if line.startswith('cwd = \"'):
                quote_path = '"%s"' % (program_parent_dir)
                found = True
                self.assertTrue(quote_path in line,
                                "working directory '%s' not in '%s'" % (
                                    program_parent_dir, line))
        self.assertTrue(found, "verified program working directory")

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @expectedFailureNetBSD
    @no_debug_info_test
    def test_debuggerRoot(self):
        '''
            Tests the "debuggerRoot" will change the working directory of
            the lldb-vscode debug adaptor.
        '''
        program = self.getBuildArtifact("a.out")
        program_parent_dir = os.path.split(os.path.split(program)[0])[0]
        # This init command runs in the adaptor process, so $PWD reflects the
        # adaptor's working directory, not the inferior's.
        commands = ['platform shell echo cwd = $PWD']
        self.build_and_launch(program,
                              debuggerRoot=program_parent_dir,
                              initCommands=commands)
        output = self.get_console()
        self.assertTrue(output and len(output) > 0,
                        "expect console output")
        lines = output.splitlines()
        prefix = 'cwd = '
        found = False
        for line in lines:
            if line.startswith(prefix):
                found = True
                self.assertTrue(program_parent_dir == line[len(prefix):],
                                "lldb-vscode working dir '%s' == '%s'" % (
                                    program_parent_dir, line[len(prefix):]))
        self.assertTrue(found, "verified lldb-vscode working directory")
        self.continue_to_exit()

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_sourcePath(self):
        '''
            Tests the "sourcePath" will set the target.source-map.
        '''
        program = self.getBuildArtifact("a.out")
        program_dir = os.path.split(program)[0]
        self.build_and_launch(program,
                              sourcePath=program_dir)
        output = self.get_console()
        self.assertTrue(output and len(output) > 0,
                        "expect console output")
        lines = output.splitlines()
        prefix = '(lldb) settings set target.source-map "." '
        found = False
        for line in lines:
            if line.startswith(prefix):
                found = True
                quoted_path = '"%s"' % (program_dir)
                # Note: use len(prefix) here; the old message sliced line[6:]
                # which printed an unrelated substring on failure.
                self.assertTrue(quoted_path == line[len(prefix):],
                                "lldb-vscode source-map %s == %s" % (
                                    quoted_path, line[len(prefix):]))
        self.assertTrue(found, 'found "sourcePath" in console output')
        self.continue_to_exit()

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_disableSTDIO(self):
        '''
            Tests the default launch of a simple program with STDIO disabled.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program,
                              disableSTDIO=True)
        self.continue_to_exit()
        # With STDIO disabled we must capture nothing from the inferior.
        output = self.get_stdout()
        self.assertTrue(output is None or len(output) == 0,
                        "expect no program output")

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @skipIfLinux  # shell argument expansion doesn't seem to work on Linux
    @expectedFailureNetBSD
    @no_debug_info_test
    def test_shellExpandArguments_enabled(self):
        '''
            Tests the default launch of a simple program with shell expansion
            enabled.
        '''
        program = self.getBuildArtifact("a.out")
        program_dir = os.path.split(program)[0]
        glob = os.path.join(program_dir, '*.out')
        self.build_and_launch(program, args=[glob], shellExpandArguments=True)
        self.continue_to_exit()
        # Now get the STDOUT and verify our program argument is correct
        output = self.get_stdout()
        # NOTE: fixed assertion message -- output IS expected here.
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        # With expansion enabled the '*.out' glob must resolve to the actual
        # program path in arg[1].
        for line in lines:
            quote_path = '"%s"' % (program)
            if line.startswith("arg[1] ="):
                self.assertTrue(quote_path in line,
                                'verify "%s" expanded to "%s"' % (
                                    glob, program))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_shellExpandArguments_disabled(self):
        '''
            Tests the default launch of a simple program with shell expansion
            disabled.
        '''
        program = self.getBuildArtifact("a.out")
        program_dir = os.path.split(program)[0]
        glob = os.path.join(program_dir, '*.out')
        self.build_and_launch(program,
                              args=[glob],
                              shellExpandArguments=False)
        self.continue_to_exit()
        # Now get the STDOUT and verify our program argument is correct
        output = self.get_stdout()
        # NOTE: fixed assertion message -- output IS expected here.
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        # With expansion disabled arg[1] must still be the literal glob.
        for line in lines:
            quote_path = '"%s"' % (glob)
            if line.startswith("arg[1] ="):
                self.assertTrue(quote_path in line,
                                'verify "%s" stayed to "%s"' % (
                                    glob, glob))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_args(self):
        '''
            Tests launch of a simple program with arguments
        '''
        program = self.getBuildArtifact("a.out")
        args = ["one", "with space", "'with single quotes'",
                '"with double quotes"']
        self.build_and_launch(program,
                              args=args)
        self.continue_to_exit()

        # Now get the STDOUT and verify our arguments got passed correctly
        output = self.get_stdout()
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        # Skip the first argument that contains the program name
        lines.pop(0)
        # Make sure arguments we specified are correct
        for (i, arg) in enumerate(args):
            quoted_arg = '"%s"' % (arg)
            self.assertTrue(quoted_arg in lines[i],
                            'arg[%i] "%s" not in "%s"' % (i+1, quoted_arg, lines[i]))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_environment(self):
        '''
            Tests launch of a simple program with environment variables
        '''
        program = self.getBuildArtifact("a.out")
        env = ["NO_VALUE", "WITH_VALUE=BAR", "EMPTY_VALUE=",
               "SPACE=Hello World"]
        self.build_and_launch(program,
                              env=env)
        self.continue_to_exit()

        # Now get the STDOUT and verify our environment got passed correctly
        output = self.get_stdout()
        self.assertTrue(output and len(output) > 0,
                        "expect program output")
        lines = output.splitlines()
        # Skip all the arguments so we have only environment vars left
        while len(lines) and lines[0].startswith("arg["):
            lines.pop(0)
        # Make sure each environment variable in "env" is actually set in the
        # program environment that was printed to STDOUT
        for var in env:
            found = False
            for program_var in lines:
                if var in program_var:
                    found = True
                    break
            self.assertTrue(found,
                            '"%s" must exist in program environment (%s)' % (
                                var, lines))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_commands(self):
        '''
            Tests the "initCommands", "preRunCommands", "stopCommands" and
            "exitCommands" that can be passed during launch.

            "initCommands" are a list of LLDB commands that get executed
            before the target is created.
            "preRunCommands" are a list of LLDB commands that get executed
            after the target has been created and before the launch.
            "stopCommands" are a list of LLDB commands that get executed each
            time the program stops.
            "exitCommands" are a list of LLDB commands that get executed when
            the process exits
        '''
        program = self.getBuildArtifact("a.out")
        initCommands = ['target list', 'platform list']
        preRunCommands = ['image list a.out', 'image dump sections a.out']
        stopCommands = ['frame variable', 'bt']
        exitCommands = ['expr 2+3', 'expr 3+4']
        self.build_and_launch(program,
                              initCommands=initCommands,
                              preRunCommands=preRunCommands,
                              stopCommands=stopCommands,
                              exitCommands=exitCommands)

        # Get output from the console. This should contain both the
        # "initCommands" and the "preRunCommands".
        output = self.get_console()
        # Verify all "initCommands" were found in console output
        self.verify_commands('initCommands', output, initCommands)
        # Verify all "preRunCommands" were found in console output
        self.verify_commands('preRunCommands', output, preRunCommands)

        source = 'main.c'
        first_line = line_number(source, '// breakpoint 1')
        second_line = line_number(source, '// breakpoint 2')
        lines = [first_line, second_line]

        # Set 2 breakpoints so we can verify that "stopCommands" get run as
        # the breakpoints get hit
        breakpoint_ids = self.set_source_breakpoints(source, lines)
        self.assertTrue(len(breakpoint_ids) == len(lines),
                        "expect correct number of breakpoints")

        # Continue after launch and hit the first breakpoint.
        # Get output from the console. This should contain the
        # "stopCommands" that were run after the first breakpoint was hit
        self.continue_to_breakpoints(breakpoint_ids)
        output = self.get_console(timeout=1.0)
        self.verify_commands('stopCommands', output, stopCommands)

        # Continue again and hit the second breakpoint.
        # Get output from the console. This should contain the
        # "stopCommands" that were run after the second breakpoint was hit
        self.continue_to_breakpoints(breakpoint_ids)
        output = self.get_console(timeout=1.0)
        self.verify_commands('stopCommands', output, stopCommands)

        # Continue until the program exits
        self.continue_to_exit()
        # Get output from the console. This should contain the
        # "exitCommands" that were run after the process exited
        output = self.get_console(timeout=1.0)
        self.verify_commands('exitCommands', output, exitCommands)
|
|
@ -0,0 +1,15 @@
|
|||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* Test inferior: echoes its argv and environment, then prints its working
 * directory. The "breakpoint N" marker comments are located dynamically by
 * the Python tests via line_number() -- keep their text intact. */
int main(int argc, char const *argv[], char const *envp[]) {
  for (int i=0; i<argc; ++i)
    printf("arg[%i] = \"%s\"\n", i, argv[i]);
  for (int i=0; envp[i]; ++i)
    printf("env[%i] = \"%s\"\n", i, envp[i]);
  /* getcwd(NULL, 0) allocates a buffer large enough for the path. */
  char *cwd = getcwd(NULL, 0);
  printf("cwd = \"%s\"\n", cwd); // breakpoint 1
  free(cwd);
  cwd = NULL;
  return 0; // breakpoint 2
}
|
|
@ -0,0 +1,288 @@
|
|||
from __future__ import print_function
|
||||
|
||||
from lldbsuite.test.lldbtest import *
|
||||
import os
|
||||
import vscode
|
||||
|
||||
|
||||
class VSCodeTestCaseBase(TestBase):
    """Base class for lldb-vscode test cases.

    Wraps a vscode.DebugAdaptor instance (self.vscode) and provides helpers
    for setting breakpoints, stepping, continuing, and inspecting adaptor
    output, plus launch/attach entry points that register cleanup hooks.
    """

    def create_debug_adaptor(self):
        '''Create the Visual Studio Code debug adaptor'''
        self.assertTrue(os.path.exists(self.lldbVSCodeExec),
                        'lldb-vscode must exist')
        self.vscode = vscode.DebugAdaptor(executable=self.lldbVSCodeExec)

    def build_and_create_debug_adaptor(self):
        '''Build the default target, then create the debug adaptor.'''
        self.build()
        self.create_debug_adaptor()

    def set_source_breakpoints(self, source_path, lines, condition=None,
                               hitCondition=None):
        '''Sets source breakpoints and returns an array of strings containing
           the breakpoint location IDs ("1.1", "1.2") for each breakpoint
           that was set.
        '''
        response = self.vscode.request_setBreakpoints(
            source_path, lines, condition=condition, hitCondition=hitCondition)
        if response is None:
            return []
        breakpoints = response['body']['breakpoints']
        breakpoint_ids = []
        for bp in breakpoints:
            # The adaptor packs the breakpoint ID into the upper 32 bits of
            # 'id' and the location ID into the lower 32 bits.
            response_id = bp['id']
            bp_id = response_id >> 32
            bp_loc_id = response_id & 0xffffffff
            breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id))
        return breakpoint_ids

    def set_function_breakpoints(self, functions, condition=None,
                                 hitCondition=None):
        '''Sets breakpoints by function name given an array of function names
           and returns an array of strings containing the breakpoint location
           IDs ("1.1", "1.2") for each breakpoint that was set.
        '''
        response = self.vscode.request_setFunctionBreakpoints(
            functions, condition=condition, hitCondition=hitCondition)
        if response is None:
            return []
        breakpoints = response['body']['breakpoints']
        breakpoint_ids = []
        for bp in breakpoints:
            # Same 32/32-bit ID packing as set_source_breakpoints().
            response_id = bp['id']
            bp_id = response_id >> 32
            bp_loc_id = response_id & 0xffffffff
            breakpoint_ids.append('%i.%i' % (bp_id, bp_loc_id))
        return breakpoint_ids

    def verify_breakpoint_hit(self, breakpoint_ids):
        '''Wait for the process we are debugging to stop, and verify we hit
           any breakpoint location in the "breakpoint_ids" array.
           "breakpoint_ids" should be a list of breakpoint location ID strings
           (["1.1", "2.1"]). The return value from
           self.set_source_breakpoints() can be passed to this function'''
        stopped_events = self.vscode.wait_for_stopped()
        for stopped_event in stopped_events:
            if 'body' in stopped_event:
                body = stopped_event['body']
                if 'reason' not in body:
                    continue
                if body['reason'] != 'breakpoint':
                    continue
                if 'description' not in body:
                    continue
                # Description is "breakpoint 1.1", so look for any location id
                # ("1.1") in the description field as verification that one of
                # the breakpoint locations was hit
                description = body['description']
                for breakpoint_id in breakpoint_ids:
                    if breakpoint_id in description:
                        return True
        return False

    def verify_exception_breakpoint_hit(self, filter_label):
        '''Wait for the process we are debugging to stop, and verify the stop
           reason is 'exception' and that the description matches
           'filter_label'
        '''
        stopped_events = self.vscode.wait_for_stopped()
        for stopped_event in stopped_events:
            if 'body' in stopped_event:
                body = stopped_event['body']
                if 'reason' not in body:
                    continue
                if body['reason'] != 'exception':
                    continue
                if 'description' not in body:
                    continue
                description = body['description']
                if filter_label == description:
                    return True
        return False

    def verify_commands(self, flavor, output, commands):
        '''Assert that every command in "commands" appears, echoed with the
           "(lldb) " prompt, somewhere in the console "output".'''
        self.assertTrue(output and len(output) > 0, "expect console output")
        lines = output.splitlines()
        prefix = '(lldb) '
        for cmd in commands:
            found = False
            for line in lines:
                if line.startswith(prefix) and cmd in line:
                    found = True
                    break
            self.assertTrue(found,
                            "verify '%s' found in console output for '%s'" % (
                                cmd, flavor))

    def get_dict_value(self, d, key_path):
        '''Verify each key in the key_path array is in contained in each
           dictionary within "d". Assert if any key isn't in the
           corresponding dictionary. This is handy for grabbing values from VS
           Code response dictionary like getting
           response['body']['stackFrames']
        '''
        value = d
        for key in key_path:
            if key in value:
                value = value[key]
            else:
                self.assertTrue(key in value,
                                'key "%s" from key_path "%s" not in "%s"' % (
                                    key, key_path, d))
        return value

    def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
                        dump=False):
        '''Request a stack trace; returns the 'stackFrames' array from the
           response body, or None if the request failed.'''
        response = self.vscode.request_stackTrace(threadId=threadId,
                                                  startFrame=startFrame,
                                                  levels=levels,
                                                  dump=dump)
        if response:
            return self.get_dict_value(response, ['body', 'stackFrames'])
        return None

    def get_source_and_line(self, threadId=None, frameIndex=0):
        '''Return (source_path, line) for the given frame, or ('', 0) when
           the frame has no source information.'''
        stackFrames = self.get_stackFrames(threadId=threadId,
                                           startFrame=frameIndex,
                                           levels=1)
        if stackFrames is not None:
            stackFrame = stackFrames[0]
            # NOTE: removed a stray no-op expression statement
            # (['source', 'path']) that had no effect here.
            if 'source' in stackFrame:
                source = stackFrame['source']
                if 'path' in source:
                    if 'line' in stackFrame:
                        return (source['path'], stackFrame['line'])
        return ('', 0)

    def get_stdout(self, timeout=0.0):
        return self.vscode.get_output('stdout', timeout=timeout)

    def get_console(self, timeout=0.0):
        return self.vscode.get_output('console', timeout=timeout)

    def get_local_as_int(self, name, threadId=None):
        '''Fetch a top-level local variable's value string and parse it as an
           integer (handles hex "0x..", octal "0..", and decimal).'''
        value = self.vscode.get_local_variable_value(name, threadId=threadId)
        if value.startswith('0x'):
            return int(value, 16)
        elif value.startswith('0'):
            return int(value, 8)
        else:
            return int(value)

    def set_local(self, name, value, id=None):
        '''Set a top level local variable only.'''
        return self.vscode.request_setVariable(1, name, str(value), id=id)

    def set_global(self, name, value, id=None):
        '''Set a top level global variable only.'''
        return self.vscode.request_setVariable(2, name, str(value), id=id)

    def stepIn(self, threadId=None, waitForStop=True):
        self.vscode.request_stepIn(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def stepOver(self, threadId=None, waitForStop=True):
        self.vscode.request_next(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def stepOut(self, threadId=None, waitForStop=True):
        self.vscode.request_stepOut(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def continue_to_next_stop(self):
        self.vscode.request_continue()
        return self.vscode.wait_for_stopped()

    def continue_to_breakpoints(self, breakpoint_ids):
        # Resume and require that one of the given breakpoint locations is
        # actually hit. (Previously the verification result was silently
        # discarded, so a missed breakpoint went unnoticed.)
        self.vscode.request_continue()
        self.assertTrue(self.verify_breakpoint_hit(breakpoint_ids),
                        'verify breakpoint hit from %s' % (breakpoint_ids))

    def continue_to_exception_breakpoint(self, filter_label):
        self.vscode.request_continue()
        self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
                        'verify we got "%s"' % (filter_label))

    def continue_to_exit(self, exitCode=0):
        '''Resume the process and verify it runs to completion with the
           expected exit code.'''
        self.vscode.request_continue()
        stopped_events = self.vscode.wait_for_stopped()
        self.assertTrue(len(stopped_events) == 1,
                        "expecting single 'exited' event")
        self.assertTrue(stopped_events[0]['event'] == 'exited',
                        'make sure program ran to completion')
        self.assertTrue(stopped_events[0]['body']['exitCode'] == exitCode,
                        'exitCode == %i' % (exitCode))

    def attach(self, program=None, pid=None, waitFor=None, trace=None,
               initCommands=None, preRunCommands=None, stopCommands=None,
               exitCommands=None, attachCommands=None):
        '''Build the default Makefile target, create the VSCode debug adaptor,
           and attach to the process.
        '''
        # Make sure we disconnect and terminate the VSCode debug adaptor even
        # if we throw an exception during the test case.
        def cleanup():
            self.vscode.request_disconnect(terminateDebuggee=True)
            self.vscode.terminate()

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)
        # Initialize and launch the program
        self.vscode.request_initialize()
        response = self.vscode.request_attach(
            program=program, pid=pid, waitFor=waitFor, trace=trace,
            initCommands=initCommands, preRunCommands=preRunCommands,
            stopCommands=stopCommands, exitCommands=exitCommands,
            attachCommands=attachCommands)
        if not (response and response['success']):
            # Guard against response being None so the failure message does
            # not itself raise a TypeError.
            self.fail('attach failed (%s)' % (
                response['message'] if response else 'no response'))

    def build_and_launch(self, program, args=None, cwd=None, env=None,
                         stopOnEntry=False, disableASLR=True,
                         disableSTDIO=False, shellExpandArguments=False,
                         trace=False, initCommands=None, preRunCommands=None,
                         stopCommands=None, exitCommands=None,
                         sourcePath=None, debuggerRoot=None):
        '''Build the default Makefile target, create the VSCode debug adaptor,
           and launch the process.
        '''
        self.build_and_create_debug_adaptor()
        self.assertTrue(os.path.exists(program), 'executable must exist')

        # Make sure we disconnect and terminate the VSCode debug adaptor even
        # if we throw an exception during the test case.
        def cleanup():
            self.vscode.request_disconnect(terminateDebuggee=True)
            self.vscode.terminate()

        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)

        # Initialize and launch the program
        self.vscode.request_initialize()
        response = self.vscode.request_launch(
            program,
            args=args,
            cwd=cwd,
            env=env,
            stopOnEntry=stopOnEntry,
            disableASLR=disableASLR,
            disableSTDIO=disableSTDIO,
            shellExpandArguments=shellExpandArguments,
            trace=trace,
            initCommands=initCommands,
            preRunCommands=preRunCommands,
            stopCommands=stopCommands,
            exitCommands=exitCommands,
            sourcePath=sourcePath,
            debuggerRoot=debuggerRoot)
        if not (response and response['success']):
            # Guard against response being None so the failure message does
            # not itself raise a TypeError.
            self.fail('launch failed (%s)' % (
                response['message'] if response else 'no response'))
|
|
@ -0,0 +1,5 @@
|
|||
LEVEL = ../../../make
|
||||
|
||||
C_SOURCES := main.c
|
||||
|
||||
include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,160 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
class TestVSCode_stackTrace(lldbvscode_testcase.VSCodeTestCaseBase):
    """Tests for the lldb-vscode 'stackTrace' request: full traces, paging
    via startFrame/levels, and out-of-bounds handling, using a program that
    recurses 20 deep."""

    mydir = TestBase.compute_mydir(__file__)
    # Key paths used with get_dict_value() to pull fields out of a frame.
    name_key_path = ['name']
    source_key_path = ['source', 'path']
    line_key_path = ['line']

    def verify_stackFrames(self, start_idx, stackFrames):
        '''Verify each frame in "stackFrames", assuming it starts at overall
           frame index "start_idx".'''
        frame_idx = start_idx
        for stackFrame in stackFrames:
            # Don't care about frames above main
            if frame_idx > 20:
                return
            self.verify_stackFrame(frame_idx, stackFrame)
            frame_idx += 1

    def verify_stackFrame(self, frame_idx, stackFrame):
        '''Verify a single frame: frame 0 is stopped at the recursion base
           case, frames 1..19 at the recursive call, frame 20 is main.'''
        frame_name = self.get_dict_value(stackFrame, self.name_key_path)
        frame_source = self.get_dict_value(stackFrame, self.source_key_path)
        frame_line = self.get_dict_value(stackFrame, self.line_key_path)
        if frame_idx == 0:
            expected_line = self.recurse_end
            expected_name = 'recurse'
        elif frame_idx < 20:
            expected_line = self.recurse_call
            expected_name = 'recurse'
        else:
            expected_line = self.recurse_invocation
            expected_name = 'main'
        self.assertEqual(frame_name, expected_name,
                         'frame #%i name "%s" == "%s"' % (
                             frame_idx, frame_name, expected_name))
        self.assertEqual(frame_source, self.source_path,
                         'frame #%i source "%s" == "%s"' % (
                             frame_idx, frame_source, self.source_path))
        self.assertEqual(frame_line, expected_line,
                         'frame #%i line %i == %i' % (frame_idx, frame_line,
                                                      expected_line))

    @skipIfWindows
    @skipIfDarwin  # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_stackTrace(self):
        '''
            Tests the 'stackTrace' packet and all its variants.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        source = 'main.c'
        self.source_path = os.path.join(os.getcwd(), source)
        self.recurse_end = line_number(source, 'recurse end')
        self.recurse_call = line_number(source, 'recurse call')
        self.recurse_invocation = line_number(source, 'recurse invocation')

        lines = [self.recurse_end]

        # Set breakpoint at the point of deepest recursion
        breakpoint_ids = self.set_source_breakpoints(source, lines)
        self.assertEqual(len(breakpoint_ids), len(lines),
                         "expect correct number of breakpoints")

        self.continue_to_breakpoints(breakpoint_ids)
        startFrame = 0
        # Verify we get all stack frames with no arguments
        stackFrames = self.get_stackFrames()
        frameCount = len(stackFrames)
        self.assertTrue(frameCount >= 20,
                        'verify we get at least 20 frames for all frames')
        self.verify_stackFrames(startFrame, stackFrames)

        # Verify all stack frames by specifying startFrame = 0 and levels not
        # specified
        stackFrames = self.get_stackFrames(startFrame=startFrame)
        self.assertEqual(frameCount, len(stackFrames),
                         ('verify same number of frames with startFrame=%i') % (
                             startFrame))
        self.verify_stackFrames(startFrame, stackFrames)

        # Verify all stack frames by specifying startFrame = 0 and levels = 0
        levels = 0
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(frameCount, len(stackFrames),
                         ('verify same number of frames with startFrame=%i and'
                          ' levels=%i') % (startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Get only the first stack frame by specifying startFrame = 0 and
        # levels = 1
        levels = 1
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(levels, len(stackFrames),
                         ('verify one frame with startFrame=%i and'
                          ' levels=%i') % (startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Get only the first 3 stack frames by specifying startFrame = 0 and
        # levels = 3
        levels = 3
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(levels, len(stackFrames),
                         ('verify %i frames with startFrame=%i and'
                          ' levels=%i') % (levels, startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Get 16 frames starting at frame 5 by specifying startFrame = 5 and
        # levels = 16
        startFrame = 5
        levels = 16
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(levels, len(stackFrames),
                         ('verify %i frames with startFrame=%i and'
                          ' levels=%i') % (levels, startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Verify we cap things correctly when we ask for too many frames
        startFrame = 5
        levels = 1000
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(len(stackFrames), frameCount - startFrame,
                         ('verify frame count is capped with startFrame=%i and'
                          ' levels=%i') % (startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Verify levels=0 works with a non-zero start frame
        startFrame = 5
        levels = 0
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(len(stackFrames), frameCount - startFrame,
                         ('verify all remaining frames with startFrame=%i and'
                          ' levels=%i') % (startFrame, levels))
        self.verify_stackFrames(startFrame, stackFrames)

        # Verify we get no frames when startFrame is too high
        startFrame = 1000
        levels = 1
        stackFrames = self.get_stackFrames(startFrame=startFrame,
                                           levels=levels)
        self.assertEqual(0, len(stackFrames),
                         'verify zero frames with startFrame out of bounds')
|
|
@ -0,0 +1,13 @@
|
|||
#include <stdio.h>
|
||||
#include <unistd.h>
|
||||
|
||||
/* Recursion fixture for the stackTrace tests: recurse(20) builds a 20-deep
 * call stack. The marker comments ("recurse end/call/invocation") are found
 * dynamically by the tests via line_number() -- keep their text intact. */
int recurse(int x) {
  if (x <= 1)
    return 1; // recurse end
  return recurse(x-1) + x; // recurse call
}

int main(int argc, char const *argv[]) {
  recurse(20); // recurse invocation
  return 0;
}
|
|
@ -0,0 +1,7 @@
|
|||
LEVEL = ../../../make
|
||||
|
||||
ENABLE_THREADS := YES
|
||||
|
||||
CXX_SOURCES := main.cpp
|
||||
|
||||
include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,79 @@
|
|||
"""
|
||||
Test lldb-vscode setBreakpoints request
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
class TestVSCode_step(lldbvscode_testcase.VSCodeTestCaseBase):
    """Exercise the debug adapter's stepIn/stepOut/next (step over) requests
    and verify each step lands where expected inside a recursive function."""

    mydir = TestBase.compute_mydir(__file__)

    @skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_step(self):
        '''
            Tests the stepping in/out/over in threads.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        source = 'main.cpp'
        # Stop on the recursive call line so the stack has a frame we can
        # step into and back out of.
        breakpoint1_line = line_number(source, '// breakpoint 1')
        lines = [breakpoint1_line]
        # Set breakpoint in the thread function so we can step the threads
        breakpoint_ids = self.set_source_breakpoints(source, lines)
        self.assertEqual(len(breakpoint_ids), len(lines),
                        "expect correct number of breakpoints")
        self.continue_to_breakpoints(breakpoint_ids)
        threads = self.vscode.get_threads()
        for thread in threads:
            if 'reason' in thread:
                reason = thread['reason']
                if reason == 'breakpoint':
                    # We have a thread that is stopped at our breakpoint.
                    # Get the value of "x" and get the source file and line.
                    # These will help us determine if we are stepping
                    # correctly. If we step a thread correctly we will verify
                    # the correct value for x as it progresses through the
                    # program.
                    tid = thread['id']
                    x1 = self.get_local_as_int('x', threadId=tid)
                    (src1, line1) = self.get_source_and_line(threadId=tid)

                    # Now step into the recursive function call again and
                    # verify, using the new value of "x" and the source file
                    # and line, that we stepped correctly. The callee was
                    # invoked with x-1, and its body starts above the call.
                    self.stepIn(threadId=tid, waitForStop=True)
                    x2 = self.get_local_as_int('x', threadId=tid)
                    (src2, line2) = self.get_source_and_line(threadId=tid)
                    self.assertEqual(x1, x2 + 1, 'verify step in variable')
                    self.assertLess(line2, line1, 'verify step in line')
                    self.assertEqual(src1, src2, 'verify step in source')

                    # Now step out and verify we are back in the caller frame.
                    self.stepOut(threadId=tid, waitForStop=True)
                    x3 = self.get_local_as_int('x', threadId=tid)
                    (src3, line3) = self.get_source_and_line(threadId=tid)
                    self.assertEqual(x1, x3, 'verify step out variable')
                    self.assertGreaterEqual(line3, line1, 'verify step out line')
                    # Fixed copy/paste: this message previously said "step in".
                    self.assertEqual(src1, src3, 'verify step out source')

                    # Step over and verify: same frame ("x" unchanged), next
                    # source line.
                    self.stepOver(threadId=tid, waitForStop=True)
                    x4 = self.get_local_as_int('x', threadId=tid)
                    (src4, line4) = self.get_source_and_line(threadId=tid)
                    self.assertEqual(x4, x3, 'verify step over variable')
                    self.assertGreater(line4, line3, 'verify step over line')
                    self.assertEqual(src1, src4, 'verify step over source')
                    # only step one thread that is at the breakpoint and stop
                    break
|
|
@ -0,0 +1,10 @@
|
|||
// Test inferior for the lldb-vscode stepping test. The "// breakpoint 1"
// comment is a source marker matched by the sibling Python test via
// line_number(source, '// breakpoint 1').
//
// For even x, function() recurses once with x-1 (odd), so a test stopped at
// the marker can step in, out, and over with well-defined line ordering;
// odd x simply returns itself.
int function(int x) {
  if ((x % 2) == 0)
    return function(x-1) + x; // breakpoint 1
  else
    return x;
}

int main(int argc, char const *argv[]) {
  return function(2);
}
|
|
@ -0,0 +1,5 @@
|
|||
# Build rules for a lldb-vscode test inferior.
# LEVEL points at the shared lldb test-suite make machinery.
LEVEL = ../../../make

CXX_SOURCES := main.cpp

include $(LEVEL)/Makefile.rules
|
|
@ -0,0 +1,225 @@
|
|||
"""
|
||||
Test lldb-vscode scopes, variables, setVariable and evaluate requests
|
||||
"""
|
||||
|
||||
from __future__ import print_function
|
||||
|
||||
import unittest2
|
||||
import vscode
|
||||
from lldbsuite.test.decorators import *
|
||||
from lldbsuite.test.lldbtest import *
|
||||
from lldbsuite.test import lldbutil
|
||||
import lldbvscode_testcase
|
||||
import os
|
||||
|
||||
|
||||
def make_buffer_verify_dict(start_idx, count, offset=0):
    """Build the expected-value dict for `count` consecutive int array
    elements starting at index `start_idx`; element i is expected to hold
    the value i + offset. Keys are rendered like "[3]" to match the
    synthetic child names the adapter reports for array elements."""
    return {
        '[%i]' % (idx): {'type': 'int', 'value': str(idx + offset)}
        for idx in range(start_idx, start_idx + count)
    }
|
||||
|
||||
|
||||
class TestVSCode_variables(lldbvscode_testcase.VSCodeTestCaseBase):
    """Exercise the "scopes", "variables", "setVariable" and "evaluate"
    debug-adapter requests against a small C++ inferior."""

    mydir = TestBase.compute_mydir(__file__)

    def verify_values(self, verify_dict, actual, varref_dict=None):
        """Check one actual variable/result dict against its expectations.

        verify_dict may contain:
          'equals'     -- keys whose actual values must compare equal
          'startswith' -- keys whose actual string values must start with
                          the given prefix
          'hasVariablesReference' -- if truthy, `actual` must carry a
                          variablesReference
          'children'   -- nested expectations, fetched with a follow-up
                          "variables" request against the reference
        When varref_dict is given, any non-zero variablesReference is
        recorded under the variable's evaluateName for later lookups.
        """
        if 'equals' in verify_dict:
            verify = verify_dict['equals']
            for key in verify:
                verify_value = verify[key]
                actual_value = actual[key]
                self.assertEqual(actual_value, verify_value,
                                 '"%s" keys don\'t match (%s != %s)' % (
                                     key, actual_value, verify_value))
        if 'startswith' in verify_dict:
            verify = verify_dict['startswith']
            for key in verify:
                verify_value = verify[key]
                actual_value = actual[key]
                startswith = actual_value.startswith(verify_value)
                # Fixed stray ")" that previously ended this message.
                self.assertTrue(startswith,
                                ('"%s" value "%s" doesn\'t start with'
                                 ' "%s"') % (
                                    key, actual_value,
                                    verify_value))
        hasVariablesReference = 'variablesReference' in actual
        varRef = None
        if hasVariablesReference:
            # Remember variable references in case we want to test further
            # by using the evaluate name.
            varRef = actual['variablesReference']
            if varRef != 0 and varref_dict is not None:
                varref_dict[actual['evaluateName']] = varRef
        if ('hasVariablesReference' in verify_dict and
                verify_dict['hasVariablesReference']):
            self.assertTrue(hasVariablesReference,
                            "verify variable reference")
        if 'children' in verify_dict:
            self.assertTrue(hasVariablesReference and varRef is not None and
                            varRef != 0,
                            ("children verify values specified for "
                             "variable without children"))

            response = self.vscode.request_variables(varRef)
            self.verify_variables(verify_dict['children'],
                                  response['body']['variables'],
                                  varref_dict)

    def verify_variables(self, verify_dict, variables, varref_dict=None):
        """Verify each variable in `variables` against its entry in
        `verify_dict` (every reported name must have an entry)."""
        for variable in variables:
            name = variable['name']
            self.assertIn(name, verify_dict,
                          'variable "%s" in verify dictionary' % (name))
            self.verify_values(verify_dict[name], variable, varref_dict)

    @skipIfWindows
    @skipIfDarwin # Skip this test for now until we can figure out why things aren't working on build bots
    @no_debug_info_test
    def test_scopes_variables_setVariable_evaluate(self):
        '''
            Tests the "scopes", "variables", "setVariable", and "evaluate"
            packets.
        '''
        program = self.getBuildArtifact("a.out")
        self.build_and_launch(program)
        source = 'main.cpp'
        breakpoint1_line = line_number(source, '// breakpoint 1')
        lines = [breakpoint1_line]
        # Set breakpoint in the thread function so we can step the threads
        breakpoint_ids = self.set_source_breakpoints(source, lines)
        self.assertEqual(len(breakpoint_ids), len(lines),
                         "expect correct number of breakpoints")
        self.continue_to_breakpoints(breakpoint_ids)
        # Renamed from "locals"/"globals" to stop shadowing the builtins.
        local_vars = self.vscode.get_local_variables()
        global_vars = self.vscode.get_global_variables()
        buffer_children = make_buffer_verify_dict(0, 32)
        verify_locals = {
            'argc': {
                'equals': {'type': 'int', 'value': '1'}
            },
            'argv': {
                'equals': {'type': 'const char **'},
                'startswith': {'value': '0x'},
                'hasVariablesReference': True
            },
            'pt': {
                'equals': {'type': 'PointType'},
                'hasVariablesReference': True,
                'children': {
                    'x': {'equals': {'type': 'int', 'value': '11'}},
                    'y': {'equals': {'type': 'int', 'value': '22'}},
                    'buffer': {'children': buffer_children}
                }
            }
        }
        verify_globals = {
            's_local': {
                'equals': {'type': 'float', 'value': '2.25'}
            },
            '::g_global': {
                'equals': {'type': 'int', 'value': '123'}
            },
            's_global': {
                'equals': {'type': 'int', 'value': '234'}
            },
        }
        varref_dict = {}
        self.verify_variables(verify_locals, local_vars, varref_dict)
        self.verify_variables(verify_globals, global_vars, varref_dict)
        # We need to test the functionality of the "variables" request as it
        # has optional parameters like "start" and "count" to limit the number
        # of variables that are fetched
        varRef = varref_dict['pt.buffer']
        response = self.vscode.request_variables(varRef)
        self.verify_variables(buffer_children, response['body']['variables'])
        # Verify setting start=0 in the arguments still gets all children
        response = self.vscode.request_variables(varRef, start=0)
        self.verify_variables(buffer_children, response['body']['variables'])
        # Verify setting count=0 in the arguments still gets all children.
        # If count is zero, it means to get all children.
        response = self.vscode.request_variables(varRef, count=0)
        self.verify_variables(buffer_children, response['body']['variables'])
        # Verify setting count to a value that is too large in the arguments
        # still gets all children, and no more
        response = self.vscode.request_variables(varRef, count=1000)
        self.verify_variables(buffer_children, response['body']['variables'])
        # Verify setting the start index and count gets only the children we
        # want
        response = self.vscode.request_variables(varRef, start=5, count=5)
        self.verify_variables(make_buffer_verify_dict(5, 5),
                              response['body']['variables'])
        # Verify setting the start index to a value that is out of range
        # results in an empty list
        response = self.vscode.request_variables(varRef, start=32, count=1)
        self.assertEqual(len(response['body']['variables']), 0,
                         'verify we get no variable back for invalid start')

        # Test evaluate
        expressions = {
            'pt.x': {
                'equals': {'result': '11', 'type': 'int'},
                'hasVariablesReference': False
            },
            'pt.buffer[2]': {
                'equals': {'result': '2', 'type': 'int'},
                'hasVariablesReference': False
            },
            'pt': {
                'equals': {'type': 'PointType'},
                'startswith': {'result': 'PointType @ 0x'},
                'hasVariablesReference': True
            },
            'pt.buffer': {
                'equals': {'type': 'int [32]'},
                'startswith': {'result': 'int [32] @ 0x'},
                'hasVariablesReference': True
            },
            'argv': {
                'equals': {'type': 'const char **'},
                'startswith': {'result': '0x'},
                'hasVariablesReference': True
            },
            'argv[0]': {
                'equals': {'type': 'const char *'},
                'startswith': {'result': '0x'},
                'hasVariablesReference': True
            },
            '2+3': {
                'equals': {'result': '5', 'type': 'int'},
                'hasVariablesReference': False
            },
        }
        for expression in expressions:
            response = self.vscode.request_evaluate(expression)
            self.verify_values(expressions[expression], response['body'])

        # Test setting variables
        self.set_local('argc', 123)
        argc = self.get_local_as_int('argc')
        self.assertEqual(argc, 123,
                         'verify argc was set to 123 (123 != %i)' % (argc))

        self.set_local('argv', 0x1234)
        argv = self.get_local_as_int('argv')
        self.assertEqual(argv, 0x1234,
                         'verify argv was set to 0x1234 (0x1234 != %#x)' % (
                             argv))

        # Set a variable value whose name is synthetic, like a variable index
        # and verify the value by reading it
        self.vscode.request_setVariable(varRef, "[0]", 100)
        response = self.vscode.request_variables(varRef, start=0, count=1)
        self.verify_variables(make_buffer_verify_dict(0, 1, 100),
                              response['body']['variables'])

        # Set a variable value whose name is a real child value, like "pt.x"
        # and verify the value by reading it
        varRef = varref_dict['pt']
        self.vscode.request_setVariable(varRef, "x", 111)
        response = self.vscode.request_variables(varRef, start=0, count=1)
        value = response['body']['variables'][0]['value']
        self.assertEqual(value, '111',
                         'verify pt.x got set to 111 (111 != %s)' % (value))
|
|
@ -0,0 +1,18 @@
|
|||
|
||||
// Test inferior for the lldb-vscode variables test. "// breakpoint 1" marks
// the line where the test stops to inspect locals and globals; the literal
// values here (11, 22, 123, 234, 2.25, buffer[i] == i) are pinned by the
// Python test's expectations.
#define BUFFER_SIZE 32
struct PointType {
  int x;
  int y;
  int buffer[BUFFER_SIZE];
};

int g_global = 123;         // the test looks this up as "::g_global"
static int s_global = 234;

int main(int argc, char const *argv[]) {
  static float s_local = 2.25;
  PointType pt = { 11,22, {0}};
  // Fill the buffer so pt.buffer[i] == i, matching make_buffer_verify_dict.
  for (int i=0; i<BUFFER_SIZE; ++i)
    pt.buffer[i] = i;
  return s_global - g_global - pt.y; // breakpoint 1
}
|
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue