Add '-e' and '-x' options to the test driver to be able to specify an executable (full path)

and the breakpoint specification for the benchmark purpose.  This is used by TestSteppingSpeed.py
to benchmark the lldb stepping speed.  Without '-e' and '-x' specified, the test defaults to
run the built lldb against itself and stopped on Driver::MainLoop, then stepping for 50 times.

rdar://problem/7511193

llvm-svn: 141584
This commit is contained in:
Johnny Chen 2011-10-10 22:03:44 +00:00
parent ffd530f2c0
commit a10e6c1052
2 changed files with 114 additions and 1 deletions

View File

@ -0,0 +1,82 @@
"""Test lldb's stepping speed."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class SteppingSpeedBench(BenchBase):
mydir = os.path.join("benchmarks", "stepping")
def setUp(self):
BenchBase.setUp(self)
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
bmExecutableDefauled = False
else:
self.exe = self.lldbHere
bmExecutableDefauled = True
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
if bmExecutableDefauled:
self.break_spec = '-F Driver::MainLoop()'
else:
self.break_spec = '-n main'
self.stepping_avg = None
#print "self.exe=%s" % self.exe
#print "self.break_spec=%s" % self.break_spec
@benchmarks_test
def test_run_lldb_steppings(self):
"""Test lldb steppings on a large executable."""
print
self.run_lldb_steppings(self.exe, self.break_spec, 50)
print "lldb stepping benchmark:", self.stopwatch
def run_lldb_steppings(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s %s' % (self.lldbHere, self.lldbOption, exe))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
child.expect_exact(prompt)
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('run')
child.expect_exact(prompt)
# Reset the stopwatch now.
self.stopwatch.reset()
for i in range(count):
with self.stopwatch:
# Disassemble the function.
child.sendline('next') # Aka 'thread step-over'.
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
self.stepping_avg = self.stopwatch.avg()
if self.TraceOn():
print "lldb stepping benchmark:", str(self.stopwatch)
self.child = None
if __name__ == '__main__':
    import atexit
    lldb.SBDebugger.Initialize()
    # Tear the debugger down when the interpreter exits; registering the
    # bound method directly avoids the lambda wrapper.
    atexit.register(lldb.SBDebugger.Terminate)
    unittest2.main()

View File

@ -103,6 +103,11 @@ delay = False
# Dump the Python sys.path variable. Use '-D' to dump sys.path.
dumpSysPath = False
# Full path of the benchmark executable, as specified by the '-e' option.
bmExecutable = None
# The breakpoint specification of bmExecutable, as specified by the '-x' option.
bmBreakpointSpec = None
# By default, failfast is False. Use '-F' to overwrite it.
failfast = False
@ -174,6 +179,8 @@ where options:
will override those specified via a config file
(see also lldb-trunk/example/test/usage-config)
-d : delay startup for 10 seconds (in order for the debugger to attach)
-e : specify the full path of an executable used for benchmark purpose;
     see also '-x', which provides the breakpoint specification
-F : failfast, stop the test suite on the first error/failure
-f : specify a filter, which consists of the test class name, a dot, followed by
the test method, to only admit such test into the test suite
@ -193,6 +200,8 @@ where options:
timestamp as the session dir name
-t : turn on tracing of lldb command and other detailed test executions
-v : do verbose mode of unittest framework (print out each test case invocation)
-x : specify the breakpoint specification for the benchmark executable;
see also '-e', which provides the full path of the executable
-w : insert some wait time (currently 0.5 sec) between consecutive test cases
-# : Repeat the test suite for a specified number of times
@ -302,6 +311,8 @@ def parseOptionsAndInitTestdirs():
global count
global delay
global dumpSysPath
global bmExecutable
global bmBreakpointSpec
global failfast
global filters
global fs4all
@ -383,6 +394,15 @@ def parseOptionsAndInitTestdirs():
elif sys.argv[index].startswith('-d'):
delay = True
index += 1
elif sys.argv[index].startswith('-e'):
# Increment by 1 to fetch the full path of the benchmark executable.
index += 1
if index >= len(sys.argv) or sys.argv[index].startswith('-'):
usage()
bmExecutable = sys.argv[index]
if not is_exe(bmExecutable):
usage()
index += 1
elif sys.argv[index].startswith('-F'):
failfast = True
index += 1
@ -435,6 +455,13 @@ def parseOptionsAndInitTestdirs():
elif sys.argv[index].startswith('-w'):
os.environ["LLDB_WAIT_BETWEEN_TEST_CASES"] = 'YES'
index += 1
elif sys.argv[index].startswith('-x'):
# Increment by 1 to fetch the breakpoint specification of the benchmark executable.
index += 1
if index >= len(sys.argv) or sys.argv[index].startswith('-'):
usage()
bmBreakpointSpec = sys.argv[index]
index += 1
elif sys.argv[index].startswith('-#'):
# Increment by 1 to fetch the repeat count argument.
index += 1
@ -854,11 +881,15 @@ lldb.DBG = lldb.SBDebugger.Create()
# Put the blacklist in the lldb namespace, to be used by lldb.TestBase.
lldb.blacklist = blacklist
# Put dont/just_do_python_api_test in the lldb namespace, too.
# Put dont/just_do_python_api_test in the lldb namespace.
lldb.dont_do_python_api_test = dont_do_python_api_test
lldb.just_do_python_api_test = just_do_python_api_test
lldb.just_do_benchmarks_test = just_do_benchmarks_test
# Put bmExecutable and bmBreakpointSpec into the lldb namespace, too.
lldb.bmExecutable = bmExecutable
lldb.bmBreakpointSpec = bmBreakpointSpec
# Turn on lldb loggings if necessary.
lldbLoggings()