    def setUp(self):
        BenchBase.setUp(self)
-        if configuration.bmExecutable:
-            self.exe = configuration.bmExecutable
-        else:
-            self.exe = lldbtest_config.lldbExec
+        self.exe = lldbtest_config.lldbExec
        self.count = configuration.bmIterationCount
        if self.count <= 0:
            self.count = 10
import os, sys
import lldb
from lldbsuite.test import configuration
+from lldbsuite.test import lldbtest_config
from lldbsuite.test.lldbbench import *
class FrameVariableResponseBench(BenchBase):
    def setUp(self):
        BenchBase.setUp(self)
-        if configuration.bmExecutable:
-            self.exe = configuration.bmExecutable
-        else:
-            self.exe = lldbtest_config.lldbExec
+        self.exe = lldbtest_config.lldbExec
        if configuration.bmBreakpointSpec:
            self.break_spec = configuration.bmBreakpointSpec
        else:
import os, sys
import lldb
from lldbsuite.test import configuration
+from lldbsuite.test import lldbtest_config
from lldbsuite.test.lldbbench import *
class StartupDelaysBench(BenchBase):
        self.stopwatch2 = Stopwatch()
        # Create self.stopwatch3 for measuring "run to breakpoint".
        self.stopwatch3 = Stopwatch()
-        if configuration.bmExecutable:
-            self.exe = configuration.bmExecutable
-        else:
-            self.exe = lldbtest_config.lldbExec
+        self.exe = lldbtest_config.lldbExec
        if configuration.bmBreakpointSpec:
            self.break_spec = configuration.bmBreakpointSpec
        else:
import os, sys
import lldb
from lldbsuite.test import configuration
+from lldbsuite.test import lldbtest_config
from lldbsuite.test.lldbbench import *
class SteppingSpeedBench(BenchBase):
    def setUp(self):
        BenchBase.setUp(self)
-        if configuration.bmExecutable:
-            self.exe = configuration.bmExecutable
-        else:
-            self.exe = lldbtest_config.lldbExec
+        self.exe = lldbtest_config.lldbExec
        if configuration.bmBreakpointSpec:
            self.break_spec = configuration.bmBreakpointSpec
        else:
# just that.
cflags_extras = ''
-# Full path of the benchmark executable, as specified by the '-e' option.
-bmExecutable = None
-# The breakpoint specification of bmExecutable, as specified by the '-x' option.
+# The breakpoint specification of the benchmark exe, as specified by the '-x' option.
bmBreakpointSpec = None
# The benchmark iteration count, as specified by the '-y' option.
bmIterationCount = -1
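With bmExecutable removed, the benchmarks simply reuse the lldb binary the suite is already testing. A minimal sketch of what a bench test's setUp now depends on, assuming (not shown in this patch) that dotest.py fills in lldbtest_config.lldbExec from the --executable option:

    from lldbsuite.test import configuration
    from lldbsuite.test import lldbtest_config

    # The debugger under test doubles as the benchmark target.
    exe = lldbtest_config.lldbExec
    # Iteration count and breakpoint spec are still taken from -y and -x.
    count = configuration.bmIterationCount if configuration.bmIterationCount > 0 else 10
    break_spec = configuration.bmBreakpointSpec  # each test supplies its own default otherwise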
        sys.stdout.flush()
        os.kill(os.getpid(), signal.SIGSTOP)
-    if args.e:
-        if args.e.startswith('-'):
-            usage(parser)
-        configuration.bmExecutable = args.e
-        if not is_exe(configuration.bmExecutable):
-            usage(parser)
-
    if args.F:
        configuration.failfast = True
    group = parser.add_argument_group('Configuration options')
    group.add_argument('--framework', metavar='framework-path', help='The path to LLDB.framework')
    group.add_argument('--executable', metavar='executable-path', help='The path to the lldb executable')
-    group.add_argument('-e', metavar='benchmark-exe', help='Specify the full path of an executable used for benchmark purposes (see also: -x)')
    group.add_argument('-k', metavar='command', action='append', help="Specify a runhook, which is an lldb command to be executed by the debugger; The option can occur multiple times. The commands are executed one after the other to bring the debugger to a desired state, so that, for example, further benchmarking can be done")
    group.add_argument('-R', metavar='dir', help='Specify a directory to relocate the tests and their intermediate files to. BE WARNED THAT the directory, if exists, will be deleted before running this test driver. No cleanup of intermediate test files is performed in this case')
    group.add_argument('-r', metavar='dir', help="Similar to '-R', except that the directory must not exist before running this test driver")
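For reference, a benchmark run after this change is driven entirely by the surviving options; a hypothetical invocation (the paths and the breakpoint spec are placeholders, not taken from this patch):

    python dotest.py --executable /path/to/lldb -x '-n main' -y 25 <benchmark-test-dir>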