2 # Copyright 2014 the V8 project authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
7 Performance runner for d8.
9 Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
11 The suite json format is expected to be:
13 "path": <relative path chunks to benchmark resources and main file>,
14 "name": <optional suite name, file name is default>,
15 "archs": [<architecture name for which this suite is run>, ...],
16 "binary": <name of binary to run, default "d8">,
17 "flags": [<flag to d8>, ...],
18 "run_count": <how often will this suite run (optional)>,
19 "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
20 "resources": [<js file to be loaded before main>, ...]
21 "main": <main js benchmark runner file>,
22 "results_regexp": <optional regexp>,
23 "results_processor": <optional python results processor script>,
24 "units": <the unit specification for the performance dashboard>,
27 "name": <name of the benchmark>,
28 "results_regexp": <optional more specific regexp>,
29 "results_processor": <optional python results processor script>,
30 "units": <the unit specification for the performance dashboard>,
35 The benchmarks field can also nest other suites in arbitrary depth. A suite
36 with a "main" file is a leaf suite that can contain one more level of
39 A suite's results_regexp is expected to have one string place holder
40 "%s" for the benchmark name. A benchmark's results_regexp overwrites suite
43 A suite's results_processor may point to an optional python script. If
44 specified, it is called after running the benchmarks like this (with a path
45 relative to the suite level's path):
46 <results_processor file> <same flags as for d8> <suite level name> <output>
48 The <output> is a temporary file containing d8 output. The results_regexp will
49 be applied to the output of this script.
51 A suite without "benchmarks" is considered a benchmark itself.
53 Full example (suite with one runner):
56 "flags": ["--expose-gc"],
57 "archs": ["ia32", "x64"],
61 "results_regexp": "^%s: (.+)$",
65 {"name": "DeltaBlue"},
66 {"name": "NavierStokes",
67 "results_regexp": "^NavierStokes: (.+)$"}
71 Full example (suite with several runners):
74 "flags": ["--expose-gc"],
75 "archs": ["ia32", "x64"],
83 "results_regexp": "^Richards: (.+)$"},
84 {"name": "NavierStokes",
85 "path": ["navier_stokes"],
87 "results_regexp": "^NavierStokes: (.+)$"}
91 Path pieces are concatenated. D8 is always run with the suite's path as cwd.
100 from testrunner.local import commands
101 from testrunner.local import utils
103 ARCH_GUESS = utils.DefaultArch()
104 SUPPORTED_ARCHS = ["android_arm",
class Results(object):
  """Accumulator for result traces and error messages across suite runs."""
  def __init__(self, traces=None, errors=None):
    # Avoid mutable default arguments; each instance gets fresh lists.
    self.traces = traces or []
    self.errors = errors or []

  def ToDict(self):
    """Returns a plain-dict view suitable for json serialization."""
    return {"traces": self.traces, "errors": self.errors}

  def WriteToFile(self, file_name):
    """Dumps the accumulated results as json to file_name."""
    with open(file_name, "w") as f:
      f.write(json.dumps(self.ToDict()))

  def __add__(self, other):
    """Merges another Results in place; returning self supports `results += ...`."""
    self.traces += other.traces
    self.errors += other.errors
    return self

  def __str__(self):  # pragma: no cover
    return str(self.ToDict())
class Node(object):
  """Represents a node in the benchmark suite tree structure."""
  def __init__(self, *args):
    # Child graphs; appended via AppendChild and traversed via _children
    # (see FlattenRunnables). *args is accepted and ignored so subclasses
    # can chain super().__init__() uniformly.
    self._children = []

  def AppendChild(self, child):
    """Attaches a child node to this node."""
    self._children.append(child)
class DefaultSentinel(Node):
  """Fake parent node with all default values."""
  def __init__(self):
    super(DefaultSentinel, self).__init__()
    # Accumulated values start empty (Graph.__init__ copies and extends
    # parent.path/graphs/flags/resources).
    self.path = []
    self.graphs = []
    self.flags = []
    self.resources = []
    # Discrete defaults (Graph.__init__ reads parent.binary, parent.run_count
    # and parent.units). "d8" is the documented default binary.
    self.binary = "d8"
    # NOTE(review): exact default values for run_count/units were lost in
    # this mangled extract — reconstructed; confirm against upstream.
    self.run_count = 10
    self.units = "score"
    self.results_regexp = None
class Graph(Node):
  """Represents a benchmark suite definition.

  Can either be a leaf or an inner node that provides default values.
  """
  def __init__(self, suite, parent, arch):
    super(Graph, self).__init__()
    # Keep the raw suite dict; Runnable.main reads suite["main"] from it.
    self._suite = suite

    assert isinstance(suite.get("path", []), list)
    assert isinstance(suite["name"], basestring)
    assert isinstance(suite.get("flags", []), list)
    assert isinstance(suite.get("resources", []), list)

    # Accumulated values: copies of the parent's lists extended with this
    # suite's entries, so siblings never share state.
    self.path = parent.path[:] + suite.get("path", [])
    self.graphs = parent.graphs[:] + [suite["name"]]
    self.flags = parent.flags[:] + suite.get("flags", [])
    self.resources = parent.resources[:] + suite.get("resources", [])

    # Discrete values (with parent defaults). An arch-specific
    # "run_count_<arch>" key overrides the generic "run_count".
    self.binary = suite.get("binary", parent.binary)
    self.run_count = suite.get("run_count", parent.run_count)
    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
    self.units = suite.get("units", parent.units)

    # A regular expression for results. If the parent graph provides a
    # regexp and the current suite has none, a string place holder for the
    # suite name is expected.
    # TODO(machenbach): Currently that makes only sense for the leaf level.
    # Multiple place holders for multiple levels are not supported.
    if parent.results_regexp:
      regexp_default = parent.results_regexp % suite["name"]
    else:
      regexp_default = None
    self.results_regexp = suite.get("results_regexp", regexp_default)
class Trace(Graph):
  """Represents a leaf in the benchmark suite tree structure.

  Handles collection of measurements.
  """
  def __init__(self, suite, parent, arch):
    super(Trace, self).__init__(suite, parent, arch)
    # A leaf must know how to extract its result from the output.
    assert self.results_regexp
    # Filled by ConsumeOutput; read back in GetResults.
    self.results = []
    self.errors = []

  def ConsumeOutput(self, stdout):
    """Extracts this trace's measurement from one run's output.

    Records an error message instead of raising when the regexp doesn't
    match (explicit None check rather than a bare except around .group()).
    """
    match = re.search(self.results_regexp, stdout, re.M)
    if match:
      self.results.append(match.group(1))
    else:
      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
                         % (self.results_regexp, self.graphs[-1]))

  def GetResults(self):
    """Wraps the collected measurements in a Results object.

    Runnable.Run folds traces together with `r + t.GetResults()`, so this
    must return a Results instance.
    """
    # NOTE(review): the exact trace-dict schema was partially lost in this
    # mangled extract; "units"/"stddev" reconstructed — confirm upstream.
    return Results([{
      "graphs": self.graphs,
      "units": self.units,
      "results": self.results,
      "stddev": "",
    }], self.errors)
class Runnable(Graph):
  """Represents a runnable benchmark suite definition (i.e. has a main file).
  """
  @property
  def main(self):
    # Property (not a method): GetCommand appends self.main directly to the
    # command list.
    return self._suite["main"]

  def ChangeCWD(self, suite_path):
    """Changes the cwd to the path defined in the current graph.

    The benchmarks are supposed to be relative to the suite configuration.
    """
    suite_dir = os.path.abspath(os.path.dirname(suite_path))
    bench_dir = os.path.normpath(os.path.join(*self.path))
    os.chdir(os.path.join(suite_dir, bench_dir))

  def GetCommand(self, shell_dir):
    """Assembles the shell command: binary, flags, resources, then main."""
    # TODO(machenbach): This requires +.exe if run on windows.
    return (
        [os.path.join(shell_dir, self.binary)] +
        self.flags +
        self.resources +
        [self.main]
    )

  def Run(self, runner):
    """Iterates over several runs and handles the output for all traces.

    runner is a generator of per-run stdout strings; every child trace
    consumes each run's output, and the per-trace Results are folded into
    one combined Results.
    """
    for stdout in runner():
      for trace in self._children:
        trace.ConsumeOutput(stdout)
    return reduce(lambda r, t: r + t.GetResults(), self._children, Results())
class RunnableTrace(Trace, Runnable):
  """A leaf suite that is itself runnable (has its own main file)."""
  def __init__(self, suite, parent, arch):
    super(RunnableTrace, self).__init__(suite, parent, arch)

  def Run(self, runner):
    """Feeds every run's output to this single trace and returns its results."""
    for output in runner():
      self.ConsumeOutput(output)
    return self.GetResults()
def MakeGraph(suite, arch, parent):
  """Factory method for making graph objects.

  Picks the node type from the suite dict: Trace below a Runnable,
  Runnable/RunnableTrace for suites with a "main" file, plain Graph for
  pure containers. Raises for configurations that are none of these.
  """
  if isinstance(parent, Runnable):
    # Below a runnable can only be traces.
    return Trace(suite, parent, arch)
  elif suite.get("main"):
    # A main file makes this graph runnable.
    if suite.get("benchmarks"):
      # This graph has subbenchmarks (traces).
      return Runnable(suite, parent, arch)
    # This graph has no subbenchmarks, it's a leaf.
    return RunnableTrace(suite, parent, arch)
  elif suite.get("benchmarks"):
    # This is neither a leaf nor a runnable.
    return Graph(suite, parent, arch)
  else:  # pragma: no cover
    raise Exception("Invalid benchmark suite configuration.")
def BuildGraphs(suite, arch, parent=None):
  """Builds a tree structure of graph objects that corresponds to the suite
  configuration.

  Returns the graph built for this suite, or None when the suite doesn't
  apply to the given arch (callers pass the result to FlattenRunnables).
  """
  parent = parent or DefaultSentinel()

  # TODO(machenbach): Implement notion of cpu type?
  if arch not in suite.get("archs", ["ia32", "x64"]):
    return None

  graph = MakeGraph(suite, arch, parent)
  for subsuite in suite.get("benchmarks", []):
    BuildGraphs(subsuite, arch, graph)  # recursion
  parent.AppendChild(graph)
  return graph
def FlattenRunnables(node):
  """Generator that traverses the tree structure and iterates over all
  runnables.
  """
  if isinstance(node, Runnable):
    yield node
  elif isinstance(node, Node):
    # Inner node: recurse into all children.
    for child in node._children:
      for result in FlattenRunnables(child):
        yield result
  else:  # pragma: no cover
    raise Exception("Invalid benchmark suite configuration.")
323 # TODO: Implement results_processor.
325 parser = optparse.OptionParser()
326 parser.add_option("--arch",
327 help=("The architecture to run tests for, "
328 "'auto' or 'native' for auto-detect"),
330 parser.add_option("--buildbot",
331 help="Adapt to path structure used on buildbots",
332 default=False, action="store_true")
333 parser.add_option("--json-test-results",
334 help="Path to a file for storing json results.")
335 parser.add_option("--outdir", help="Base directory with compile output",
337 (options, args) = parser.parse_args(args)
339 if len(args) == 0: # pragma: no cover
343 if options.arch in ["auto", "native"]: # pragma: no cover
344 options.arch = ARCH_GUESS
346 if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
347 print "Unknown architecture %s" % options.arch
350 workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
353 shell_dir = os.path.join(workspace, options.outdir, "Release")
355 shell_dir = os.path.join(workspace, options.outdir,
356 "%s.release" % options.arch)
360 path = os.path.abspath(path)
362 if not os.path.exists(path): # pragma: no cover
363 results.errors.append("Benchmark file %s does not exist." % path)
366 with open(path) as f:
367 suite = json.loads(f.read())
369 # If no name is given, default to the file name without .json.
370 suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
372 for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
373 print ">>> Running suite: %s" % "/".join(runnable.graphs)
374 runnable.ChangeCWD(path)
377 """Output generator that reruns several times."""
378 for i in xrange(0, max(1, runnable.run_count)):
379 # TODO(machenbach): Make timeout configurable in the suite definition.
380 # Allow timeout per arch like with run_count per arch.
381 output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
382 print ">>> Stdout (#%d):" % (i + 1)
384 if output.stderr: # pragma: no cover
385 # Print stderr for debugging.
386 print ">>> Stderr (#%d):" % (i + 1)
390 # Let runnable iterate over all runs and handle output.
391 results += runnable.Run(Runner)
393 if options.json_test_results:
394 results.WriteToFile(options.json_test_results)
395 else: # pragma: no cover
398 return min(1, len(results.errors))
# Script entry point: the process exit code comes from Main (1 on errors).
400 if __name__ == "__main__": # pragma: no cover
401 sys.exit(Main(sys.argv[1:]))