Upstream version 9.38.198.0
[platform/framework/web/crosswalk.git] / src / v8 / tools / unittests / run_benchmarks_test.py
1 #!/usr/bin/env python
2 # Copyright 2014 the V8 project authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
5
6 from collections import namedtuple
7 import coverage
8 import json
9 from mock import DEFAULT
10 from mock import MagicMock
11 import os
12 from os import path, sys
13 import shutil
14 import tempfile
15 import unittest
16
17 # Requires python-coverage and python-mock. Native python coverage
18 # version >= 3.7.1 should be installed to get the best speed.
19
# Scratch directory for test inputs/outputs; recreated before every test.
TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")

# Minimal flat suite configuration: two benchmarks sharing one binary,
# one run each, and a common results regexp where %s is expanded to the
# benchmark name (e.g. "^Richards: (.+)$" - see testRegexpNoMatch).
V8_JSON = {
  "path": ["."],
  "binary": "d7",
  "flags": ["--flag"],
  "main": "run.js",
  "run_count": 1,
  "results_regexp": "^%s: (.+)$",
  "benchmarks": [
    {"name": "Richards"},
    {"name": "DeltaBlue"},
  ]
}
34
# Nested suite configuration exercising: inheritance of flags/run_count/
# units from outer suites, arch-specific overrides (run_count_x64), and
# arch filtering ("archs": ["arm"] - testNestedSuite expects ShouldntRun
# to be skipped on x64).
V8_NESTED_SUITES_JSON = {
  "path": ["."],
  "flags": ["--flag"],
  "run_count": 1,
  "units": "score",
  "benchmarks": [
    # Leaf benchmark overriding binary, resources and run_count.
    {"name": "Richards",
     "path": ["richards"],
     "binary": "d7",
     "main": "run.js",
     "resources": ["file1.js", "file2.js"],
     "run_count": 2,
     "results_regexp": "^Richards: (.+)$"},
    # Nested suite: "Leaf" inherits settings through "Sub" and overrides
    # the run count for x64 and the units.
    {"name": "Sub",
     "path": ["sub"],
     "benchmarks": [
       {"name": "Leaf",
        "path": ["leaf"],
        "run_count_x64": 3,
        "units": "ms",
        "main": "run.js",
        "results_regexp": "^Simple: (.+) ms.$"},
     ]
    },
    # Adds an extra flag on top of the inherited --flag.
    {"name": "DeltaBlue",
     "path": ["delta_blue"],
     "main": "run.js",
     "flags": ["--flag2"],
     "results_regexp": "^DeltaBlue: (.+)$"},
    # arm-only: must not be executed by the x64 tests below.
    {"name": "ShouldntRun",
     "path": ["."],
     "archs": ["arm"],
     "main": "run.js"},
  ]
}
70
71 Output = namedtuple("Output", "stdout, stderr")
72
73 class BenchmarksTest(unittest.TestCase):
74   @classmethod
75   def setUpClass(cls):
76     cls.base = path.dirname(path.dirname(path.abspath(__file__)))
77     sys.path.append(cls.base)
78     cls._cov = coverage.coverage(
79         include=([os.path.join(cls.base, "run_benchmarks.py")]))
80     cls._cov.start()
81     import run_benchmarks
82     from testrunner.local import commands
83     global commands
84     global run_benchmarks
85
86   @classmethod
87   def tearDownClass(cls):
88     cls._cov.stop()
89     print ""
90     print cls._cov.report()
91
92   def setUp(self):
93     self.maxDiff = None
94     if path.exists(TEST_WORKSPACE):
95       shutil.rmtree(TEST_WORKSPACE)
96     os.makedirs(TEST_WORKSPACE)
97
98   def tearDown(self):
99     if path.exists(TEST_WORKSPACE):
100       shutil.rmtree(TEST_WORKSPACE)
101
102   def _WriteTestInput(self, json_content):
103     self._test_input = path.join(TEST_WORKSPACE, "test.json")
104     with open(self._test_input, "w") as f:
105       f.write(json.dumps(json_content))
106
107   def _MockCommand(self, *args):
108     # Fake output for each benchmark run.
109     benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
110     def execute(*args, **kwargs):
111       return benchmark_outputs.pop()
112     commands.Execute = MagicMock(side_effect=execute)
113
114     # Check that d8 is called from the correct cwd for each benchmark run.
115     dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
116     def chdir(*args, **kwargs):
117       self.assertEquals(dirs.pop(), args[0])
118     os.chdir = MagicMock(side_effect=chdir)
119
120   def _CallMain(self, *args):
121     self._test_output = path.join(TEST_WORKSPACE, "results.json")
122     all_args=[
123       "--json-test-results",
124       self._test_output,
125       self._test_input,
126     ]
127     all_args += args
128     return run_benchmarks.Main(all_args)
129
130   def _LoadResults(self):
131     with open(self._test_output) as f:
132       return json.load(f)
133
134   def _VerifyResults(self, suite, units, traces):
135     self.assertEquals([
136       {"units": units,
137        "graphs": [suite, trace["name"]],
138        "results": trace["results"],
139        "stddev": trace["stddev"]} for trace in traces],
140       self._LoadResults()["traces"])
141
142   def _VerifyErrors(self, errors):
143     self.assertEquals(errors, self._LoadResults()["errors"])
144
145   def _VerifyMock(self, binary, *args):
146     arg = [path.join(path.dirname(self.base), binary)]
147     arg += args
148     commands.Execute.assert_called_with(arg, timeout=60)
149
150   def _VerifyMockMultiple(self, *args):
151     expected = []
152     for arg in args:
153       a = [path.join(path.dirname(self.base), arg[0])]
154       a += arg[1:]
155       expected.append(((a,), {"timeout": 60}))
156     self.assertEquals(expected, commands.Execute.call_args_list)
157
158   def testOneRun(self):
159     self._WriteTestInput(V8_JSON)
160     self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
161     self.assertEquals(0, self._CallMain())
162     self._VerifyResults("test", "score", [
163       {"name": "Richards", "results": ["1.234"], "stddev": ""},
164       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
165     ])
166     self._VerifyErrors([])
167     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
168
169   def testTwoRuns_Units_SuiteName(self):
170     test_input = dict(V8_JSON)
171     test_input["run_count"] = 2
172     test_input["name"] = "v8"
173     test_input["units"] = "ms"
174     self._WriteTestInput(test_input)
175     self._MockCommand([".", "."],
176                       ["Richards: 100\nDeltaBlue: 200\n",
177                        "Richards: 50\nDeltaBlue: 300\n"])
178     self.assertEquals(0, self._CallMain())
179     self._VerifyResults("v8", "ms", [
180       {"name": "Richards", "results": ["50", "100"], "stddev": ""},
181       {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
182     ])
183     self._VerifyErrors([])
184     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
185
186   def testTwoRuns_SubRegexp(self):
187     test_input = dict(V8_JSON)
188     test_input["run_count"] = 2
189     del test_input["results_regexp"]
190     test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
191     test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
192     self._WriteTestInput(test_input)
193     self._MockCommand([".", "."],
194                       ["Richards: 100\nDeltaBlue: 200\n",
195                        "Richards: 50\nDeltaBlue: 300\n"])
196     self.assertEquals(0, self._CallMain())
197     self._VerifyResults("test", "score", [
198       {"name": "Richards", "results": ["50", "100"], "stddev": ""},
199       {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
200     ])
201     self._VerifyErrors([])
202     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
203
204   def testNestedSuite(self):
205     self._WriteTestInput(V8_NESTED_SUITES_JSON)
206     self._MockCommand(["delta_blue", "sub/leaf", "richards"],
207                       ["DeltaBlue: 200\n",
208                        "Simple: 1 ms.\n",
209                        "Simple: 2 ms.\n",
210                        "Simple: 3 ms.\n",
211                        "Richards: 100\n",
212                        "Richards: 50\n"])
213     self.assertEquals(0, self._CallMain())
214     self.assertEquals([
215       {"units": "score",
216        "graphs": ["test", "Richards"],
217        "results": ["50", "100"],
218        "stddev": ""},
219       {"units": "ms",
220        "graphs": ["test", "Sub", "Leaf"],
221        "results": ["3", "2", "1"],
222        "stddev": ""},
223       {"units": "score",
224        "graphs": ["test", "DeltaBlue"],
225        "results": ["200"],
226        "stddev": ""},
227       ], self._LoadResults()["traces"])
228     self._VerifyErrors([])
229     self._VerifyMockMultiple(
230         (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
231          "file2.js", "run.js"),
232         (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
233          "file2.js", "run.js"),
234         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
235         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
236         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
237         (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
238
239   def testOneRunStdDevRegExp(self):
240     test_input = dict(V8_JSON)
241     test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
242     self._WriteTestInput(test_input)
243     self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
244                               "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
245     self.assertEquals(0, self._CallMain())
246     self._VerifyResults("test", "score", [
247       {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
248       {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"},
249     ])
250     self._VerifyErrors([])
251     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
252
253   def testTwoRunsStdDevRegExp(self):
254     test_input = dict(V8_JSON)
255     test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
256     test_input["run_count"] = 2
257     self._WriteTestInput(test_input)
258     self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
259                               "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
260                               "Richards: 2\nRichards-stddev: 0.5\n"
261                               "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
262     self.assertEquals(1, self._CallMain())
263     self._VerifyResults("test", "score", [
264       {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
265       {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
266     ])
267     self._VerifyErrors(
268         ["Benchmark Richards should only run once since a stddev is provided "
269          "by the benchmark.",
270          "Benchmark DeltaBlue should only run once since a stddev is provided "
271          "by the benchmark.",
272          "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for benchmark "
273          "DeltaBlue."])
274     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
275
276   def testBuildbot(self):
277     self._WriteTestInput(V8_JSON)
278     self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
279     self.assertEquals(0, self._CallMain("--buildbot"))
280     self._VerifyResults("test", "score", [
281       {"name": "Richards", "results": ["1.234"], "stddev": ""},
282       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
283     ])
284     self._VerifyErrors([])
285     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
286
287   def testRegexpNoMatch(self):
288     self._WriteTestInput(V8_JSON)
289     self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
290     self.assertEquals(1, self._CallMain())
291     self._VerifyResults("test", "score", [
292       {"name": "Richards", "results": [], "stddev": ""},
293       {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
294     ])
295     self._VerifyErrors(
296         ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
297     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")