#!/usr/bin/env python
#
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


from collections import OrderedDict
import itertools
import multiprocessing
import optparse
import os
from os.path import join
import platform
import random
import shlex
import subprocess
import sys
import time

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.network import network_execution
from testrunner.objects import context


ARCH_GUESS = utils.DefaultArch()
DEFAULT_TESTS = [
  "mjsunit",
  "unittests",
  "cctest",
  "message",
  "preparser",
]

# Map of test name synonyms to lists of test suites. Should be ordered by
# expected runtimes (suites with slow test cases first). These groups are
# invoked in separate steps on the bots.
TEST_MAP = {
  "default": [
    "mjsunit",
    "cctest",
    "message",
    "preparser",
  ],
  "optimize_for_size": [
    "mjsunit",
    "cctest",
    "webkit",
  ],
  "unittests": [
    "unittests",
  ],
}
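# For example, passing the synonym "default" on the command line is expanded
# by ExpandTestGroups() in Main() into the suites mjsunit, cctest, message
# and preparser.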

TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"debug"   : 4,
                       "release" : 1 }

# Use this to run several variants of the tests.
VARIANT_FLAGS = {
    "default": [],
    "stress": ["--stress-opt", "--always-opt"],
    "turbofan": ["--turbo-asm", "--turbo-filter=*", "--always-opt"],
    "nocrankshaft": ["--nocrankshaft"]}

VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
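# Each test case is run once per entry in VARIANTS, with the matching
# VARIANT_FLAGS appended (see the CopyAddingFlags() calls in Execute()); the
# "stress" variant, for instance, reruns every test with
# --stress-opt --always-opt.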

MODE_FLAGS = {
    "debug"   : ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants", "--enable-slow-asserts",
                 "--debug-code", "--verify-heap"],
    "release" : ["--nohard-abort", "--nodead-code-elimination",
                 "--nofold-constants"]}

GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

SUPPORTED_ARCHS = ["android_arm",
                   "android_arm64",
                   "android_ia32",
                   "arm",
                   "ia32",
                   "x87",
                   "mips",
                   "mipsel",
                   "mips64el",
                   "nacl_ia32",
                   "nacl_x64",
                   "x64",
                   "x32",
                   "arm64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
              "android_arm64",
              "android_ia32",
              "arm",
              "mips",
              "mipsel",
              "mips64el",
              "nacl_ia32",
              "nacl_x64",
              "x87",
              "arm64"]
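# The doubling combines with TIMEOUT_DEFAULT and TIMEOUT_SCALEFACTOR in
# Execute(): e.g. a debug-mode run on arm gets 2 * 60 * 4 = 480 seconds per
# test (before the extra doubling applied for --predictable).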


def BuildOptions():
  result = optparse.OptionParser()
  result.add_option("--arch",
                    help=("The architecture to run tests for, "
                          "'auto' or 'native' for auto-detect"),
                    default="ia32,x64,arm")
  result.add_option("--arch-and-mode",
                    help="Architecture and mode in the format 'arch.mode'",
                    default=None)
  result.add_option("--asan",
                    help="Regard test expectations for ASAN",
                    default=False, action="store_true")
  result.add_option("--buildbot",
                    help="Adapt to path structure used on buildbots",
                    default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
                    default=False, action="store_true")
  result.add_option("--flaky-tests",
                    help="Regard tests marked as flaky (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--slow-tests",
                    help="Regard slow tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--pass-fail-tests",
                    help="Regard pass|fail tests (run|skip|dontcare)",
                    default="dontcare")
  result.add_option("--gc-stress",
                    help="Switch on GC stress mode",
                    default=False, action="store_true")
  result.add_option("--command-prefix",
                    help="Prepended to each shell command used to run a test",
                    default="")
  result.add_option("--download-data", help="Download missing test suite data",
                    default=False, action="store_true")
  result.add_option("--extra-flags",
                    help="Additional flags to pass to each test command",
                    default="")
  result.add_option("--isolates", help="Whether to test isolates",
                    default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
                    default=0, type="int")
  result.add_option("-m", "--mode",
                    help="The test modes in which to run (comma-separated)",
                    default="release,debug")
  result.add_option("--no-i18n", "--noi18n",
                    help="Skip internationalization tests",
                    default=False, action="store_true")
  result.add_option("--no-network", "--nonetwork",
                    help="Don't distribute tests on the network",
                    default=(utils.GuessOS() != "linux"),
                    dest="no_network", action="store_true")
  result.add_option("--no-presubmit", "--nopresubmit",
                    help='Skip presubmit checks',
                    default=False, dest="no_presubmit", action="store_true")
  result.add_option("--no-snap", "--nosnap",
                    help='Test a build compiled without snapshot.',
                    default=False, dest="no_snap", action="store_true")
  result.add_option("--no-sorting", "--nosorting",
                    help="Don't sort tests according to duration of last run.",
                    default=False, dest="no_sorting", action="store_true")
  result.add_option("--no-stress", "--nostress",
                    help="Don't run crankshaft --always-opt --stress-opt test",
                    default=False, dest="no_stress", action="store_true")
  result.add_option("--no-variants", "--novariants",
                    help="Don't run any testing variants",
                    default=False, dest="no_variants", action="store_true")
  result.add_option("--variants",
                    help="Comma-separated list of testing variants")
  result.add_option("--outdir", help="Base directory with compile output",
                    default="out")
  result.add_option("--predictable",
                    help="Compare output of several reruns of each test",
                    default=False, action="store_true")
  result.add_option("-p", "--progress",
                    help=("The style of progress indicator"
                          " (verbose, dots, color, mono)"),
                    choices=progress.PROGRESS_INDICATORS.keys(), default="mono")
  result.add_option("--quickcheck", default=False, action="store_true",
                    help=("Quick check mode (skip slow/flaky tests)"))
  result.add_option("--report", help="Print a summary of the tests to be run",
                    default=False, action="store_true")
  result.add_option("--json-test-results",
                    help="Path to a file for storing json results.")
  result.add_option("--rerun-failures-count",
                    help=("Number of times to rerun each failing test case. "
                          "Very slow tests will be rerun only once."),
                    default=0, type="int")
  result.add_option("--rerun-failures-max",
                    help="Maximum number of failing test cases to rerun.",
                    default=100, type="int")
  result.add_option("--shard-count",
                    help="Split testsuites into this number of shards",
                    default=1, type="int")
  result.add_option("--shard-run",
                    help="Run this shard from the split up tests.",
                    default=1, type="int")
  result.add_option("--shell", help="DEPRECATED! use --shell-dir", default="")
  result.add_option("--shell-dir", help="Directory containing executables",
                    default="")
  result.add_option("--dont-skip-slow-simulator-tests",
                    help="Don't skip more slow tests when using a simulator.",
                    default=False, action="store_true",
                    dest="dont_skip_simulator_slow_tests")
  result.add_option("--stress-only",
                    help="Only run tests with --always-opt --stress-opt",
                    default=False, action="store_true")
  result.add_option("--time", help="Print timing information after running",
                    default=False, action="store_true")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
                    default=-1, type="int")
  result.add_option("--tsan",
                    help="Regard test expectations for TSAN",
                    default=False, action="store_true")
  result.add_option("-v", "--verbose", help="Verbose output",
                    default=False, action="store_true")
  result.add_option("--valgrind", help="Run tests through valgrind",
                    default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
                    default=False, action="store_true")
  result.add_option("--junitout", help="File name of the JUnit output")
  result.add_option("--junittestsuite",
                    help="The testsuite name in the JUnit output file",
                    default="v8tests")
  result.add_option("--random-seed", default=0, dest="random_seed",
                    help="Default seed for initializing random generator")
  result.add_option("--msan",
                    help="Regard test expectations for MSAN",
                    default=False, action="store_true")
  return result

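# One possible invocation combining the flags defined in BuildOptions() above
# (illustrative only, not the canonical usage):
#   tools/run-tests.py --arch=ia32,x64 --mode=release -j 4 --progress=color mjsunit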

def ProcessOptions(options):
  global VARIANT_FLAGS
  global VARIANTS

  # Architecture and mode related stuff.
  if options.arch_and_mode:
    options.arch_and_mode = [arch_and_mode.split(".")
        for arch_and_mode in options.arch_and_mode.split(",")]
    options.arch = ",".join([tokens[0] for tokens in options.arch_and_mode])
    options.mode = ",".join([tokens[1] for tokens in options.arch_and_mode])
  options.mode = options.mode.split(",")
  for mode in options.mode:
    if not mode.lower() in ["debug", "release", "optdebug"]:
      print "Unknown mode %s" % mode
      return False
  if options.arch in ["auto", "native"]:
    options.arch = ARCH_GUESS
  options.arch = options.arch.split(",")
  for arch in options.arch:
    if not arch in SUPPORTED_ARCHS:
      print "Unknown architecture %s" % arch
      return False

  # Store the final configuration in arch_and_mode list. Don't overwrite
  # predefined arch_and_mode since it is more expressive than arch and mode.
  if not options.arch_and_mode:
    options.arch_and_mode = itertools.product(options.arch, options.mode)
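  # For example, --arch=ia32,x64 with --mode=release,debug yields the pairs
  # (ia32, release), (ia32, debug), (x64, release) and (x64, debug).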

  # Special processing of other options, sorted alphabetically.

  if options.buildbot:
    # Buildbots run presubmit tests as a separate step.
    options.no_presubmit = True
    options.no_network = True
  if options.command_prefix:
    print("Specifying --command-prefix disables network distribution, "
          "running tests locally.")
    options.no_network = True
  options.command_prefix = shlex.split(options.command_prefix)
  options.extra_flags = shlex.split(options.extra_flags)

  if options.gc_stress:
    options.extra_flags += GC_STRESS_FLAGS

  if options.asan:
    options.extra_flags.append("--invoke-weak-callbacks")

  if options.tsan:
    VARIANTS = ["default"]
    suppressions_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                     'sanitizers', 'tsan_suppressions.txt')
    tsan_options = '%s suppressions=%s' % (
        os.environ.get('TSAN_OPTIONS', ''), suppressions_file)
    os.environ['TSAN_OPTIONS'] = tsan_options

  if options.j == 0:
    options.j = multiprocessing.cpu_count()

  while options.random_seed == 0:
    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)

  def excl(*args):
    """Returns true if zero or one of multiple arguments are true."""
    return reduce(lambda x, y: x + y, args) <= 1
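  # e.g. excl(True) and excl(False, False) are true, excl(True, True) is not;
  # it guards against passing more than one of the mutually exclusive
  # variant-selection options checked below.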

  if not excl(options.no_stress, options.stress_only, options.no_variants,
              bool(options.variants)):
    print("Use only one of --no-stress, --stress-only, --no-variants, "
          "or --variants.")
    return False
  if options.quickcheck:
    VARIANTS = ["default", "stress"]
    options.flaky_tests = "skip"
    options.slow_tests = "skip"
    options.pass_fail_tests = "skip"
  if options.no_stress:
    VARIANTS = ["default", "nocrankshaft"]
  if options.no_variants:
    VARIANTS = ["default"]
  if options.stress_only:
    VARIANTS = ["stress"]
  if options.variants:
    VARIANTS = options.variants.split(",")
    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
      print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
      return False
  if options.predictable:
    VARIANTS = ["default"]
    options.extra_flags.append("--predictable")
    options.extra_flags.append("--verify_predictable")
    options.extra_flags.append("--no-inline-new")

  if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
  if options.valgrind:
    run_valgrind = os.path.join("tools", "run-valgrind.py")
    # This is OK for distributed running, so we don't need to set no_network.
    options.command_prefix = (["python", "-u", run_valgrind] +
                              options.command_prefix)
  def CheckTestMode(name, option):
    if not option in ["run", "skip", "dontcare"]:
      print "Unknown %s mode %s" % (name, option)
      return False
    return True
  if not CheckTestMode("flaky test", options.flaky_tests):
    return False
  if not CheckTestMode("slow test", options.slow_tests):
    return False
  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
    return False
  if not options.no_i18n:
    DEFAULT_TESTS.append("intl")
  return True


def ShardTests(tests, shard_count, shard_run):
  if shard_count < 2:
    return tests
  if shard_run < 1 or shard_run > shard_count:
    print "shard-run not a valid number, should be in [1:shard-count]"
    print "defaulting back to running all tests"
    return tests
  count = 0
  shard = []
  for test in tests:
    if count % shard_count == shard_run - 1:
      shard.append(test)
    count += 1
  return shard
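# Sharding example (illustrative): with shard_count=3 and shard_run=2, the
# tests at indices 1, 4, 7, ... are kept, i.e. those where
# index % shard_count == shard_run - 1.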


def Main():
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  exit_code = 0
  workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
  if not options.no_presubmit:
    print ">>> running presubmit tests"
    exit_code = subprocess.call(
        [sys.executable, join(workspace, "tools", "presubmit.py")])

  suite_paths = utils.GetSuitePaths(join(workspace, "test"))

  # Expand arguments with grouped tests. The args should reflect the list of
  # suites as otherwise filters would break.
  def ExpandTestGroups(name):
    if name in TEST_MAP:
      return [suite for suite in TEST_MAP[name]]
    else:
      return [name]
  args = reduce(lambda x, y: x + y,
         [ExpandTestGroups(arg) for arg in args],
         [])

  if len(args) == 0:
    suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
  else:
    args_suites = OrderedDict() # Used as set
    for arg in args:
      args_suites[arg.split(os.path.sep)[0]] = True
    suite_paths = [ s for s in args_suites if s in suite_paths ]

  suites = []
  for root in suite_paths:
    suite = testsuite.TestSuite.LoadTestSuite(
        os.path.join(workspace, "test", root))
    if suite:
      suites.append(suite)

  if options.download_data:
    for s in suites:
      s.DownloadData()

  for (arch, mode) in options.arch_and_mode:
    try:
      code = Execute(arch, mode, args, options, suites, workspace)
    except KeyboardInterrupt:
      return 2
    exit_code = exit_code or code
  return exit_code


def Execute(arch, mode, args, options, suites, workspace):
  print(">>> Running tests for %s.%s" % (arch, mode))

  shell_dir = options.shell_dir
  if not shell_dir:
    if options.buildbot:
      shell_dir = os.path.join(workspace, options.outdir, mode)
      mode = mode.lower()
    else:
      shell_dir = os.path.join(workspace, options.outdir,
                               "%s.%s" % (arch, mode))
  shell_dir = os.path.relpath(shell_dir)

  if mode == "optdebug":
    mode = "debug"  # "optdebug" is just an alias.

  # Populate context object.
  mode_flags = MODE_FLAGS[mode]
  timeout = options.timeout
  if timeout == -1:
    # Simulators are slow, therefore allow a longer default timeout.
    if arch in SLOW_ARCHS:
      timeout = 2 * TIMEOUT_DEFAULT
    else:
      timeout = TIMEOUT_DEFAULT

  timeout *= TIMEOUT_SCALEFACTOR[mode]

  if options.predictable:
    # Predictable mode is slower.
    timeout *= 2

  ctx = context.Context(arch, mode, shell_dir,
                        mode_flags, options.verbose,
                        timeout, options.isolates,
                        options.command_prefix,
                        options.extra_flags,
                        options.no_i18n,
                        options.random_seed,
                        options.no_sorting,
                        options.rerun_failures_count,
                        options.rerun_failures_max,
                        options.predictable)

  # TODO(all): Combine "simulator" and "simulator_run".
  simulator_run = not options.dont_skip_simulator_slow_tests and \
      arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
  # Find available test suites and read test cases from them.
  variables = {
    "arch": arch,
    "asan": options.asan,
    "deopt_fuzzer": False,
    "gc_stress": options.gc_stress,
    "isolates": options.isolates,
    "mode": mode,
    "no_i18n": options.no_i18n,
    "no_snap": options.no_snap,
    "simulator_run": simulator_run,
    "simulator": utils.UseSimulator(arch),
    "system": utils.GuessOS(),
    "tsan": options.tsan,
    "msan": options.msan,
  }
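  # The dictionary above is handed to each suite's ReadStatusFile() call
  # below; rules in the per-suite status files can key off these values,
  # e.g. to skip a test only when gc_stress or simulator_run is true.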
  all_tests = []
  num_tests = 0
  test_id = 0
  for s in suites:
    s.ReadStatusFile(variables)
    s.ReadTestCases(ctx)
    if len(args) > 0:
      s.FilterTestCasesByArgs(args)
    all_tests += s.tests
    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
                              options.slow_tests, options.pass_fail_tests)
    if options.cat:
      verbose.PrintTestSource(s.tests)
      continue
    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
    s.tests = [ t.CopyAddingFlags(v)
                for t in s.tests
                for v in s.VariantFlags(t, variant_flags) ]
    s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
    num_tests += len(s.tests)
    for t in s.tests:
      t.id = test_id
      test_id += 1

  if options.cat:
    return 0  # We're done here.

  if options.report:
    verbose.PrintReport(all_tests)

  if num_tests == 0:
    print "No tests to run."
    return 0

  # Run the tests, either locally or distributed on the network.
  start_time = time.time()
  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
  if options.junitout:
    progress_indicator = progress.JUnitTestProgressIndicator(
        progress_indicator, options.junitout, options.junittestsuite)
  if options.json_test_results:
    progress_indicator = progress.JsonTestProgressIndicator(
        progress_indicator, options.json_test_results, arch, mode)

  run_networked = not options.no_network
  if not run_networked:
    print("Network distribution disabled, running tests locally.")
  elif utils.GuessOS() != "linux":
    print("Network distribution is only supported on Linux, sorry!")
    run_networked = False
  peers = []
  if run_networked:
    peers = network_execution.GetPeers()
    if not peers:
      print("No connection to distribution server; running tests locally.")
      run_networked = False
    elif len(peers) == 1:
      print("No other peers on the network; running tests locally.")
      run_networked = False
    elif num_tests <= 100:
      print("Less than 100 tests, running them locally.")
      run_networked = False

  if run_networked:
    runner = network_execution.NetworkedRunner(suites, progress_indicator,
                                               ctx, peers, workspace)
  else:
    runner = execution.Runner(suites, progress_indicator, ctx)

  exit_code = runner.Run(options.j)
  overall_duration = time.time() - start_time

  if options.time:
    verbose.PrintTestDurations(suites, overall_duration)
  return exit_code


if __name__ == "__main__":
  sys.exit(Main())