Upstream version 9.38.198.0

diff --git a/src/v8/tools/run-tests.py b/src/v8/tools/run-tests.py
index 41a9fe9..6e9f554 100755
--- a/src/v8/tools/run-tests.py
+++ b/src/v8/tools/run-tests.py
@@ -34,6 +34,7 @@ import optparse
 import os
 from os.path import join
 import platform
+import random
 import shlex
 import subprocess
 import sys
@@ -49,7 +50,8 @@ from testrunner.objects import context
 
 
 ARCH_GUESS = utils.DefaultArch()
-DEFAULT_TESTS = ["mjsunit", "cctest", "message", "preparser"]
+DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "base-unittests",
+                 "cctest", "compiler-unittests", "message", "preparser"]
 TIMEOUT_DEFAULT = 60
 TIMEOUT_SCALEFACTOR = {"debug"   : 4,
                        "release" : 1 }
@@ -58,9 +60,10 @@ TIMEOUT_SCALEFACTOR = {"debug"   : 4,
 VARIANT_FLAGS = {
     "default": [],
     "stress": ["--stress-opt", "--always-opt"],
+    "turbofan": ["--turbo-filter=*", "--always-opt"],
     "nocrankshaft": ["--nocrankshaft"]}
 
-VARIANTS = ["default", "stress", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
 
 MODE_FLAGS = {
     "debug"   : ["--nohard-abort", "--nodead-code-elimination",
@@ -75,21 +78,30 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                    "--concurrent-recompilation"]
 
 SUPPORTED_ARCHS = ["android_arm",
+                   "android_arm64",
                    "android_ia32",
                    "arm",
                    "ia32",
+                   "x87",
+                   "mips",
                    "mipsel",
+                   "mips64el",
                    "nacl_ia32",
                    "nacl_x64",
                    "x64",
+                   "x32",
                    "arm64"]
 # Double the timeout for these:
 SLOW_ARCHS = ["android_arm",
+              "android_arm64",
               "android_ia32",
               "arm",
+              "mips",
               "mipsel",
+              "mips64el",
               "nacl_ia32",
               "nacl_x64",
+              "x87",
               "arm64"]
 
 
@@ -147,6 +159,12 @@ def BuildOptions():
   result.add_option("--no-presubmit", "--nopresubmit",
                     help='Skip presubmit checks',
                     default=False, dest="no_presubmit", action="store_true")
+  result.add_option("--no-snap", "--nosnap",
+                    help='Test a build compiled without snapshot.',
+                    default=False, dest="no_snap", action="store_true")
+  result.add_option("--no-sorting", "--nosorting",
+                    help="Don't sort tests according to duration of last run.",
+                    default=False, dest="no_sorting", action="store_true")
   result.add_option("--no-stress", "--nostress",
                     help="Don't run crankshaft --always-opt --stress-op test",
                     default=False, dest="no_stress", action="store_true")
@@ -157,6 +175,9 @@ def BuildOptions():
                     help="Comma-separated list of testing variants")
   result.add_option("--outdir", help="Base directory with compile output",
                     default="out")
+  result.add_option("--predictable",
+                    help="Compare output of several reruns of each test",
+                    default=False, action="store_true")
   result.add_option("-p", "--progress",
                     help=("The style of progress indicator"
                           " (verbose, dots, color, mono)"),
@@ -165,6 +186,15 @@ def BuildOptions():
                     help=("Quick check mode (skip slow/flaky tests)"))
   result.add_option("--report", help="Print a summary of the tests to be run",
                     default=False, action="store_true")
+  result.add_option("--json-test-results",
+                    help="Path to a file for storing json results.")
+  result.add_option("--rerun-failures-count",
+                    help=("Number of times to rerun each failing test case. "
+                          "Very slow tests will be rerun only once."),
+                    default=0, type="int")
+  result.add_option("--rerun-failures-max",
+                    help="Maximum number of failing test cases to rerun.",
+                    default=100, type="int")
   result.add_option("--shard-count",
                     help="Split testsuites into this number of shards",
                     default=1, type="int")
@@ -185,6 +215,9 @@ def BuildOptions():
                     default=False, action="store_true")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
                     default= -1, type="int")
+  result.add_option("--tsan",
+                    help="Regard test expectations for TSAN",
+                    default=False, action="store_true")
   result.add_option("-v", "--verbose", help="Verbose output",
                     default=False, action="store_true")
   result.add_option("--valgrind", help="Run tests through valgrind",
@@ -195,6 +228,8 @@ def BuildOptions():
   result.add_option("--junittestsuite",
                     help="The testsuite name in the JUnit output file",
                     default="v8tests")
+  result.add_option("--random-seed", default=0, dest="random_seed",
+                    help="Default seed for initializing random generator")
   return result
 
 
@@ -242,9 +277,18 @@ def ProcessOptions(options):
   if options.gc_stress:
     options.extra_flags += GC_STRESS_FLAGS
 
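+  # ASan builds leak-check at exit; forcing weak callbacks to run should
+  # release memory held via weak handles instead of flagging it as leaked.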
+  if options.asan:
+    options.extra_flags.append("--invoke-weak-callbacks")
+
+  if options.tsan:
+    VARIANTS = ["default"]
+
   if options.j == 0:
     options.j = multiprocessing.cpu_count()
 
+  while options.random_seed == 0:
+    options.random_seed = random.SystemRandom().randint(-2147483648, 2147483647)
+
   def excl(*args):
     """Returns true if zero or one of multiple arguments are true."""
     return reduce(lambda x, y: x + y, args) <= 1
@@ -270,6 +314,11 @@ def ProcessOptions(options):
     options.flaky_tests = "skip"
     options.slow_tests = "skip"
     options.pass_fail_tests = "skip"
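+  # Predictable mode pins a single variant and adds determinism flags so
+  # that repeated runs of a test can be compared for identical output.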
+  if options.predictable:
+    VARIANTS = ["default"]
+    options.extra_flags.append("--predictable")
+    options.extra_flags.append("--verify_predictable")
+    options.extra_flags.append("--no-inline-new")
 
   if not options.shell_dir:
     if options.shell:
@@ -323,9 +372,8 @@ def Main():
   workspace = os.path.abspath(join(os.path.dirname(sys.argv[0]), ".."))
   if not options.no_presubmit:
     print ">>> running presubmit tests"
-    code = subprocess.call(
+    exit_code = subprocess.call(
         [sys.executable, join(workspace, "tools", "presubmit.py")])
-    exit_code = code
 
   suite_paths = utils.GetSuitePaths(join(workspace, "test"))
 
@@ -386,12 +434,22 @@ def Execute(arch, mode, args, options, suites, workspace):
       timeout = TIMEOUT_DEFAULT
 
   timeout *= TIMEOUT_SCALEFACTOR[mode]
+
+  if options.predictable:
+    # Predictable mode is slower.
+    timeout *= 2
+
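+  # Thread the new runner knobs (random seed, sorting, rerun limits,
+  # predictable mode) through to the shared execution context.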
   ctx = context.Context(arch, mode, shell_dir,
                         mode_flags, options.verbose,
                         timeout, options.isolates,
                         options.command_prefix,
                         options.extra_flags,
-                        options.no_i18n)
+                        options.no_i18n,
+                        options.random_seed,
+                        options.no_sorting,
+                        options.rerun_failures_count,
+                        options.rerun_failures_max,
+                        options.predictable)
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -405,9 +463,11 @@ def Execute(arch, mode, args, options, suites, workspace):
     "isolates": options.isolates,
     "mode": mode,
     "no_i18n": options.no_i18n,
+    "no_snap": options.no_snap,
     "simulator_run": simulator_run,
     "simulator": utils.UseSimulator(arch),
     "system": utils.GuessOS(),
+    "tsan": options.tsan,
   }
   all_tests = []
   num_tests = 0
@@ -444,44 +504,42 @@ def Execute(arch, mode, args, options, suites, workspace):
     return 0
 
   # Run the tests, either locally or distributed on the network.
-  try:
-    start_time = time.time()
-    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
-    if options.junitout:
-      progress_indicator = progress.JUnitTestProgressIndicator(
-          progress_indicator, options.junitout, options.junittestsuite)
-
-    run_networked = not options.no_network
-    if not run_networked:
-      print("Network distribution disabled, running tests locally.")
-    elif utils.GuessOS() != "linux":
-      print("Network distribution is only supported on Linux, sorry!")
+  start_time = time.time()
+  progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+  if options.junitout:
+    progress_indicator = progress.JUnitTestProgressIndicator(
+        progress_indicator, options.junitout, options.junittestsuite)
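+  # Optionally chain a second wrapper that records results as JSON.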
+  if options.json_test_results:
+    progress_indicator = progress.JsonTestProgressIndicator(
+        progress_indicator, options.json_test_results, arch, mode)
+
+  run_networked = not options.no_network
+  if not run_networked:
+    print("Network distribution disabled, running tests locally.")
+  elif utils.GuessOS() != "linux":
+    print("Network distribution is only supported on Linux, sorry!")
+    run_networked = False
+  peers = []
+  if run_networked:
+    peers = network_execution.GetPeers()
+    if not peers:
+      print("No connection to distribution server; running tests locally.")
       run_networked = False
-    peers = []
-    if run_networked:
-      peers = network_execution.GetPeers()
-      if not peers:
-        print("No connection to distribution server; running tests locally.")
-        run_networked = False
-      elif len(peers) == 1:
-        print("No other peers on the network; running tests locally.")
-        run_networked = False
-      elif num_tests <= 100:
-        print("Less than 100 tests, running them locally.")
-        run_networked = False
-
-    if run_networked:
-      runner = network_execution.NetworkedRunner(suites, progress_indicator,
-                                                 ctx, peers, workspace)
-    else:
-      runner = execution.Runner(suites, progress_indicator, ctx)
-
-    exit_code = runner.Run(options.j)
-    if runner.terminate:
-      return exit_code
-    overall_duration = time.time() - start_time
-  except KeyboardInterrupt:
-    raise
+    elif len(peers) == 1:
+      print("No other peers on the network; running tests locally.")
+      run_networked = False
+    elif num_tests <= 100:
+      print("Less than 100 tests, running them locally.")
+      run_networked = False
+
+  if run_networked:
+    runner = network_execution.NetworkedRunner(suites, progress_indicator,
+                                               ctx, peers, workspace)
+  else:
+    runner = execution.Runner(suites, progress_indicator, ctx)
+
+  exit_code = runner.Run(options.j)
+  overall_duration = time.time() - start_time
 
   if options.time:
     verbose.PrintTestDurations(suites, overall_duration)