From e6e8acd4463344d4bf8bbfcec3cd7d3a032af415 Mon Sep 17 00:00:00 2001
From: "machenbach@chromium.org"
Date: Mon, 25 Nov 2013 17:34:52 +0000
Subject: [PATCH] Make test runner more flexible for running fast tests.

Transformed variant flags into named variants. Now, all combinations of
variants can be specified on the command line. The old command-line flags
are kept for backwards compatibility on the bots.

Added two new test groups: slow and pass|fail. Both are implemented
similarly to the flaky test feature and allow tests marked as slow or as
pass|fail to be either run or skipped.

R=yangguo@chromium.org

Review URL: https://codereview.chromium.org/85733003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@18062 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
---
 test/mozilla/mozilla.status          |  8 +++---
 tools/run-tests.py                   | 56 +++++++++++++++++++++++++++---------
 tools/testrunner/local/statusfile.py |  6 +++-
 tools/testrunner/local/testsuite.py  | 23 +++++++++++++--
 4 files changed, 73 insertions(+), 20 deletions(-)

diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index d5e851c..fdea0a9 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -81,23 +81,23 @@
 
   # This takes a long time to run (~100 seconds). It should only be run
   # by the really patient.
-  'js1_5/GC/regress-324278': [SLOW],
+  'js1_5/GC/regress-324278': [SKIP],
 
   # This takes a long time to run because our indexOf operation is
   # pretty slow - it causes a lot of GCs; see issue
   # #926379. We could consider marking this SKIP because it takes a
   # while to run to completion.
-  'js1_5/GC/regress-338653': [SLOW],
+  'js1_5/GC/regress-338653': [SKIP],
 
   # This test is designed to run until it runs out of memory. This takes
   # a very long time because it builds strings character by character
   # and compiles a lot of regular expressions. We could consider marking
   # this SKIP because it takes a while to run to completion.
-  'js1_5/GC/regress-346794': [SLOW],
+  'js1_5/GC/regress-346794': [SKIP],
 
   # Runs out of memory while trying to build huge string of 'x'
   # characters. This takes a long time to run (~32 seconds).
-  'js1_5/GC/regress-348532': [SLOW],
+  'js1_5/GC/regress-348532': [SKIP],
 
 ##################### FLAKY TESTS #####################
 
diff --git a/tools/run-tests.py b/tools/run-tests.py
index 2fdbeb9..32b3f49 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -53,9 +53,13 @@ TIMEOUT_SCALEFACTOR = {"debug" : 4,
                        "release" : 1 }
 
 # Use this to run several variants of the tests.
-VARIANT_FLAGS = [[],
-                 ["--stress-opt", "--always-opt"],
-                 ["--nocrankshaft"]]
+VARIANT_FLAGS = {
+    "default": [],
+    "stress": ["--stress-opt", "--always-opt"],
+    "nocrankshaft": ["--nocrankshaft"]}
+
+VARIANTS = ["default", "stress", "nocrankshaft"]
+
 MODE_FLAGS = {
     "debug" : ["--nobreak-on-abort", "--nodead-code-elimination",
                "--nofold-constants", "--enable-slow-asserts",
@@ -97,6 +101,12 @@ def BuildOptions():
   result.add_option("--flaky-tests",
                     help="Regard tests marked as flaky (run|skip|dontcare)",
                     default="dontcare")
+  result.add_option("--slow-tests",
+                    help="Regard slow tests (run|skip|dontcare)",
+                    default="dontcare")
+  result.add_option("--pass-fail-tests",
+                    help="Regard pass|fail tests (run|skip|dontcare)",
+                    default="dontcare")
   result.add_option("--command-prefix",
                     help="Prepended to each shell command used to run a test",
                     default="")
@@ -128,6 +138,8 @@ def BuildOptions():
   result.add_option("--no-variants", "--novariants",
                     help="Don't run any testing variants",
                     default=False, dest="no_variants", action="store_true")
+  result.add_option("--variants",
+                    help="Comma-separated list of testing variants")
   result.add_option("--outdir", help="Base directory with compile output",
                     default="out")
   result.add_option("-p", "--progress",
@@ -167,6 +179,7 @@ def BuildOptions():
 
 def ProcessOptions(options):
   global VARIANT_FLAGS
+  global VARIANTS
 
   # Architecture and mode related stuff.
   if options.arch_and_mode:
@@ -205,26 +218,41 @@ def ProcessOptions(options):
     """Returns true if zero or one of multiple arguments are true."""
     return reduce(lambda x, y: x + y, args) <= 1
 
-  if not excl(options.no_stress, options.stress_only, options.no_variants):
-    print "Use only one of --no-stress, --stress-only or --no-variants."
+  if not excl(options.no_stress, options.stress_only, options.no_variants,
+              bool(options.variants)):
+    print("Use only one of --no-stress, --stress-only, --no-variants or "
+          "--variants.")
     return False
   if options.no_stress:
-    VARIANT_FLAGS = [[], ["--nocrankshaft"]]
+    VARIANTS = ["default", "nocrankshaft"]
   if options.no_variants:
-    VARIANT_FLAGS = [[]]
+    VARIANTS = ["default"]
+  if options.stress_only:
+    VARIANTS = ["stress"]
+  if options.variants:
+    VARIANTS = options.variants.split(",")
+    if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
+      print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
+      return False
   if not options.shell_dir:
    if options.shell:
      print "Warning: --shell is deprecated, use --shell-dir instead."
      options.shell_dir = os.path.dirname(options.shell)
-  if options.stress_only:
-    VARIANT_FLAGS = [["--stress-opt", "--always-opt"]]
   if options.valgrind:
     run_valgrind = os.path.join("tools", "run-valgrind.py")
     # This is OK for distributed running, so we don't need to set no_network.
     options.command_prefix = (["python", "-u", run_valgrind] +
                               options.command_prefix)
-  if not options.flaky_tests in ["run", "skip", "dontcare"]:
-    print "Unknown flaky test mode %s" % options.flaky_tests
+  def CheckTestMode(name, option):
+    if not option in ["run", "skip", "dontcare"]:
+      print "Unknown %s mode %s" % (name, option)
+      return False
+    return True
+  if not CheckTestMode("flaky test", options.flaky_tests):
+    return False
+  if not CheckTestMode("slow test", options.slow_tests):
+    return False
+  if not CheckTestMode("pass|fail test", options.pass_fail_tests):
     return False
   if not options.no_i18n:
     DEFAULT_TESTS.append("intl")
@@ -341,13 +369,15 @@ def Execute(arch, mode, args, options, suites, workspace):
     if len(args) > 0:
       s.FilterTestCasesByArgs(args)
     all_tests += s.tests
-    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests)
+    s.FilterTestCasesByStatus(options.warn_unused, options.flaky_tests,
+                              options.slow_tests, options.pass_fail_tests)
     if options.cat:
       verbose.PrintTestSource(s.tests)
       continue
+    variant_flags = [VARIANT_FLAGS[var] for var in VARIANTS]
     s.tests = [ t.CopyAddingFlags(v) for t in s.tests
-                for v in s.VariantFlags(t, VARIANT_FLAGS) ]
+                for v in s.VariantFlags(t, variant_flags) ]
     s.tests = ShardTests(s.tests, options.shard_count, options.shard_run)
     num_tests += len(s.tests)
     for t in s.tests:
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index da0c797..e290122 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -59,7 +59,11 @@ for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
 
 
 def DoSkip(outcomes):
-  return SKIP in outcomes or SLOW in outcomes
+  return SKIP in outcomes
+
+
+def IsSlow(outcomes):
+  return SLOW in outcomes
 
 
 def OnlyStandardVariant(outcomes):
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index 8517ce9..ff51196 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -93,11 +93,24 @@ class TestSuite(object):
   def _FilterFlaky(flaky, mode):
     return (mode == "run" and not flaky) or (mode == "skip" and flaky)
 
-  def FilterTestCasesByStatus(self, warn_unused_rules, flaky_tests="dontcare"):
+  @staticmethod
+  def _FilterSlow(slow, mode):
+    return (mode == "run" and not slow) or (mode == "skip" and slow)
+
+  @staticmethod
+  def _FilterPassFail(pass_fail, mode):
+    return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)
+
+  def FilterTestCasesByStatus(self, warn_unused_rules,
+                              flaky_tests="dontcare",
+                              slow_tests="dontcare",
+                              pass_fail_tests="dontcare"):
     filtered = []
     used_rules = set()
     for t in self.tests:
       flaky = False
+      slow = False
+      pass_fail = False
       testname = self.CommonTestName(t)
       if testname in self.rules:
         used_rules.add(testname)
@@ -107,6 +120,8 @@ class TestSuite(object):
         if statusfile.DoSkip(t.outcomes):
           continue  # Don't add skipped tests to |filtered|.
         flaky = statusfile.IsFlaky(t.outcomes)
+        slow = statusfile.IsSlow(t.outcomes)
+        pass_fail = statusfile.IsPassOrFail(t.outcomes)
       skip = False
       for rule in self.wildcards:
         assert rule[-1] == '*'
@@ -117,7 +132,11 @@ class TestSuite(object):
            skip = True
            break  # "for rule in self.wildcards"
          flaky = flaky or statusfile.IsFlaky(t.outcomes)
-      if skip or self._FilterFlaky(flaky, flaky_tests):
+          slow = slow or statusfile.IsSlow(t.outcomes)
+          pass_fail = pass_fail or statusfile.IsPassOrFail(t.outcomes)
+      if (skip or self._FilterFlaky(flaky, flaky_tests)
+          or self._FilterSlow(slow, slow_tests)
+          or self._FilterPassFail(pass_fail, pass_fail_tests)):
         continue  # "for t in self.tests"
       filtered.append(t)
     self.tests = filtered
-- 
2.7.4
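
Note (not part of the patch): the named-variant selection that ProcessOptions() now performs boils down to a small amount of logic, sketched standalone below. The VARIANT_FLAGS/VARIANTS data mirror the dictionaries added to tools/run-tests.py; resolve_variants() is an illustrative helper, not a function from the patch, and an invocation such as tools/run-tests.py --variants=default,nocrankshaft --slow-tests=skip is only meant to show how the new flags are intended to combine (exact paths depend on the checkout).

# Sketch only (not part of the patch): mirrors the variant selection logic in
# tools/run-tests.py. resolve_variants() is a hypothetical helper.

VARIANT_FLAGS = {
    "default": [],
    "stress": ["--stress-opt", "--always-opt"],
    "nocrankshaft": ["--nocrankshaft"]}

VARIANTS = ["default", "stress", "nocrankshaft"]


def resolve_variants(no_stress=False, no_variants=False, stress_only=False,
                     variants=None):
  """Returns the list of variant names to run, or None on invalid input."""
  # The four selectors are mutually exclusive, as in ProcessOptions().
  if sum(map(bool, [no_stress, no_variants, stress_only, variants])) > 1:
    print("Use only one of --no-stress, --stress-only, --no-variants or "
          "--variants.")
    return None
  if no_stress:
    return ["default", "nocrankshaft"]
  if no_variants:
    return ["default"]
  if stress_only:
    return ["stress"]
  if variants:
    chosen = variants.split(",")
    if not set(chosen).issubset(VARIANT_FLAGS.keys()):
      print("All variants must be in %s" % list(VARIANT_FLAGS.keys()))
      return None
    return chosen
  return VARIANTS  # No selector given: run every variant.


if __name__ == "__main__":
  names = resolve_variants(variants="default,stress")
  # Each test later gets one copy per selected variant's flag set.
  variant_flags = [VARIANT_FLAGS[var] for var in names]
  print(variant_flags)  # -> [[], ['--stress-opt', '--always-opt']]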
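
Note (not part of the patch): the run|skip|dontcare mode check and the status-file predicates behind --slow-tests and --pass-fail-tests, reduced to a self-contained sketch. SKIP, SLOW, PASS and FAIL stand in for the constants defined in statusfile.py, and IsPassOrFail() is an assumed definition here, since the real one is not shown in this diff; the sketch simply treats a test that may both pass and fail as a pass|fail test.

# Sketch only (not part of the patch): mode validation and status predicates.
# SKIP/SLOW/PASS/FAIL are stand-ins; IsPassOrFail() is an assumption.

SKIP, SLOW, PASS, FAIL = "SKIP", "SLOW", "PASS", "FAIL"


def CheckTestMode(name, option):
  # Same validation that ProcessOptions() now applies to all three options.
  if option not in ["run", "skip", "dontcare"]:
    print("Unknown %s mode %s" % (name, option))
    return False
  return True


def DoSkip(outcomes):
  # SLOW no longer implies SKIP, which is why the mozilla.status entries
  # above moved from SLOW to SKIP.
  return SKIP in outcomes


def IsSlow(outcomes):
  return SLOW in outcomes


def IsPassOrFail(outcomes):
  # Assumption: a test whose expectations allow both passing and failing.
  return PASS in outcomes and FAIL in outcomes


if __name__ == "__main__":
  assert CheckTestMode("slow test", "skip")
  assert not CheckTestMode("pass|fail test", "sometimes")  # prints a warning
  outcomes = set([PASS, SLOW])
  print("%s %s %s" % (DoSkip(outcomes), IsSlow(outcomes),
                      IsPassOrFail(outcomes)))  # -> False True False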
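
Note (not part of the patch): how FilterTestCasesByStatus() combines the per-test markers with the chosen modes. keep_test() is an illustrative wrapper around the two filter helpers added to testsuite.py; "dontcare" (the default) never drops a test on its own, "skip" drops the marked tests, and "run" keeps only the marked ones.

# Sketch only (not part of the patch): the keep/drop decision made per test by
# FilterTestCasesByStatus(). keep_test() is an illustrative wrapper.

def _FilterSlow(slow, mode):
  # True means "filter this test out".
  return (mode == "run" and not slow) or (mode == "skip" and slow)


def _FilterPassFail(pass_fail, mode):
  return (mode == "run" and not pass_fail) or (mode == "skip" and pass_fail)


def keep_test(slow, pass_fail, slow_tests="dontcare",
              pass_fail_tests="dontcare"):
  return not (_FilterSlow(slow, slow_tests) or
              _FilterPassFail(pass_fail, pass_fail_tests))


if __name__ == "__main__":
  # A SLOW-marked test survives the default mode, is dropped by
  # --slow-tests=skip, and is the only kind kept by --slow-tests=run.
  print(keep_test(slow=True, pass_fail=False))                     # True
  print(keep_test(slow=True, pass_fail=False, slow_tests="skip"))  # False
  print(keep_test(slow=False, pass_fail=False, slow_tests="run"))  # False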