There are separate JS and C stacks on simulators, so some stack-extensive
tests (like mozilla/js1_5/extensions/regress-355497) might cause a C stack
overflow, and that overflow is not caught by V8. It is not an issue on real
HW. Increasing the C stack size also solves the problem, but we already
have the FLAG_sim_stack_size flag to control the JS stack size.
This patch makes it possible to add flags to tests conditionally in
.status files.
TEST=mozilla/js1_5/extensions/regress-355497
BUG=v8:3152
LOG=N
Review URL: https://codereview.chromium.org/735723006
Cr-Commit-Position: refs/heads/master@{#25434}
['arch == arm64', {
- # BUG(v8:3152): Runs out of stack in debug mode.
- 'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
-
# BUG(v8:3503): Times out in debug mode.
'js1_5/Regress/regress-280769-2': [PASS, FAIL, ['mode == debug', SKIP]],
}], # 'arch == arm64'
'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
}], # 'arch == mipsel or arch == mips64el'
+['arch == mips64el and simulator_run == True', {
+ 'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
+}],
+
['arch == mips', {
# BUG(3251229): Times out when running new crankshaft test script.
'js1_5/extensions/regress-330569': [SKIP],
'js1_5/extensions/regress-351448': [SKIP],
'js1_5/extensions/regress-336410-1': [SKIP],
+
+  # BUG(v8:3152): Avoid C stack overflow.
+ 'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
}], # 'arch == arm64 and simulator_run == True'
]
# TODO(all): Combine "simulator" and "simulator_run".
simulator_run = not options.dont_skip_simulator_slow_tests and \
- arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
+ arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
+ ARCH_GUESS and arch != ARCH_GUESS
# Find available test suites and read test cases from them.
variables = {
"arch": arch,
t.outcomes = self.rules[testname]
if statusfile.DoSkip(t.outcomes):
continue # Don't add skipped tests to |filtered|.
+ for outcome in t.outcomes:
+ if outcome.startswith('Flags: '):
+ t.flags += outcome[7:].split()
flaky = statusfile.IsFlaky(t.outcomes)
slow = statusfile.IsSlow(t.outcomes)
pass_fail = statusfile.IsPassOrFail(t.outcomes)
from . import output
class TestCase(object):
- def __init__(self, suite, path, flags=[], dependency=None):
- self.suite = suite # TestSuite object
- self.path = path # string, e.g. 'div-mod', 'test-api/foo'
- self.flags = flags # list of strings, flags specific to this test case
+ def __init__(self, suite, path, flags=None, dependency=None):
+ self.suite = suite # TestSuite object
+ self.path = path # string, e.g. 'div-mod', 'test-api/foo'
+ self.flags = flags or [] # list of strings, flags specific to this test
self.dependency = dependency # |path| for testcase that must be run first
self.outcomes = None
self.output = None