1 # Library for JSTest tests.
3 # This contains classes that represent an individual test, including
4 # metadata, and know how to run the tests and determine failures.
6 import datetime, os, re, sys, time
7 from subprocess import *
8 from threading import *
# resource module not supported on all platforms
# NOTE(review): this call is the interior of a definition whose header is not
# visible in this view; *resource* and *GB* must be bound by the elided lines
# (presumably a guarded "import resource" and GB = 2**30) -- confirm against
# the full file. Caps the process address space at 1 GB.
resource.setrlimit(resource.RLIMIT_AS, (1*GB, 1*GB))
def th_run_cmd(cmd, l):
    """Thread target: run *cmd* as a subprocess and record its outcome.

    :param cmd: list of command-line words to execute
    :param l: shared two-slot list; l[0] receives the Popen object (so the
        launching thread in run_cmd can kill it on timeout) and l[1] receives
        the result tuple (stdout, stderr, returncode, elapsed_seconds).
    """
    t0 = datetime.datetime.now()
    # Fix: *options* was used without being initialized in this view.
    options = {}
    # close_fds and preexec_fn are not supported on Windows and will
    # cause Popen to fail there, so only use them on other platforms.
    if sys.platform != 'win32':
        options["close_fds"] = True
        options["preexec_fn"] = set_limits
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, **options)
    # Expose the Popen object so run_cmd can kill the process if needed
    # (run_cmd reads l[0].pid).
    l[0] = p
    out, err = p.communicate()
    t1 = datetime.datetime.now()
    # Fix: *dd* was used without being computed; it is the elapsed timedelta.
    dd = t1 - t0
    # Manual seconds conversion (pre-timedelta.total_seconds() idiom);
    # fine here since test runs never span days.
    dt = dd.seconds + 1e-6 * dd.microseconds
    l[1] = (out, err, p.returncode, dt)
def run_cmd(cmd, timeout=60.0):
    """Run *cmd* in a subprocess, enforcing *timeout* seconds.

    NOTE(review): large parts of this function are not visible in this view
    (the timeout-is-None guard, the shared list *l*, the worker-thread
    start/join, and the kill error handling); the notes below are inferred
    from the visible lines -- confirm against the full file.
    """
    # Presumably guarded by a "timeout is None" check in the full source.
    return do_run_cmd(cmd)
    # Worker thread fills l[1] with (out, err, rc, dt); l[0] holds the Popen.
    th = Thread(target=th_run_cmd, args=(cmd, l))
    # In Python 3, we could just do l[0].kill().
    if sys.platform != 'win32':
        # Forcibly kill the child if it outlives the timeout.
        os.kill(l[0].pid, signal.SIGKILL)
    # Expecting a "No such process" error
65 """A runnable test."""
66 def __init__(self, path):
67 self.path = path # str: path of JS file relative to tests root dir
def prefix_command(path):
    """Return the '-f shell.js' options needed to run a test with the given path.

    Walks from the tests root down to *path*, emitting a '-f <dir>/shell.js'
    pair for each directory level so the shell preloads every shell.js on
    the way down.
    """
    # Base case: the tests root itself needs only the top-level shell.js.
    # Fix: without this guard the first return was unconditional (dead code
    # below it) and the recursion had no terminating case.
    if path == '':
        return [ '-f', 'shell.js' ]
    head, base = os.path.split(path)
    # Recurse on the parent directory, then append this level's shell.js.
    return Test.prefix_command(head) + [ '-f', os.path.join(path, 'shell.js') ]
def get_command(self, js_cmd_prefix):
    """Build the full command line used to run this test."""
    test_dir, _filename = os.path.split(self.path)
    shell_js_opts = Test.prefix_command(test_dir)
    # There is a test that requires the path to start with './'.
    return js_cmd_prefix + shell_js_opts + [ '-f', './' + self.path ]
def run(self, js_cmd_prefix, timeout=30.0):
    """Execute this test and wrap the result in a TestOutput.

    :param js_cmd_prefix: list of command-line words for invoking the shell
    :param timeout: seconds to allow the test before it is killed
    :return: TestOutput holding the command, stdout/stderr, rc and run time
    """
    cmd = self.get_command(js_cmd_prefix)
    out, err, rc, dt = run_cmd(cmd, timeout)
    # Fix: dropped the stray trailing semicolon (un-Pythonic).
    return TestOutput(self, cmd, out, err, rc, dt)
88 """A test case consisting of a test and an expected result."""
def __init__(self, path, enable, expect, random, slow):
    """Create a test case together with its expected-result metadata."""
    Test.__init__(self, path)
    # Whether to run the test at all (False => skip entirely).
    self.enable = enable
    # Expected outcome: True means the test should pass.
    self.expect = expect
    # True => the outcome is unpredictable; ignore it as 'random'.
    self.random = random
    # True => the test is known to run slowly.
    self.slow = slow
110 """Output from a test run."""
111 def __init__(self, test, cmd, out, err, rc, dt):
112 self.test = test # Test
113 self.cmd = cmd # str: command line of test
114 self.out = out # str: stdout
115 self.err = err # str: stderr
116 self.rc = rc # int: return code
117 self.dt = dt # float: run time
class NullTestOutput:
    """Variant of TestOutput that indicates a test was not run."""
    def __init__(self, test):
        # NOTE(review): the initializer body is not visible in this view;
        # presumably it stores *test* and fills in empty/zero defaults for
        # out/err/rc/dt so callers can treat it like a TestOutput --
        # confirm against the full file.
134 """Classified result from a test run."""
def __init__(self, test, result, results):
    # NOTE(review): the assignments storing *test* and *result* are not
    # visible in this view -- confirm against the full file.
    # (str, str) list: per-subtest (pass/fail, message) pairs.
    self.results = results
def from_output(cls, output):
    """Classify a raw TestOutput into an overall result plus subtest results.

    NOTE(review): several lines of this method are not visible in this view;
    the names *test*, *expected_rcs*, *passes* and *failures* used below are
    presumably initialized on the elided lines (e.g. test = output.test,
    expected_rcs = [0], counters from the parsed lines) -- confirm against
    the full file before relying on this summary.
    """
    result = None # str: overall result, see class-level variables
    results = [] # (str,str) list: subtest results (pass/fail, message)
    out, rc = output.out, output.rc
    # Tests named *-n.js are expected to exit with code 3.
    if test.path.endswith('-n.js'):
        expected_rcs.append(3)
    # Scan the shell's stdout for per-subtest pass/fail markers.
    for line in out.split('\n'):
        if line.startswith(' FAILED!'):
            msg = line[len(' FAILED! '):]
            results.append((cls.FAIL, msg))
        elif line.startswith(' PASSED!'):
            msg = line[len(' PASSED! '):]
            results.append((cls.PASS, msg))
        # A test can declare the exit code it expects via a magic comment.
        # NOTE(review): in the full source this match is presumably guarded
        # (an else branch and an "if m:" check appear to be elided here).
        m = re.match('--- NOTE: IN THIS TESTCASE, WE EXPECT EXIT CODE ((?:-|\\d)+) ---', line)
        expected_rcs.append(int(m.group(1)))
    # NOTE(review): the bodies of the two classification branches below
    # (unexpected exit code => crash-like failure; clean run => pass) are
    # not visible in this view -- confirm.
    if rc and not rc in expected_rcs:
    if (rc or passes > 0) and failures == 0:
    return cls(test, result, results)