UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)
+SKIPPED = ResultCode('SKIPPED', False)
# Test metric values.
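
Note: the second ResultCode argument is the failure flag, so SKIPPED (like
UNSUPPORTED) never fails a run. A minimal sketch of the shape these
constructors assume; lit's real ResultCode additionally interns instances by
name so pickled results stay unique:

    class ResultCode(object):
        def __init__(self, name, isFailure):
            self.name = name            # label shown in summaries, e.g. 'SKIPPED'
            self.isFailure = isFailure  # True only for codes that fail the run

    SKIPPED = ResultCode('SKIPPED', False)
    assert not SKIPPED.isFailure        # skipped tests do not fail the run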
run_tests(filtered_tests, lit_config, opts, len(discovered_tests))
elapsed = time.time() - start
- executed_tests = [t for t in filtered_tests if t.result]
+ # TODO(yln): eventually, all functions below should act on discovered_tests
+ executed_tests = [
+ t for t in filtered_tests if t.result.code != lit.Test.SKIPPED]
if opts.time_tests:
print_histogram(executed_tests)
- print_results(executed_tests, elapsed, opts)
+ print_results(filtered_tests, elapsed, opts)
if opts.output_path:
# TODO(yln): pass in discovered_tests
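
The filter change encodes a new invariant: once run() returns, every test has
a result (see the try/finally hunk below), so "was executed" now means "result
code is not SKIPPED" rather than "result is not None". A stub illustration
(names here are illustrative, not lit's):

    class FakeResult:
        def __init__(self, code): self.code = code

    class FakeTest:
        def __init__(self, code): self.result = FakeResult(code)

    tests = [FakeTest('PASS'), FakeTest('SKIPPED'), FakeTest('FAIL')]
    executed = [t for t in tests if t.result.code != 'SKIPPED']
    assert len(executed) == 2   # SKIPPED tests drop out of timing/histograms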
]
all_codes = [
+ (lit.Test.SKIPPED, 'Skipped Tests', 'Skipped'),
(lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
(lit.Test.PASS, 'Expected Passes', ''),
(lit.Test.FLAKYPASS, 'Passes With Retry', ''),
def print_group(code, label, tests, opts):
if not tests:
return
- if code == lit.Test.PASS:
+ # TODO(yln): FLAKYPASS? Make this more consistent!
+ if code in {lit.Test.SKIPPED, lit.Test.PASS}:
return
if (lit.Test.XFAIL == code and not opts.show_xfail) or \
- (lit.Test.UNSUPPORTED == code and not opts.show_unsupported) or \
- (lit.Test.UNRESOLVED == code and (opts.max_failures is not None)):
+ (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
return
print('*' * 20)
print('%s Tests (%d):' % (label, len(tests)))
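
Taken together with the all_codes table above, the reporting flow is roughly
the following (a sketch of the grouping logic, not lit's exact print_results):

    # Bucket tests by result code, then print one detailed group per code.
    tests_by_code = {}
    for test in filtered_tests:
        tests_by_code.setdefault(test.result.code, []).append(test)

    for (code, label, _) in all_codes:
        print_group(code, label, tests_by_code.get(code, []), opts)

print_group then suppresses the noisy groups: nothing is listed in detail for
SKIPPED or PASS, and XFAIL/UNSUPPORTED are listed only when requested via
--show-xfail / --show-unsupported.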
Upon completion, each test in the run will have its result
computed. Tests which were not actually executed (for any reason) will
- be given an UNRESOLVED result.
+ be marked SKIPPED.
"""
self.failures = 0
timeout = self.timeout or one_week
deadline = time.time() + timeout
- self._execute(deadline)
-
- # Mark any tests that weren't run as UNRESOLVED.
- for test in self.tests:
- if test.result is None:
- test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
+ try:
+ self._execute(deadline)
+ finally:
+ skipped = lit.Test.Result(lit.Test.SKIPPED)
+ for test in self.tests:
+ if test.result is None:
+ test.setResult(skipped)
def _execute(self, deadline):
self._increase_process_limit()
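
The try/finally is the heart of the change: no matter how _execute ends
(normal completion, KeyboardInterrupt, hitting --max-failures, or the
deadline), every test without a result is marked SKIPPED before run() returns.
The pattern in isolation, with a hypothetical execute callback standing in for
_execute:

    import lit.Test

    def run_marking_skipped(tests, execute, deadline):
        try:
            execute(deadline)               # may return early or raise
        finally:
            skipped = lit.Test.Result(lit.Test.SKIPPED)
            for test in tests:
                if test.result is None:     # never reached by the executor
                    test.setResult(skipped)

A single Result object is shared across all skipped tests, which the patch
relies on not being mutated afterwards.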
--- /dev/null
+import lit.formats
+config.name = 'lit-time'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None
--- /dev/null
+RUN: sleep 60
# Check the behavior of the --max-failures option.
#
-# RUN: not %{lit} -j 1 -v %{inputs}/max-failures > %t.out
-# RUN: not %{lit} --max-failures=1 -j 1 -v %{inputs}/max-failures >> %t.out
-# RUN: not %{lit} --max-failures=2 -j 1 -v %{inputs}/max-failures >> %t.out
-# RUN: not %{lit} --max-failures=0 -j 1 -v %{inputs}/max-failures 2>> %t.out
+# RUN: not %{lit} -j 1 %{inputs}/max-failures > %t.out 2>&1
+# RUN: not %{lit} --max-failures=1 -j 1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=2 -j 1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=0 -j 1 %{inputs}/max-failures 2>> %t.out
# RUN: FileCheck < %t.out %s
#
# END.
-# CHECK: Failing Tests (35)
-# CHECK: Failing Tests (1)
-# CHECK: Failing Tests (2)
+# CHECK-NOT: reached maximum number of test failures
+# CHECK-NOT: Skipped Tests
+# CHECK: Unexpected Failures: 35
+
+# CHECK: reached maximum number of test failures, skipping remaining tests
+# CHECK: Skipped Tests : 41
+# CHECK: Unexpected Failures: 1
+
+# CHECK: reached maximum number of test failures, skipping remaining tests
+# CHECK: Skipped Tests : 40
+# CHECK: Unexpected Failures: 2
+
# CHECK: error: argument --max-failures: requires positive integer, but found '0'
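
These CHECK lines assume failure counting of roughly this shape: the run
increments a counter per failing result and stops dispatching once the limit
is hit, leaving the rest to the SKIPPED fallback in run(). A sketch under that
assumption (the actual abort hook in lit.run may differ; run_one is a
hypothetical per-test runner passed in as a parameter):

    def run_with_max_failures(tests, run_one, max_failures=None):
        failures = 0
        for test in tests:
            run_one(test)
            if test.result.code.isFailure:
                failures += 1
                if max_failures is not None and failures >= max_failures:
                    print('reached maximum number of test failures, '
                          'skipping remaining tests')
                    break   # the rest keep result None -> marked SKIPPED

This also explains the arithmetic in the CHECKs: with 42 tests total, stopping
after 1 failure skips 41 and stopping after 2 skips 40.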
--- /dev/null
+# Test overall lit timeout (--max-time).
+#
+# RUN: %{lit} %{inputs}/max-time --max-time=1 2>&1 | FileCheck %s
+
+# CHECK: reached timeout, skipping remaining tests
+# CHECK: Skipped Tests : 1
+# CHECK: Expected Passes: 1
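
The --max-time path reuses the deadline plumbing shown earlier
(timeout = self.timeout or one_week). In outline, again with a hypothetical
run_one standing in for the executor:

    import time

    one_week = 7 * 24 * 60 * 60     # effectively "no deadline", as in run.py

    def run_until_deadline(tests, run_one, max_time=None):
        deadline = time.time() + (max_time or one_week)
        for test in tests:
            if time.time() > deadline:
                print('reached timeout, skipping remaining tests')
                break               # leftovers get SKIPPED in run()'s finally
            run_one(test)

Only slow.txt (RUN: sleep 60) outlives the one-second budget; the
"Expected Passes: 1" line implies a second, fast test in the inputs directory
that is not shown in this excerpt.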