-----------------
By default, `lit` will run failing tests first, then run tests in descending
-execution time order to optimize concurrency.
+execution time order to optimize concurrency. The execution order can be
+changed using the :option:`--order` option.
The timing data is stored in the `test_exec_root` in a file named
`.lit_test_times.txt`. If this file does not exist, then `lit` checks the
.. option:: --shuffle
- Run the tests in a random order, not failing/slowest first.
+ Run the tests in a random order, not failing/slowest first. Deprecated;
+ use :option:`--order` instead.
.. option:: --max-failures N
testsuites, for parallel execution on separate machines (say in a large
testing farm).
+.. option:: --order={lexical,random,smart}
+
+ Define the order in which tests are run. The supported values are:
+
+ - lexical - tests will be run in lexical order according to the test file
+ path. This is useful when a predictable test order is desired.
+
+ - random - tests will be run in random order.
+
+ - smart - tests that failed previously will be run first, then the remaining
+ tests, all in descending execution time order. This is the default as it
+ optimizes concurrency.
+
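  For illustration (a sketch, not part of this patch): the ``smart`` value
  amounts to a sort over the stored per-test timing data, where
  ``previous_failure`` and ``previous_elapsed`` are loaded from
  ``.lit_test_times.txt``::

    tests = [
        {'name': 'a.txt', 'previous_failure': False, 'previous_elapsed': 0.5},
        {'name': 'b.txt', 'previous_failure': True,  'previous_elapsed': 0.1},
        {'name': 'c.txt', 'previous_failure': False, 'previous_elapsed': 9.0},
    ]
    # Previously failing tests first, then slowest first, then by path.
    tests.sort(key=lambda t: (not t['previous_failure'],
                              -t['previous_elapsed'],
                              t['name']))
    print([t['name'] for t in tests])  # ['b.txt', 'c.txt', 'a.txt']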
.. option:: --run-shard=N
Select which shard to run, assuming the ``--num-shards=M`` option was
import lit.util
+@enum.unique
class TestOrder(enum.Enum):
- DEFAULT = enum.auto()
- RANDOM = enum.auto()
+ LEXICAL = 'lexical'
+ RANDOM = 'random'
+ SMART = 'smart'
def parse_args():
metavar="N",
help="Maximum time to spend testing (in seconds)",
type=_positive_int)
+ selection_group.add_argument("--order",
+ choices=[x.value for x in TestOrder],
+ default=TestOrder.SMART,
+ help="Test order to use (default: smart)")
selection_group.add_argument("--shuffle",
- help="Run tests in random order",
- action="store_true")
+ dest="order",
+ help="Run tests in random order (DEPRECATED: use --order=random)",
+ action="store_const",
+ const=TestOrder.RANDOM)
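A subtlety in the two definitions above: a parsed ``opts.order`` may be either
a string (from ``--order``, whose ``choices`` are the enum *values*) or a
``TestOrder`` member (the default, or ``--shuffle``'s ``store_const``). All
three forms normalize through ``TestOrder(...)`` in ``determine_order`` below,
because calling an ``Enum`` with one of its members returns that member. A
minimal self-contained sketch::

  import argparse
  import enum

  @enum.unique
  class TestOrder(enum.Enum):
      LEXICAL = 'lexical'
      RANDOM = 'random'
      SMART = 'smart'

  parser = argparse.ArgumentParser()
  parser.add_argument('--order', choices=[x.value for x in TestOrder],
                      default=TestOrder.SMART)
  parser.add_argument('--shuffle', dest='order', action='store_const',
                      const=TestOrder.RANDOM)

  # Default (enum member), explicit string, and --shuffle all normalize:
  assert TestOrder(parser.parse_args([]).order) is TestOrder.SMART
  assert TestOrder(parser.parse_args(['--order', 'random']).order) is TestOrder.RANDOM
  assert TestOrder(parser.parse_args(['--shuffle']).order) is TestOrder.RANDOM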
selection_group.add_argument("-i", "--incremental",
- help="Run failed tests first (DEPRECATED: now always enabled)",
+ help="Run failed tests first (DEPRECATED: use --order=smart)",
action="store_true")
selection_group.add_argument("--filter",
metavar="REGEX",
if opts.incremental:
print('WARNING: --incremental is deprecated. Failing tests now always run first.')
- if opts.shuffle:
- opts.order = TestOrder.RANDOM
- else:
- opts.order = TestOrder.DEFAULT
-
if opts.numShards or opts.runShard:
if not opts.numShards or not opts.runShard:
parser.error("--num-shards and --run-shard must be used together")
def determine_order(tests, order):
from lit.cl_arguments import TestOrder
- if order == TestOrder.RANDOM:
+ enum_order = TestOrder(order)
+ if enum_order == TestOrder.RANDOM:
import random
random.shuffle(tests)
+ elif enum_order == TestOrder.LEXICAL:
+ tests.sort(key=lambda t: t.getFullName())
else:
- assert order == TestOrder.DEFAULT, 'Unknown TestOrder value'
+ assert enum_order == TestOrder.SMART, 'Unknown TestOrder value'
tests.sort(key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName()))
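A quick usage sketch of ``determine_order`` with stub test objects (assuming
the patched ``lit`` package is importable and that the function lives in
``lit/main.py``; real test objects get ``previous_failure`` and
``previous_elapsed`` from the timing file)::

  from types import SimpleNamespace
  from lit.main import determine_order  # assumed module location

  def stub(name, failed, elapsed):
      t = SimpleNamespace(previous_failure=failed, previous_elapsed=elapsed)
      t.getFullName = lambda: name
      return t

  tests = [stub('suite :: slow.txt', False, 9.0),
           stub('suite :: fast.txt', False, 0.2),
           stub('suite :: flaky.txt', True, 0.5)]
  determine_order(tests, 'smart')
  print([t.getFullName() for t in tests])
  # ['suite :: flaky.txt', 'suite :: slow.txt', 'suite :: fast.txt']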
# only succeed the fourth time it is retried.
#
# RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
# CHECK-TEST1: Passed With Retry: 1
# Test that a per-file ALLOW_RETRIES overrides the config-wide test_retry_attempts property, if any.
#
# RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: %{lit} %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
# CHECK-TEST2: Passed With Retry: 1
# This test does not succeed within the allowed retry limit
#
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: not %{lit} %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
# CHECK-TEST3: Failed Tests (1):
# CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
# This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
# line, and that is not allowed.
#
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
+# RUN: not %{lit} %{inputs}/allow-retries/more-than-one-allow-retries-lines.py | FileCheck --check-prefix=CHECK-TEST4 %s
# CHECK-TEST4: Unresolved Tests (1):
# CHECK-TEST4: allow-retries :: more-than-one-allow-retries-lines.py
# This test does not provide a valid integer to the ALLOW_RETRIES keyword.
# It should be unresolved.
#
-# RUN: not %{lit} -j 1 %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
+# RUN: not %{lit} %{inputs}/allow-retries/not-a-valid-integer.py | FileCheck --check-prefix=CHECK-TEST5 %s
# CHECK-TEST5: Unresolved Tests (1):
# CHECK-TEST5: allow-retries :: not-a-valid-integer.py
# when no ALLOW_RETRIES keyword is present.
#
# RUN: rm -f %t.counter
-# RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
+# RUN: %{lit} %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
# CHECK-TEST6: Passed With Retry: 1
# UNSUPPORTED: system-windows
# Test lit.main.add_result_category() extension API.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/custom-result-category/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 %{inputs}/custom-result-category | FileCheck %s
+# RUN: not %{lit} %{inputs}/custom-result-category | FileCheck %s
# CHECK: CUSTOM_PASS: custom-result-category :: test1.txt
# CHECK: CUSTOM_FAILURE: custom-result-category :: test2.txt
# Check the basic discovery process, including a sub-suite.
#
# RUN: %{lit} %{inputs}/discovery \
-# RUN: -j 1 --debug --show-tests --show-suites \
+# RUN: --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-BASIC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-BASIC-ERR < %t.err %s
# RUN: %{lit} \
# RUN: %{inputs}/discovery/subdir/test-three.py \
# RUN: %{inputs}/discovery/subsuite/test-one.txt \
-# RUN: -j 1 --show-tests --show-suites -v > %t.out
+# RUN: --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-DIRECT-TEST < %t.out %s
#
# CHECK-DIRECT-TEST: -- Available Tests --
# Check discovery when config files end in .py
# RUN: %{lit} %{inputs}/py-config-discovery \
-# RUN: -j 1 --debug --show-tests --show-suites \
+# RUN: --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-PYCONFIG-ERR < %t.err %s
# Check discovery when using an exec path.
#
# RUN: %{lit} %{inputs}/exec-discovery \
-# RUN: -j 1 --debug --show-tests --show-suites \
+# RUN: --debug --show-tests --show-suites \
# RUN: -v > %t.out 2> %t.err
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-OUT < %t.out %s
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-ERR < %t.err %s
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery/subdir/test-three.py \
-# RUN: -j 1 --show-tests --show-suites -v > %t.out
+# RUN: --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-DIRECT-TEST < %t.out %s
#
# CHECK-ASEXEC-DIRECT-TEST: -- Available Tests --
# indirectly (e.g. when the directory containing the test is specified).
#
# RUN: not %{lit} \
-# RUN: %{inputs}/discovery/test.not-txt -j 1 2>%t.err
+# RUN: %{inputs}/discovery/test.not-txt 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-ERROR-INDIRECT-RUN-CHECK < %t.err %s
#
# CHECK-ERROR-INDIRECT-RUN-CHECK: error: 'top-level-suite :: test.not-txt' would not be run indirectly
# Check that no error is emitted with --no-indirectly-run-check.
#
# RUN: %{lit} \
-# RUN: %{inputs}/discovery/test.not-txt -j 1 --no-indirectly-run-check
+# RUN: %{inputs}/discovery/test.not-txt --no-indirectly-run-check
# Check that a standalone test with no suffixes set is run without any errors.
#
-# RUN: %{lit} %{inputs}/standalone-tests/true.txt -j 1 > %t.out
+# RUN: %{lit} %{inputs}/standalone-tests/true.txt > %t.out
# RUN: FileCheck --check-prefix=CHECK-STANDALONE < %t.out %s
#
# CHECK-STANDALONE: PASS: Standalone tests :: true.txt
# Check that an error is produced if suffixes variable is set for a suite with
# standalone tests.
#
-# RUN: not %{lit} %{inputs}/standalone-tests-with-suffixes -j 1 2> %t.err
+# RUN: not %{lit} %{inputs}/standalone-tests-with-suffixes 2> %t.err
# RUN: FileCheck --check-prefixes=CHECK-STANDALONE-SUFFIXES,CHECK-STANDALONE-DISCOVERY < %t.err %s
#
# CHECK-STANDALONE-SUFFIXES: standalone_tests set {{.*}} but suffixes
# Check that an error is produced if excludes variable is set for a suite with
# standalone tests.
#
-# RUN: not %{lit} %{inputs}/standalone-tests-with-excludes -j 1 2> %t.err
+# RUN: not %{lit} %{inputs}/standalone-tests-with-excludes 2> %t.err
# RUN: FileCheck --check-prefixes=CHECK-STANDALONE-EXCLUDES,CHECK-STANDALONE-DISCOVERY < %t.err %s
#
# CHECK-STANDALONE-EXCLUDES: standalone_tests set {{.*}} but {{.*}} excludes
# Check that no discovery is done for testsuite with standalone tests.
#
-# RUN: not %{lit} %{inputs}/standalone-tests -j 1 2>%t.err
+# RUN: not %{lit} %{inputs}/standalone-tests 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-STANDALONE-DISCOVERY < %t.err %s
#
# CHECK-STANDALONE-DISCOVERY: error: did not discover any tests for provided path(s)
#
# RUN: %{lit} \
# RUN: %{inputs}/exec-discovery-in-tree/obj/ \
-# RUN: -j 1 --show-tests --show-suites -v > %t.out
+# RUN: --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# Try it again after cd'ing into the test suite using a short relative path.
#
# RUN: cd %{inputs}/exec-discovery-in-tree/obj/
# RUN: %{lit} . \
-# RUN: -j 1 --show-tests --show-suites -v > %t.out
+# RUN: --show-tests --show-suites -v > %t.out
# RUN: FileCheck --check-prefix=CHECK-ASEXEC-INTREE < %t.out %s
#
# CHECK-ASEXEC-INTREE: exec-discovery-in-tree-suite - 1 tests
# Check for correct error message when discovery of tests fails.
#
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-discovery-failed > %t.cmd.out
+# RUN: not %{lit} -v %{inputs}/googletest-discovery-failed > %t.cmd.out
# RUN: FileCheck < %t.cmd.out %s
# Check the various features of the GoogleTest format.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/googletest-format/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-format > %t.out
+# RUN: not %{lit} -v %{inputs}/googletest-format > %t.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.out
# Check that the per test timeout is enforced when running GTest tests.
#
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
# Check that the per test timeout is enforced when running GTest tests via
# the configuration file
#
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: > %t.cfgset.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
# 3600 second timeout.
###############################################################################
-# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout \
+# RUN: %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
-# RUN: %{lit} -j 1 -v %{inputs}/googletest-timeout --filter=QuickSubTest \
+# RUN: %{lit} -v %{inputs}/googletest-timeout --filter=QuickSubTest \
# RUN: --param set_timeout=1 --timeout=3600 \
# RUN: > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s
# Check the various features of the GoogleTest format.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/googletest-upstream-format/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/googletest-upstream-format > %t.out
+# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# Check that --ignore-fail produces exit status 0 despite various kinds of
# test failures but doesn't otherwise suppress those failures.
-# RUN: not %{lit} -j 1 %{inputs}/ignore-fail | FileCheck %s
-# RUN: %{lit} -j 1 --ignore-fail %{inputs}/ignore-fail | FileCheck %s
+# RUN: not %{lit} %{inputs}/ignore-fail | FileCheck %s
+# RUN: %{lit} --ignore-fail %{inputs}/ignore-fail | FileCheck %s
# END.
# Check cases where LIT_OPTS has no effect.
#
-# RUN: %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
-# RUN: env LIT_OPTS= %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
-# RUN: env LIT_OPTS=-s %{lit} -j 1 -s %{inputs}/lit-opts | FileCheck %s
+# RUN: %{lit} -s %{inputs}/lit-opts | FileCheck %s
+# RUN: env LIT_OPTS= %{lit} -s %{inputs}/lit-opts | FileCheck %s
+# RUN: env LIT_OPTS=-s %{lit} -s %{inputs}/lit-opts | FileCheck %s
# Check that LIT_OPTS can override command-line options.
#
# RUN: env LIT_OPTS=-a \
-# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN: %{lit} -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR= %s
# Check that LIT_OPTS understands multiple options with arbitrary spacing.
#
# RUN: env LIT_OPTS='-a -v -Dvar=foobar' \
-# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN: %{lit} -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR=foobar %s
# Check that LIT_OPTS parses shell-like quotes and escapes.
#
# RUN: env LIT_OPTS='-a -v -Dvar="foo bar"\ baz' \
-# RUN: %{lit} -j 1 -s %{inputs}/lit-opts \
+# RUN: %{lit} -s %{inputs}/lit-opts \
# RUN: | FileCheck -check-prefix=SHOW-ALL -DVAR="foo bar baz" %s
# CHECK: Testing: 1 tests
# suites in %{inputs}. This test suite's results are then determined in part
# by %{lit}'s textual output, which includes the output of FileCheck calls
# within %{inputs}'s test suites. Thus, %{lit} clears environment variables
-# that can affect FileCheck's output.
+# that can affect FileCheck's output. It also adds "--order=lexical -j1" to
+# make the test order deterministic, since many FileCheck matches depend on
+# it.
config.substitutions.append(('%{inputs}', os.path.join(
config.test_source_root, 'Inputs')))
-config.substitutions.append(('%{lit}',
- "{env} %{{python}} {lit}".format(
+config.substitutions.append(('%{lit}', '%{lit-no-order-opt} --order=lexical'))
+config.substitutions.append(('%{lit-no-order-opt}',
+ "{env} %{{python}} {lit} -j1".format(
env="env -u FILECHECK_OPTS",
lit=os.path.join(lit_path, 'lit.py'))))
config.substitutions.append(('%{python}', '"%s"' % (sys.executable)))
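The two-level substitution above relies on list order: lit applies each
substitution in turn, so ``%{lit}`` first expands to text containing
``%{lit-no-order-opt}``, which the later entry then rewrites. A rough model
of that behavior (simplified; real lit substitutions also support regexes and
a configurable recursion limit)::

  substitutions = [
      ('%{lit}', '%{lit-no-order-opt} --order=lexical'),
      ('%{lit-no-order-opt}', 'env -u FILECHECK_OPTS python lit.py -j1'),
  ]

  def expand(cmd):
      for pattern, replacement in substitutions:  # applied in list order
          cmd = cmd.replace(pattern, replacement)
      return cmd

  print(expand('%{lit} tests/'))
  # env -u FILECHECK_OPTS python lit.py -j1 --order=lexical tests/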
# Check the behavior of --max-failures option.
#
-# RUN: not %{lit} -j 1 %{inputs}/max-failures > %t.out 2>&1
-# RUN: not %{lit} --max-failures=1 -j 1 %{inputs}/max-failures >> %t.out 2>&1
-# RUN: not %{lit} --max-failures=2 -j 1 %{inputs}/max-failures >> %t.out 2>&1
-# RUN: not %{lit} --max-failures=0 -j 1 %{inputs}/max-failures 2>> %t.out
+# RUN: not %{lit} %{inputs}/max-failures > %t.out 2>&1
+# RUN: not %{lit} --max-failures=1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=2 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=0 %{inputs}/max-failures 2>> %t.out
# RUN: FileCheck < %t.out %s
#
# Check the simple progress bar.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/progress-bar/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -s %{inputs}/progress-bar > %t.out
+# RUN: not %{lit} -s %{inputs}/progress-bar > %t.out
# RUN: FileCheck < %t.out %s
#
# CHECK: Testing:
## Check that we can reorder test runs.
# RUN: cp %{inputs}/reorder/.lit_test_times.txt %{inputs}/reorder/.lit_test_times.txt.orig
-# RUN: %{lit} -j1 %{inputs}/reorder > %t.out
+# RUN: %{lit-no-order-opt} %{inputs}/reorder > %t.out
# RUN: cp %{inputs}/reorder/.lit_test_times.txt %{inputs}/reorder/.lit_test_times.txt.new
# RUN: cp %{inputs}/reorder/.lit_test_times.txt.orig %{inputs}/reorder/.lit_test_times.txt
# RUN: not diff %{inputs}/reorder/.lit_test_times.txt.new %{inputs}/reorder/.lit_test_times.txt.orig
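The reorder test above depends on lit rewriting ``.lit_test_times.txt`` after
a run. A reader sketch, assuming the format is one ``<elapsed> <test path>``
record per line with a negative elapsed time marking a previous failure (an
assumption about lit internals, not something this patch defines)::

  def read_times(path):
      times = {}
      with open(path) as f:
          for line in f:
              elapsed, name = line.split(maxsplit=1)  # "<elapsed> <test path>"
              times[name.rstrip('\n')] = float(elapsed)
      return times

  for name, t in read_times('.lit_test_times.txt').items():
      status = 'failed last run' if t < 0 else 'passed last run'  # sign assumed
      print(f'{name}: {status}, {abs(t):.2f}s')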
# Check the env command
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-env/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -a -v %{inputs}/shtest-env \
+# RUN: not %{lit} -a -v %{inputs}/shtest-env \
# RUN: | FileCheck -match-full-lines %s
#
# END.
# and is not installed under PATH by default.
# UNSUPPORTED: system-aix
#
-# RUN: %{lit} -j 1 -v %{inputs}/shtest-format-argv0 | FileCheck %s
+# RUN: %{lit} -v %{inputs}/shtest-format-argv0 | FileCheck %s
# CHECK: -- Testing:
# CHECK: PASS: shtest-format-argv0 :: argv0.txt
# Check the various features of the ShTest format.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-format/.lit_test_times.txt
-
# RUN: rm -f %t.xml
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out
# RUN: FileCheck < %t.out %s
# RUN: FileCheck --check-prefix=XUNIT < %t.xml %s
# Check that we can inject commands at the beginning of a ShTest.
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-empty.txt --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-empty.txt --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
#
# CHECK-TEST1: Script:
# CHECK-TEST1: --
#
# CHECK-TEST1: Passed: 1
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
#
# CHECK-TEST2: Script:
# CHECK-TEST2: --
#
# CHECK-TEST2: Passed: 1
-# RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: %{lit} %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
#
# CHECK-TEST3: Script:
# CHECK-TEST3: --
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-keyword-parse-errors/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-keyword-parse-errors > %t.out
+# RUN: not %{lit} -vv %{inputs}/shtest-keyword-parse-errors > %t.out
# RUN: FileCheck -input-file %t.out %s
#
# END.
# Check the not command
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-not/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -a -v %{inputs}/shtest-not \
+# RUN: not %{lit} -a -v %{inputs}/shtest-not \
# RUN: | FileCheck -match-full-lines %s
#
# END.
# Check the various features of the ShTest format.
#
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-output-printing > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-output-printing > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# Check that the config.recursiveExpansionLimit is picked up and will cause
# lit substitutions to be expanded recursively.
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/substitutes-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/substitutes-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST1 %s
# CHECK-TEST1: PASS: substitutes-within-limit :: test.py
# CHECK-TEST1: $ "echo" "STOP"
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/does-not-substitute-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/does-not-substitute-within-limit --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
# CHECK-TEST2: UNRESOLVED: does-not-substitute-within-limit :: test.py
# CHECK-TEST2: ValueError: Recursive substitution of
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/does-not-substitute-no-limit --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/does-not-substitute-no-limit --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
# CHECK-TEST3: PASS: does-not-substitute-no-limit :: test.py
# CHECK-TEST3: $ "echo" "%rec4"
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/not-an-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST4 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/not-an-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST4 %s
# CHECK-TEST4: recursiveExpansionLimit must be either None or an integer
-# RUN: not %{lit} -j 1 %{inputs}/shtest-recursive-substitution/negative-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST5 %s
+# RUN: not %{lit} %{inputs}/shtest-recursive-substitution/negative-integer --show-all 2>&1 | FileCheck --check-prefix=CHECK-TEST5 %s
# CHECK-TEST5: recursiveExpansionLimit must be a non-negative integer
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/set-to-none --show-all | FileCheck --check-prefix=CHECK-TEST6 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/set-to-none --show-all | FileCheck --check-prefix=CHECK-TEST6 %s
# CHECK-TEST6: PASS: set-to-none :: test.py
-# RUN: %{lit} -j 1 %{inputs}/shtest-recursive-substitution/escaping --show-all | FileCheck --check-prefix=CHECK-TEST7 %s
+# RUN: %{lit} %{inputs}/shtest-recursive-substitution/escaping --show-all | FileCheck --check-prefix=CHECK-TEST7 %s
# CHECK-TEST7: PASS: escaping :: test.py
# CHECK-TEST7: $ "echo" "%s" "%s" "%%s"
# Check that -vv makes the line number of the failing RUN command clear.
# (-v is actually sufficient in the case of the internal shell.)
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-run-at-line/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -vv %{inputs}/shtest-run-at-line > %t.out
+# RUN: not %{lit} -vv %{inputs}/shtest-run-at-line > %t.out
# RUN: FileCheck --input-file %t.out %s
#
# END.
# Check the internal shell handling component of the ShTest format.
-# FIXME: this test depends on order of tests
-# RUN: rm -f %{inputs}/shtest-shell/.lit_test_times.txt
-
-# RUN: not %{lit} -j 1 -v %{inputs}/shtest-shell > %t.out
+# RUN: not %{lit} -v %{inputs}/shtest-shell > %t.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.out
#
# Test again in a non-UTF-8 shell to catch potential errors with Python 2, as
# seen on stdout-encoding.txt.
-# FIXME: lit's testing sets source_root == exec_root which complicates running lit more than once per test.
-# RUN: rm -f %{inputs}/shtest-shell/.lit_test_times.txt
-# RUN: env PYTHONIOENCODING=ascii not %{lit} -j 1 -a %{inputs}/shtest-shell > %t.ascii.out
+# RUN: env PYTHONIOENCODING=ascii not %{lit} -a %{inputs}/shtest-shell > %t.ascii.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.ascii.out
# Test features related to formats which support reporting additional test data
# and multiple test results.
-# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s
+# RUN: %{lit} -v %{inputs}/test-data-micro | FileCheck %s
# CHECK: -- Testing:
# Test features related to formats which support reporting additional test data.
-# RUN: %{lit} -j 1 -v %{inputs}/test-data > %t.out
+# RUN: %{lit} -v %{inputs}/test-data > %t.out
# RUN: FileCheck < %t.out %s
# CHECK: -- Testing:
-# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
+# RUN: %{lit} -v %{inputs}/test-data-micro --output %t.results.out
# RUN: FileCheck < %t.results.out %s
# RUN: rm %t.results.out
-# RUN: %{lit} -j 1 -v %{inputs}/test-data --output %t.results.out > %t.out
+# RUN: %{lit} -v %{inputs}/test-data --output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# CHECK: {