from lit.cl_arguments import TestOrder
use_shuffle = TestOrder(litConfig.order) == TestOrder.RANDOM
shard_env = {
- 'GTEST_COLOR': 'no',
+ 'GTEST_OUTPUT': 'json:' + test.gtest_json_file,
'GTEST_SHUFFLE': '1' if use_shuffle else '0',
'GTEST_TOTAL_SHARDS': total_shards,
- 'GTEST_SHARD_INDEX': shard_idx,
- 'GTEST_OUTPUT': 'json:' + test.gtest_json_file
+ 'GTEST_SHARD_INDEX': shard_idx
}
test.config.environment.update(shard_env)
return lit.Test.PASS, ''
def get_shard_header(shard_env):
- shard_envs = '\n'.join([k + '=' + v for k, v in shard_env.items()])
- return f"Script(shard):\n--\n%s\n%s\n--\n" % (shard_envs, ' '.join(cmd))
+ shard_envs = ' '.join([k + '=' + v for k, v in shard_env.items()])
+ return f"Script(shard):\n--\n%s %s\n--\n" % (shard_envs, ' '.join(cmd))
shard_header = get_shard_header(shard_env)
try:
- _, _, exitCode = lit.util.executeCommand(
+ out, _, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
- timeout=litConfig.maxIndividualTestTime)
- except lit.util.ExecuteCommandTimeoutException:
- return (lit.Test.TIMEOUT, f'{shard_header}Reached timeout of '
- f'{litConfig.maxIndividualTestTime} seconds')
+ timeout=litConfig.maxIndividualTestTime, redirect_stderr=True)
+ except lit.util.ExecuteCommandTimeoutException as e:
+ stream_msg = f"\n{e.out}\n--\nexit: {e.exitCode}\n--\n"
+ return (lit.Test.TIMEOUT, f'{shard_header}{stream_msg}Reached '
+ f'timeout of {litConfig.maxIndividualTestTime} seconds')
if not os.path.exists(test.gtest_json_file):
errmsg = f"shard JSON output does not exist: %s" % (
test.gtest_json_file)
- return lit.Test.FAIL, shard_header + errmsg
+ stream_msg = f"\n{out}\n--\nexit: {exitCode}\n--\n"
+ return lit.Test.FAIL, shard_header + stream_msg + errmsg
if exitCode == 0:
return lit.Test.PASS, ''
+ def get_test_stdout(test_name):
+ res = []
+ header = f'[ RUN ] ' + test_name
+ footer = f'[ FAILED ] ' + test_name
+ in_range = False
+ for l in out.splitlines():
+ if l.startswith(header):
+ in_range = True
+ elif l.startswith(footer):
+ return f'' if len(res) == 0 else '\n'.join(res)
+ elif in_range:
+ res.append(l)
+ assert False, f'gtest did not report the result for ' + test_name
+
with open(test.gtest_json_file, encoding='utf-8') as f:
jf = json.load(f)
' '.join(cmd), testname)
if 'failures' in testinfo:
output += header
+ test_out = get_test_stdout(testname)
+ if test_out:
+ output += test_out + '\n\n'
for fail in testinfo['failures']:
output += fail['failure'] + '\n'
output += '\n'
kUseCloseFDs = not (platform.system() == 'Windows')
-def executeCommand(command, cwd=None, env=None, input=None, timeout=0):
+def executeCommand(command, cwd=None, env=None, input=None, timeout=0,
+ redirect_stderr=False):
"""Execute command ``command`` (list of arguments or string) with.
* working directory ``cwd`` (str), use None to use the current
* Input to the command ``input`` (str), use string to pass
no input.
* Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
+ * ``redirect_stderr`` (bool), use True to redirect stderr to stdout
Returns a tuple (out, err, exitCode) where
* ``out`` (str) is the standard output of running the command
"""
if input is not None:
input = to_bytes(input)
+ err_out = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
p = subprocess.Popen(command, cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
+ stderr=err_out,
env=env, close_fds=kUseCloseFDs)
timerObject = None
# FIXME: Because of the way nested function scopes work in Python 2.x we
# Ensure the resulting output is always of string type.
out = to_string(out)
- err = to_string(err)
+ err = '' if redirect_stderr else to_string(err)
if hitTimeOut[0]:
raise ExecuteCommandTimeoutException(
}"""
if os.environ['GTEST_SHARD_INDEX'] == '0':
+ print("""\
+[----------] 4 test from FirstTest
+[ RUN ] FirstTest.subTestA
+[ OK ] FirstTest.subTestA (18 ms)
+[ RUN ] FirstTest.subTestB""", flush=True)
+ print('I am about to crash', file=sys.stderr, flush=True)
exit_code = 1
else:
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
if os.environ['GTEST_SHARD_INDEX'] == '0':
+ print('[ RUN ] FirstTest.subTestB', flush=True)
+ print('I am subTest B output', file=sys.stderr, flush=True)
+ print('[ FAILED ] FirstTest.subTestB (8 ms)', flush=True)
+
f.write(output)
exit_code = 1
else:
f.write(output)
exit_code = 0
elif test_name == 'InfiniteLoopSubTest':
+ print('[ RUN ] T.InfiniteLoopSubTest', flush=True)
+ print('some in progess output', file=sys.stderr, flush=True)
while True:
pass
else:
# CHECK: *** TEST 'googletest-crash :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
-# CHECK-NEXT: GTEST_COLOR=no
-# CHECK-NEXT: GTEST_SHUFFLE=0
-# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
-# CHECK-NEXT: GTEST_SHARD_INDEX=0
-# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:.*\.json]]
-# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:[^[:space:]]*\.json]] GTEST_SHUFFLE=0 GTEST_TOTAL_SHARDS=6 GTEST_SHARD_INDEX=0 {{.*}}[[FILE]]
+# CHECK-NEXT: --
+# CHECK-EMPTY:
+# CHECK-NEXT: [----------] 4 test from FirstTest
+# CHECK-NEXT: [ RUN ] FirstTest.subTestA
+# CHECK-NEXT: [ OK ] FirstTest.subTestA (18 ms)
+# CHECK-NEXT: [ RUN ] FirstTest.subTestB
+# CHECK-NEXT: I am about to crash
+# CHECK-EMPTY:
+# CHECK-NEXT: --
+# CHECK-NEXT: exit:
# CHECK-NEXT: --
# CHECK-NEXT: shard JSON output does not exist: [[JSON]]
# CHECK-NEXT: ***
# CHECK: *** TEST 'googletest-format :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
-# CHECK-NEXT: GTEST_COLOR=no
-# CHECK-NEXT: GTEST_SHUFFLE=1
-# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
-# CHECK-NEXT: GTEST_SHARD_INDEX=0
-# CHECK-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
-# CHECK-NEXT: GTEST_RANDOM_SEED=123
-# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: GTEST_OUTPUT=json:{{[^[:space:]]*}} GTEST_SHUFFLE=1 GTEST_TOTAL_SHARDS=6 GTEST_SHARD_INDEX=0 GTEST_RANDOM_SEED=123 {{.*}}[[FILE]]
# CHECK-NEXT: --
# CHECK-EMPTY:
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestB
# CHECK-NEXT: --
+# CHECK-NEXT: I am subTest B output
+# CHECK-EMPTY:
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK-EMPTY:
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
# CHECK-INF: -- Testing:
-# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0/2
+# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest.py]]/0/2
# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/0/2' FAILED ********************
# CHECK-INF-NEXT: Script(shard):
# CHECK-INF-NEXT: --
-# CHECK-INF-NEXT: GTEST_COLOR=no
-# CHECK-INF-NEXT: GTEST_SHUFFLE=0
-# CHECK-INF-NEXT: GTEST_TOTAL_SHARDS=2
-# CHECK-INF-NEXT: GTEST_SHARD_INDEX=0
-# CHECK-INF-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
-# CHECK-INF-NEXT: [[FILE]]
+# CHECK-INF-NEXT: GTEST_OUTPUT=json:{{[^[:space:]]*}} GTEST_SHUFFLE=0 GTEST_TOTAL_SHARDS=2 GTEST_SHARD_INDEX=0 {{.*}}[[FILE]]
+# CHECK-INF-NEXT: --
+# CHECK-INF-EMPTY:
+# CHECK-INF-NEXT: [ RUN ] T.InfiniteLoopSubTest
+# CHECK-INF-NEXT: some in progess output
+# CHECK-INF-EMPTY:
+# CHECK-INF-NEXT: --
+# CHECK-INF-NEXT: exit:
# CHECK-INF-NEXT: --
# CHECK-INF-NEXT: Reached timeout of 1 seconds
# CHECK-INF: Timed Out: 1