From 7ffb5bc2a25bbe81c1048ffd49a6d927a7b50658 Mon Sep 17 00:00:00 2001
From: Julian Lettner
Date: Fri, 1 May 2020 09:35:46 -0700
Subject: [PATCH] [lit] Factor out report generators into separate file

Factor out the report generators from main.py into reports.py.

I verified that we generate the exact same output by running `check-all`
and comparing the new and old output for both report flavors.
---
 llvm/utils/lit/lit/Test.py    |  42 -----------
 llvm/utils/lit/lit/main.py    |  98 +++---------------
 llvm/utils/lit/lit/reports.py | 128 +++++++++++++++++++++++++
 3 files changed, 136 insertions(+), 132 deletions(-)
 create mode 100755 llvm/utils/lit/lit/reports.py

diff --git a/llvm/utils/lit/lit/Test.py b/llvm/utils/lit/lit/Test.py
index 7bffcd6..6277858 100644
--- a/llvm/utils/lit/lit/Test.py
+++ b/llvm/utils/lit/lit/Test.py
@@ -368,45 +368,3 @@ class Test:
         parallelism or where it is desirable to surface their failures early.
         """
         return self.suite.config.is_early
-
-    def writeJUnitXML(self, fil):
-        """Write the test's report xml representation to a file handle."""
-        test_name = quoteattr(self.path_in_suite[-1])
-        test_path = self.path_in_suite[:-1]
-        safe_test_path = [x.replace(".","_") for x in test_path]
-        safe_name = self.suite.name.replace(".","-")
-
-        if safe_test_path:
-            class_name = safe_name + "." + "/".join(safe_test_path)
-        else:
-            class_name = safe_name + "." + safe_name
-        class_name = quoteattr(class_name)
-        testcase_template = '<testcase classname={class_name} name={test_name} time="{time:.2f}"'
-        elapsed_time = self.result.elapsed if self.result.elapsed is not None else 0.0
-        testcase_xml = testcase_template.format(class_name=class_name,
-                                                test_name=test_name,
-                                                time=elapsed_time)
-        fil.write(testcase_xml)
-        if self.result.code.isFailure:
-            fil.write(">\n\t<failure><![CDATA[")
-            # In the unlikely case that the output contains the CDATA
-            # terminator, we wrap it by creating a new CDATA block.
-            fil.write(self.result.output.replace("]]>", "]]]]><![CDATA[>"))
-            fil.write("]]></failure>\n</testcase>")
-        elif self.result.code == UNSUPPORTED:
-            unsupported_features = self.getMissingRequiredFeatures()
-            if unsupported_features:
-                skip_message = "Skipping because of: " + ", ".join(unsupported_features)
-            else:
-                skip_message = "Skipping because of configuration."
-
-            fil.write(">\n\t<skipped message={} />\n</testcase>\n".format(quoteattr(skip_message)))
-        else:
-            fil.write("/>")
diff --git a/llvm/utils/lit/lit/main.py b/llvm/utils/lit/lit/main.py
index bbaeb0d..d155b0d 100755
--- a/llvm/utils/lit/lit/main.py
+++ b/llvm/utils/lit/lit/main.py
@@ -333,95 +333,13 @@ def print_summary(tests_by_code, quiet, elapsed):
 
 
 def write_test_results(tests, lit_config, elapsed, output_path):
-    # TODO(yln): audit: unexecuted tests
-    # Construct the data we will write.
-    data = {}
-    # Encode the current lit version as a schema version.
-    data['__version__'] = lit.__versioninfo__
-    data['elapsed'] = elapsed
-    # FIXME: Record some information on the lit configuration used?
-    # FIXME: Record information from the individual test suites?
-
-    # Encode the tests.
-    data['tests'] = tests_data = []
-    for test in tests:
-        test_data = {
-            'name' : test.getFullName(),
-            'code' : test.result.code.name,
-            'output' : test.result.output,
-            'elapsed' : test.result.elapsed }
-
-        # Add test metrics, if present.
-        if test.result.metrics:
-            test_data['metrics'] = metrics_data = {}
-            for key, value in test.result.metrics.items():
-                metrics_data[key] = value.todata()
-
-        # Report micro-tests separately, if present
-        if test.result.microResults:
-            for key, micro_test in test.result.microResults.items():
-                # Expand parent test name with micro test name
-                parent_name = test.getFullName()
-                micro_full_name = parent_name + ':' + key
-
-                micro_test_data = {
-                    'name' : micro_full_name,
-                    'code' : micro_test.code.name,
-                    'output' : micro_test.output,
-                    'elapsed' : micro_test.elapsed }
-                if micro_test.metrics:
-                    micro_test_data['metrics'] = micro_metrics_data = {}
-                    for key, value in micro_test.metrics.items():
-                        micro_metrics_data[key] = value.todata()
-
-                tests_data.append(micro_test_data)
-
-        tests_data.append(test_data)
-
-    # Write the output.
-    f = open(output_path, 'w')
-    try:
-        import json
-        json.dump(data, f, indent=2, sort_keys=True)
-        f.write('\n')
-    finally:
-        f.close()
+    import lit.reports
+    r = lit.reports.JsonReport(output_path)
+    r.write_results(tests, elapsed)
+
 
 def write_test_results_xunit(tests, opts):
-    # TODO(yln): audit: unexecuted tests
-    from xml.sax.saxutils import quoteattr
-    # Collect the tests, indexed by test suite
-    by_suite = {}
-    for result_test in tests:
-        suite = result_test.suite.config.name
-        if suite not in by_suite:
-            by_suite[suite] = {
-                'passes'   : 0,
-                'failures' : 0,
-                'skipped'  : 0,
-                'tests'    : [] }
-        by_suite[suite]['tests'].append(result_test)
-        if result_test.isFailure():
-            by_suite[suite]['failures'] += 1
-        elif result_test.result.code == lit.Test.UNSUPPORTED:
-            by_suite[suite]['skipped'] += 1
-        else:
-            by_suite[suite]['passes'] += 1
-    xunit_output_file = open(opts.xunit_output_file, "w")
-    xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
-    xunit_output_file.write("<testsuites>\n")
-    for suite_name, suite in by_suite.items():
-        safe_suite_name = quoteattr(suite_name.replace(".", "-"))
-        xunit_output_file.write("<testsuite name={} tests=\"{}\" failures=\"{}\" skipped=\"{}\">\n".format(
-            safe_suite_name,
-            suite['passes'] + suite['failures'] + suite['skipped'],
-            suite['failures'], suite['skipped']))
-
-        for result_test in suite['tests']:
-            result_test.writeJUnitXML(xunit_output_file)
-            xunit_output_file.write("\n")
-        xunit_output_file.write("</testsuite>\n")
-    xunit_output_file.write("</testsuites>")
-    xunit_output_file.close()
+    import lit.reports
+    r = lit.reports.XunitReport(opts.xunit_output_file)
+    r.write_results(tests, 0.0)
+
diff --git a/llvm/utils/lit/lit/reports.py b/llvm/utils/lit/lit/reports.py
new file mode 100755
index 0000000..d043e7f7
--- /dev/null
+++ b/llvm/utils/lit/lit/reports.py
@@ -0,0 +1,128 @@
+import itertools
+import json
+
+from xml.sax.saxutils import quoteattr as quo
+
+import lit.Test
+
+
+class JsonReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+
+    def write_results(self, tests, elapsed):
+        assert not any(t.result.code in {lit.Test.EXCLUDED, lit.Test.SKIPPED} for t in tests)
+        # Construct the data we will write.
+        data = {}
+        # Encode the current lit version as a schema version.
+        data['__version__'] = lit.__versioninfo__
+        data['elapsed'] = elapsed
+        # FIXME: Record some information on the lit configuration used?
+        # FIXME: Record information from the individual test suites?
+
+        # Encode the tests.
+        data['tests'] = tests_data = []
+        for test in tests:
+            test_data = {
+                'name': test.getFullName(),
+                'code': test.result.code.name,
+                'output': test.result.output,
+                'elapsed': test.result.elapsed}
+
+            # Add test metrics, if present.
+            if test.result.metrics:
+                test_data['metrics'] = metrics_data = {}
+                for key, value in test.result.metrics.items():
+                    metrics_data[key] = value.todata()
+
+            # Report micro-tests separately, if present
+            if test.result.microResults:
+                for key, micro_test in test.result.microResults.items():
+                    # Expand parent test name with micro test name
+                    parent_name = test.getFullName()
+                    micro_full_name = parent_name + ':' + key
+
+                    micro_test_data = {
+                        'name': micro_full_name,
+                        'code': micro_test.code.name,
+                        'output': micro_test.output,
+                        'elapsed': micro_test.elapsed}
+                    if micro_test.metrics:
+                        micro_test_data['metrics'] = micro_metrics_data = {}
+                        for key, value in micro_test.metrics.items():
+                            micro_metrics_data[key] = value.todata()
+
+                    tests_data.append(micro_test_data)
+
+            tests_data.append(test_data)
+
+        with open(self.output_file, 'w') as file:
+            json.dump(data, file, indent=2, sort_keys=True)
+            file.write('\n')
+
+
+class XunitReport(object):
+    def __init__(self, output_file):
+        self.output_file = output_file
+        self.skipped_codes = {lit.Test.EXCLUDED,
+                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}
+
+    # TODO(yln): elapsed unused, put it somewhere?
+    def write_results(self, tests, elapsed):
+        assert not any(t.result.code in {lit.Test.EXCLUDED, lit.Test.SKIPPED} for t in tests)
+        # Suite names are not necessarily unique. Include object identity in
+        # the sort key to avoid mixing tests of different suites.
+        tests.sort(key=lambda t: (t.suite.name, id(t.suite), t.path_in_suite))
+        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
+
+        with open(self.output_file, 'w') as file:
+            file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+            file.write('<testsuites>\n')
+            for suite, test_iter in tests_by_suite:
+                self._write_testsuite(file, suite, list(test_iter))
+            file.write('</testsuites>\n')
+
+    def _write_testsuite(self, file, suite, tests):
+        skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
+        failures = sum(1 for t in tests if t.isFailure())
+
+        name = suite.config.name.replace('.', '-')
+        file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
+        for test in tests:
+            self._write_test(file, test, name)
+        file.write('</testsuite>\n')
+
+    def _write_test(self, file, test, suite_name):
+        path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
+        class_name = f'{suite_name}.{path or suite_name}'
+        name = test.path_in_suite[-1]
+        time = test.result.elapsed or 0.0
+        file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')
+
+        if test.isFailure():
+            file.write('>\n\t<failure><![CDATA[')
+            # In the unlikely case that the output contains the CDATA
+            # terminator, we wrap it by creating a new CDATA block.
+            output = test.result.output.replace(']]>', ']]]]><![CDATA[>')
+            if isinstance(output, bytes):
+                output = output.decode("utf-8", 'ignore')
+            file.write(output)
+            file.write(']]></failure>\n</testcase>\n')
+        elif test.result.code in self.skipped_codes:
+            reason = self._get_skip_reason(test)
+            file.write(f'>\n\t<skipped message={quo(reason)}/>\n</testcase>\n')
+        else:
+            file.write('/>\n')
+
+    def _get_skip_reason(self, test):
+        code = test.result.code
+        if code == lit.Test.EXCLUDED:
+            return 'Test not selected (--filter, --max-tests, --run-shard)'
+        if code == lit.Test.SKIPPED:
+            return 'User interrupt'
+
+        assert code == lit.Test.UNSUPPORTED
+        features = test.getMissingRequiredFeatures()
+        if features:
+            return 'Skipping because of: ' + ', '.join(features)
+        return 'Skipping because of configuration.'
-- 
2.7.4
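
Editor's note on usage: below is a minimal sketch of how a caller might drive
the two new report classes once lit has executed its tests. The class names
and the write_results() signature are taken directly from this patch;
emit_reports itself is a hypothetical helper, not part of lit:

    # Hypothetical driver, assuming a lit checkout on PYTHONPATH and a list
    # of already-executed lit.Test.Test objects plus the total elapsed time.
    import lit.reports

    def emit_reports(tests, elapsed, json_path, xunit_path):
        # JsonReport takes over from the old write_test_results() in main.py.
        lit.reports.JsonReport(json_path).write_results(tests, elapsed)
        # XunitReport takes over from write_test_results_xunit(); main.py
        # passes 0.0 because the xunit writer currently ignores elapsed
        # (see the TODO in reports.py).
        lit.reports.XunitReport(xunit_path).write_results(tests, 0.0)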