# Copyright 2018 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reads lines from files or stdin and identifies C++ tests.

Outputs a filter that can be used with --gtest_filter or a filter file to
run only the tests identified.

Usage examples:

Outputs filter for all test fixtures in a directory. --class-only avoids an
overly long filter string.
$ cat components/mycomp/**test.cc | make_gtest_filter.py --class-only

Outputs filter for all tests in a file.
$ make_gtest_filter.py ./myfile_unittest.cc

Outputs filter for only the test at line 123.
$ make_gtest_filter.py --line=123 ./myfile_unittest.cc

Formats output as a GTest filter file.
$ make_gtest_filter.py ./myfile_unittest.cc --as-filter-file

Use a JSON failure summary as the input.
$ make_gtest_filter.py summary.json --from-failure-summary

Elide the filter list using wildcards when possible.
$ make_gtest_filter.py summary.json --from-failure-summary --wildcard-compress
"""
from __future__ import print_function

import argparse
import fileinput
import json
import re
import sys
# Wildcard character appended to an elided prefix in a gtest filter.
WILDCARD = '*'


class TrieNode:
  """A node in a prefix trie over PascalCase-split test-name terms.

  Each edge is one term (see PascalCaseSplit); an empty child set marks
  end-of-string.
  """

  def __init__(self):
    # The number of strings which terminated on or underneath this node.
    self.num_strings = 0

    # The prefix subtries which follow |this|, keyed by their next character.
    self.children = {}
def PascalCaseSplit(input_string):
  """Yields the successive terms of |input_string|, split on PascalCase
  boundaries.

  A boundary is a lower->UPPER transition or any change of character class
  (alpha vs. non-alpha, alnum vs. non-alnum), so 'FooBar_2x' yields
  'Foo', 'Bar', '_', '2', 'x'-style terms.
  """
  current_term = []
  prev_char = ''

  for current_char in input_string:
    is_boundary = prev_char != '' and \
        ((current_char.isupper() and prev_char.islower()) or \
         (current_char.isalpha() != prev_char.isalpha()) or \
         (current_char.isalnum() != prev_char.isalnum()))
    prev_char = current_char
    if is_boundary:
      # Emit the finished term and start collecting the next one.
      yield ''.join(current_term)
      current_term = []
    current_term.append(current_char)

  # Flush the trailing term, if any.
  if len(current_term) > 0:
    yield ''.join(current_term)
def TrieInsert(trie, value):
  """Inserts the characters of 'value' into a trie, with every edge representing
  a single character. An empty child set indicates end-of-string."""
  for term in PascalCaseSplit(value):
    # Every node along the path counts this string as passing through it.
    trie.num_strings = trie.num_strings + 1
    if term in trie.children:
      trie = trie.children[term]
    else:
      # No existing edge for this term: grow the trie.
      subtrie = TrieNode()
      trie.children[term] = subtrie
      trie = subtrie

  # Count the string on its terminating node as well.
  trie.num_strings = trie.num_strings + 1
def ComputeWildcardsFromTrie(trie, min_depth, min_cases):
  """Computes a list of wildcarded test case names from a trie using a depth
  threshold (|min_depth|) and a minimum number of cases (|min_cases|) before
  a prefix may be elided with a wildcard."""
  # Stack of values to process, initialized with the root node.
  # The first item of the tuple is the substring represented by the traversal so
  # far.
  # The second item of the tuple is the TrieNode itself.
  # The third item is the depth of the traversal so far.
  to_process = [('', trie, 0)]

  # Iterative DFS over the trie.
  while len(to_process) > 0:
    cur_prefix, cur_trie, cur_depth = to_process.pop()
    assert (cur_trie.num_strings != 0)

    if len(cur_trie.children) == 0:
      # No more children == we're at the end of a string.
      yield cur_prefix
    elif (cur_depth == min_depth) and \
        cur_trie.num_strings > min_cases:
      # Trim traversal of this path if the path is deep enough and there
      # are enough entries to warrant elision.
      yield cur_prefix + WILDCARD
    else:
      # Traverse all children of this node.
      for term, subtrie in cur_trie.children.items():
        to_process.append((cur_prefix + term, subtrie, cur_depth + 1))
def CompressWithWildcards(test_list, min_depth, min_cases):
  """Given a list of SUITE.CASE names, generates an exclusion list using
  wildcards to reduce redundancy.

  For example, Foo.TestOne + Foo.TestTwo may become Foo.Test* when the
  thresholds are met. Returns a sorted list of filter strings.
  """
  suite_tries = {}
  output = []

  # First build up a trie based representations of all test case names,
  # partitioned per-suite.
  for case in test_list:
    suite_name, test = case.split('.')
    if not suite_name in suite_tries:
      suite_tries[suite_name] = TrieNode()
    TrieInsert(suite_tries[suite_name], test)

  # Go through the suites' tries and generate wildcarded representations
  # of their cases.
  for suite in suite_tries.items():
    suite_name, cases_trie = suite
    for case_wildcard in ComputeWildcardsFromTrie(cases_trie, min_depth, \
                                                  min_cases):
      output.append("{}.{}".format(suite_name, case_wildcard))

  return sorted(output)
def GetFailedTestsFromTestLauncherSummary(summary):
  """Returns the deduplicated names of all cases with at least one FAILURE
  result in a test-launcher JSON summary dict."""
  failures = set()
  for iteration in summary['per_iteration_data']:
    for case_name, results in iteration.items():
      # A case may run several times per iteration (e.g. retries).
      for result in results:
        if result['status'] == 'FAILURE':
          failures.add(case_name)
  return list(failures)
def GetFiltersForTests(tests, class_only):
  """Maps SUITE.CASE names to gtest filter patterns that also match the
  instantiated (TEST_P / TYPED_TEST) forms of the same tests.

  When |class_only| is true, emits fixture-level patterns only, which keeps
  the filter string short."""
  # Note: Test names have the following structures:
  # * FixtureName.TestName
  # * InstantiationName/FixtureName.TestName/## (for TEST_P)
  # * InstantiationName/FixtureName/ParameterId.TestName (for TYPED_TEST_P)
  # * FixtureName.TestName/##
  # * FixtureName/##.TestName (for TYPED_TEST)
  # Since this script doesn't parse instantiations, we generate filters to
  # match either regular tests or instantiated tests.
  if class_only:
    fixtures = set([t.split('.')[0] for t in tests])
    return [c + '.*' for c in fixtures] + \
           ['*/' + c + '.*/*' for c in fixtures] + \
           ['*/' + c + '/*.*' for c in fixtures] + \
           [c + '.*/*' for c in fixtures] + \
           [c + '/*.*' for c in fixtures]

  fixtures_and_tcs = [test.split('.', 1) for test in tests]
  return [c for c in tests] + \
         ['*/' + c + '/*' for c in tests] + \
         [c + '/*' for c in tests] + \
         [fixture + '/*.' + tc for fixture, tc in fixtures_and_tcs]
def main():
  """Parses arguments, reads test definitions or failure summaries, and
  prints a gtest filter to stdout.

  Returns 0 so callers can pass the result to sys.exit()."""
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--input-format',
      choices=['swarming_summary', 'test_launcher_summary', 'test_file'],
      default='test_file')
  parser.add_argument('--output-format',
                      choices=['file', 'args'],
                      default='args')
  parser.add_argument('--wildcard-compress', action='store_true')
  parser.add_argument(
      '--wildcard-min-depth',
      type=int,
      default=1,  # NOTE(review): default was elided in source -- confirm.
      help="Minimum number of terms in a case before a wildcard may be " +
      "used, so that prefixes are not excessively broad.")
  parser.add_argument(
      '--wildcard-min-cases',
      type=int,
      default=3,  # NOTE(review): default was elided in source -- confirm.
      help="Minimum number of cases in a filter before folding into a " +
      "wildcard, so as to not create wildcards needlessly for small "
      "numbers of similarly named test failures.")
  parser.add_argument('--line', type=int)
  parser.add_argument('--class-only', action='store_true')
  parser.add_argument(
      '--as-exclusions',
      action='store_true',
      help='Generate exclusion rules for test cases, instead of inclusions.')
  # Positional leftovers are treated as input file names.
  args, left = parser.parse_known_args()

  test_filters = []
  if args.input_format == 'swarming_summary':
    # Decode the JSON files separately and combine their contents.
    for json_file in left:
      # 'with' closes the handle (the original open() leaked it).
      with open(json_file, 'r') as f:
        test_filters.extend(json.load(f))

    if args.wildcard_compress:
      test_filters = CompressWithWildcards(test_filters,
                                           args.wildcard_min_depth,
                                           args.wildcard_min_cases)
  elif args.input_format == 'test_launcher_summary':
    # Decode the JSON files separately and combine their contents.
    tests = []
    for json_file in left:
      with open(json_file, 'r') as f:
        tests.extend(GetFailedTestsFromTestLauncherSummary(json.load(f)))

    if args.wildcard_compress:
      test_filters = CompressWithWildcards(tests,
                                           args.wildcard_min_depth,
                                           args.wildcard_min_cases)
    else:
      test_filters = tests
  else:
    # Scrape C++ test definitions from the given files (or stdin).
    file_input = fileinput.input(left)

    if args.line:
      # If --line is used, restrict text to a few lines around the requested
      # line.
      requested_line = args.line
      selected_lines = []
      for line in file_input:
        if (fileinput.lineno() >= requested_line
            and fileinput.lineno() <= requested_line + 1):
          selected_lines.append(line)
      txt = ''.join(selected_lines)
    else:
      txt = ''.join(list(file_input))

    # This regex is not exhaustive, and should be updated as needed.
    rx = re.compile(
        r'^(?:TYPED_)?(?:IN_PROC_BROWSER_)?TEST(_F|_P)?\(\s*(\w+)\s*' + \
        r',\s*(\w+)\s*\)',
        flags=re.DOTALL | re.M)

    tests = []
    for m in rx.finditer(txt):
      tests.append(m.group(2) + '.' + m.group(3))

    if args.wildcard_compress:
      test_filters = CompressWithWildcards(tests, args.wildcard_min_depth,
                                           args.wildcard_min_cases)
    else:
      test_filters = GetFiltersForTests(tests, args.class_only)

  if args.as_exclusions:
    test_filters = ['-' + x for x in test_filters]

  if args.output_format == 'file':
    print('\n'.join(test_filters))
  else:
    print(':'.join(test_filters))

  return 0
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit code.
  sys.exit(main())