# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import logging
import signal
import time

from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures

_log = logging.getLogger(__name__)

# This matches what the shell does on POSIX.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
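# (SIGINT is 2 on POSIX systems, so an interrupted run exits with status 130.)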

# POSIX limits status codes to 0-255. Normally run-webkit-tests returns the number
# of tests that failed. These codes indicate exceptional conditions triggered by
# the script itself, so we count backwards from 255 (aka -1) to enumerate them.
#
# FIXME: crbug.com/357866. We really shouldn't return the number of failures
# in the exit code at all.
EARLY_EXIT_STATUS = 251
SYS_DEPS_EXIT_STATUS = 252
NO_TESTS_EXIT_STATUS = 253
NO_DEVICES_EXIT_STATUS = 254
UNEXPECTED_ERROR_EXIT_STATUS = 255

ERROR_CODES = (
    INTERRUPTED_EXIT_STATUS,
    EARLY_EXIT_STATUS,
    SYS_DEPS_EXIT_STATUS,
    NO_TESTS_EXIT_STATUS,
    NO_DEVICES_EXIT_STATUS,
    UNEXPECTED_ERROR_EXIT_STATUS,
)

# In order to avoid colliding with the above codes, we put a ceiling on
# the value returned by num_regressions.
MAX_FAILURES_EXIT_STATUS = 101
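# Callers that report num_regressions as the exit code are expected to clamp
# it, e.g. min(num_regressions, MAX_FAILURES_EXIT_STATUS), so that a large
# failure count can never collide with the reserved codes above.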


class TestRunException(Exception):
    def __init__(self, code, msg):
        self.code = code
        self.msg = msg


class TestRunResults(object):
    def __init__(self, expectations, num_tests):
        self.total = num_tests
        self.remaining = self.total
        self.expectations = expectations
        self.expected = 0
        self.expected_failures = 0
        self.unexpected = 0
        self.unexpected_failures = 0
        self.unexpected_crashes = 0
        self.unexpected_timeouts = 0
        self.tests_by_expectation = {}
        self.tests_by_timeline = {}
        self.results_by_name = {}  # Map of test name to the last result for the test.
        self.all_results = []  # All results from a run, including every iteration of every test.
        self.unexpected_results_by_name = {}
        self.failures_by_name = {}
        self.total_failures = 0
        self.expected_skips = 0
        for expectation in test_expectations.TestExpectations.EXPECTATIONS.values():
            self.tests_by_expectation[expectation] = set()
        for timeline in test_expectations.TestExpectations.TIMELINES.values():
            self.tests_by_timeline[timeline] = expectations.get_tests_with_timeline(timeline)
        self.slow_tests = set()
        self.interrupted = False
        self.keyboard_interrupted = False
        self.run_time = 0  # The wall clock time spent running the tests (layout_test_runner.run()).

    def add(self, test_result, expected, test_is_slow):
        result_type_for_stats = test_result.type
        if test_expectations.WONTFIX in self.expectations.model().get_expectations(test_result.test_name):
            result_type_for_stats = test_expectations.WONTFIX
        self.tests_by_expectation[result_type_for_stats].add(test_result.test_name)

        self.results_by_name[test_result.test_name] = test_result
        if test_result.type != test_expectations.SKIP:
            self.all_results.append(test_result)
        self.remaining -= 1

        if len(test_result.failures):
            self.total_failures += 1
            self.failures_by_name[test_result.test_name] = test_result.failures

        if expected:
            self.expected += 1
            if test_result.type == test_expectations.SKIP:
                self.expected_skips += 1
            elif test_result.type != test_expectations.PASS:
                self.expected_failures += 1
        else:
            self.unexpected_results_by_name[test_result.test_name] = test_result
            self.unexpected += 1
            if len(test_result.failures):
                self.unexpected_failures += 1
            if test_result.type == test_expectations.CRASH:
                self.unexpected_crashes += 1
            elif test_result.type == test_expectations.TIMEOUT:
                self.unexpected_timeouts += 1

        if test_is_slow:
            self.slow_tests.add(test_result.test_name)
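
# A minimal usage sketch (hypothetical driver code): the test runner creates
# one TestRunResults per pass and feeds every result through add():
#
#   run_results = TestRunResults(expectations, len(test_names))
#   for test_result in completed_results:
#       was_expected = expectations.matches_an_expected_result(
#           test_result.test_name, test_result.type, pixel_tests, sanitizer)
#       run_results.add(test_result, was_expected, test_is_slow=False)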


class RunDetails(object):
    def __init__(self, exit_code, summarized_full_results=None, summarized_failing_results=None, initial_results=None, retry_results=None, enabled_pixel_tests_in_retry=False):
        self.exit_code = exit_code
        self.summarized_full_results = summarized_full_results
        self.summarized_failing_results = summarized_failing_results
        self.initial_results = initial_results
        self.retry_results = retry_results
        self.enabled_pixel_tests_in_retry = enabled_pixel_tests_in_retry
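
# RunDetails bundles everything a caller needs once a run completes: the
# process exit code plus the summarized (JSON-ready) and raw result sets for
# the initial run and, if tests were retried, the retry run.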


def _interpret_test_failures(failures):
    test_dict = {}
    failure_types = [type(failure) for failure in failures]
    # FIXME: get rid of all of these is_* values once there is a 1:1 map between
    # TestFailure type and test_expectations.EXPECTATION.
    if test_failures.FailureMissingAudio in failure_types:
        test_dict['is_missing_audio'] = True

    if test_failures.FailureMissingResult in failure_types:
        test_dict['is_missing_text'] = True

    if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
        test_dict['is_missing_image'] = True

    if test_failures.FailureTestHarnessAssertion in failure_types:
        test_dict['is_testharness_test'] = True

    return test_dict
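
# For example, a result whose only failure is a missing text baseline yields
# {'is_missing_text': True}; an empty dict means no extra JSON annotations
# are needed for that result's failure types.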


def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False):
    """Returns a dictionary containing a summary of the test runs, with the following fields:
        'version': a version indicator
        'fixable': The number of fixable tests (NOW - PASS)
        'skipped': The number of skipped tests (NOW & SKIPPED)
        'num_regressions': The number of non-flaky failures
        'num_flaky': The number of flaky failures
        'num_passes': The number of unexpected passes
        'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
    """
    results = {}
    results['version'] = 3

    tbe = initial_results.tests_by_expectation
    tbt = initial_results.tests_by_timeline
    results['fixable'] = len(tbt[test_expectations.NOW] - tbe[test_expectations.PASS])
    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
    results['skipped'] = len(tbt[test_expectations.NOW] & tbe[test_expectations.SKIP])

    num_passes = 0
    num_flaky = 0
    num_regressions = 0
    keywords = {}
    for expectation_string, expectation_enum in test_expectations.TestExpectations.EXPECTATIONS.iteritems():
        keywords[expectation_enum] = expectation_string.upper()

    num_failures_by_type = {}
    for expectation in initial_results.tests_by_expectation:
        tests = initial_results.tests_by_expectation[expectation]
        if expectation != test_expectations.WONTFIX:
            tests &= tbt[test_expectations.NOW]
        num_failures_by_type[keywords[expectation]] = len(tests)
    # The number of failures by type.
    results['num_failures_by_type'] = num_failures_by_type

    tests = {}

    for test_name, result in initial_results.results_by_name.iteritems():
        expected = expectations.get_expectations_string(test_name)
        result_type = result.type
        actual = [keywords[result_type]]

        if only_include_failing and result.type == test_expectations.SKIP:
            continue

        if result_type == test_expectations.PASS:
            num_passes += 1
            if not result.has_stderr and only_include_failing:
                continue
        elif result_type != test_expectations.SKIP and test_name in initial_results.unexpected_results_by_name:
            if retry_results:
                if test_name not in retry_results.unexpected_results_by_name:
                    # The test failed unexpectedly at first, but ran as expected the second time -> flaky.
                    actual.extend(expectations.get_expectations_string(test_name).split(" "))
                    num_flaky += 1
                else:
                    retry_result_type = retry_results.unexpected_results_by_name[test_name].type
                    if retry_result_type == test_expectations.PASS:
                        # The test failed unexpectedly at first, then passed unexpectedly -> unexpected pass.
                        num_passes += 1
                        if not result.has_stderr and only_include_failing:
                            continue
                    else:
                        # The test failed unexpectedly both times -> regression.
                        num_regressions += 1
                        if not keywords[retry_result_type] in actual:
                            actual.append(keywords[retry_result_type])
            else:
                # The test failed unexpectedly, but we didn't do any retries -> regression.
                num_regressions += 1

        test_dict = {}

        rounded_run_time = round(result.test_run_time, 1)
        if rounded_run_time:
            test_dict['time'] = rounded_run_time

        if result.has_stderr:
            test_dict['has_stderr'] = True

        bugs = expectations.model().get_expectation_line(test_name).bugs
        if bugs:
            test_dict['bugs'] = bugs

        if result.reftest_type:
            test_dict.update(reftest_type=list(result.reftest_type))

        test_dict['expected'] = expected
        test_dict['actual'] = " ".join(actual)

        def is_expected(actual_result):
            return expectations.matches_an_expected_result(test_name, result_type,
                                                           port_obj.get_option('pixel_tests') or result.reftest_type,
                                                           port_obj.get_option('enable_sanitizer'))

        # To avoid bloating the output results json too much, only add an entry for whether the failure is unexpected.
        if not all(is_expected(actual_result) for actual_result in actual):
            test_dict['is_unexpected'] = True

        test_dict.update(_interpret_test_failures(result.failures))

        if retry_results:
            retry_result = retry_results.unexpected_results_by_name.get(test_name)
            if retry_result:
                test_dict.update(_interpret_test_failures(retry_result.failures))

        if result.has_repaint_overlay:
            test_dict['has_repaint_overlay'] = True

        # Store test hierarchically by directory. e.g.
        # foo/bar/baz.html: test_dict
        # foo/bar/baz1.html: test_dict
        # would be stored as:
        # {
        #   'foo': {
        #     'bar': {
        #       'baz.html': test_dict,
        #       'baz1.html': test_dict
        #     }
        #   }
        # }
        parts = test_name.split('/')
        current_map = tests
        for i, part in enumerate(parts):
            if i == (len(parts) - 1):
                current_map[part] = test_dict
                break
            if part not in current_map:
                current_map[part] = {}
            current_map = current_map[part]

    results['tests'] = tests
    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
    results['num_passes'] = num_passes
    results['num_flaky'] = num_flaky
    # FIXME: Remove this. It is redundant with results['num_failures_by_type'].
    results['num_regressions'] = num_regressions
    results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
    results['layout_tests_dir'] = port_obj.layout_tests_dir()
    results['has_wdiff'] = port_obj.wdiff_available()
    results['has_pretty_patch'] = port_obj.pretty_patch_available()
    results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
    results['seconds_since_epoch'] = int(time.time())
    results['build_number'] = port_obj.get_option('build_number')
    results['builder_name'] = port_obj.get_option('builder_name')

    # Don't do this by default since it takes >100ms.
    # It's only used for uploading data to the flakiness dashboard.
    results['chromium_revision'] = ''
    results['blink_revision'] = ''
    if port_obj.get_option('builder_name'):
        for (name, path) in port_obj.repository_paths():
            rev = None
            scm = port_obj.host.scm_for_path(path)
            if scm:
                rev = scm.svn_revision(path)
            if rev:
                results[name.lower() + '_revision'] = rev
            else:
                _log.warn('Failed to determine svn revision for %s, '
                          'leaving "%s_revision" key blank in full_results.json.'
                          % (path, name))

    return results
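
# Illustrative shape of one summarized entry for a flaky test (values invented):
#   results['tests']['foo']['bar']['baz.html'] ==
#       {'expected': 'PASS', 'actual': 'TEXT PASS', 'time': 0.3, 'is_unexpected': True}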