# Copyright (C) 2010, 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Package that handles non-debug, non-file output for run-webkit-tests."""

import math
import optparse

from webkitpy.tool import grammar
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models.test_expectations import TestExpectations, TestExpectationParser
from webkitpy.layout_tests.views.metered_stream import MeteredStream


NUM_SLOW_TESTS_TO_LOG = 10


def print_options():
    return [
        optparse.make_option('-q', '--quiet', action='store_true', default=False,
                             help='run quietly (errors, warnings, and progress only)'),
        optparse.make_option('--timing', action='store_true', default=False,
                             help='display test times (summary plus per-test w/ --verbose)'),
        optparse.make_option('-v', '--verbose', action='store_true', default=False,
                             help='print a summarized result for every test (one line per test)'),
        optparse.make_option('--details', action='store_true', default=False,
                             help='print detailed results for every test'),
        optparse.make_option('--debug-rwt-logging', action='store_true', default=False,
                             help='print timestamps and debug information for run-webkit-tests itself'),
    ]
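
# A minimal usage sketch (hypothetical caller, not part of run-webkit-tests itself):
# the list returned by print_options() can be handed straight to optparse, e.g.
#
#   parser = optparse.OptionParser(option_list=print_options())
#   options, args = parser.parse_args(['--timing', '--verbose'])
#   # options.timing and options.verbose are now True; options.quiet stays False.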


class Printer(object):
    """Class handling all non-debug-logging printing done by run-webkit-tests."""

    def __init__(self, port, options, regular_output, logger=None):
        self.num_completed = 0
        self.num_tests = 0
        self._port = port
        self._options = options
        self._meter = MeteredStream(regular_output, options.debug_rwt_logging, logger=logger,
                                    number_of_columns=self._port.host.platform.terminal_width())
        self._running_tests = []
        self._completed_tests = []

    def print_config(self, results_directory):
        self._print_default("Using port '%s'" % self._port.name())
        self._print_default("Test configuration: %s" % self._port.test_configuration())
        self._print_default("View the test results at file://%s/results.html" % results_directory)

        if self._options.enable_versioned_results:
            self._print_default("View the archived results dashboard at file://%s/dashboard.html" % results_directory)

        # FIXME: should these options be in printing_options?
        if self._options.new_baseline:
            self._print_default("Placing new baselines in %s" % self._port.baseline_path())

        fs = self._port.host.filesystem
        fallback_path = [fs.split(x)[1] for x in self._port.baseline_search_path()]
        self._print_default("Baseline search path: %s -> generic" % " -> ".join(fallback_path))

        self._print_default("Using %s build" % self._options.configuration)
        if self._options.pixel_tests:
            self._print_default("Pixel tests enabled")
        else:
            self._print_default("Pixel tests disabled")

        self._print_default("Regular timeout: %s, slow test timeout: %s" %
                            (self._options.time_out_ms, self._options.slow_time_out_ms))

        self._print_default('Command line: ' + ' '.join(self._port.driver_cmd_line()))
        self._print_default('')

    def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
        found_str = 'Found %s; running %d' % (grammar.pluralize('test', num_all_test_files), num_to_run)
        if repeat_each * iterations > 1:
            found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (repeat_each * iterations, repeat_each, iterations)
        found_str += ', skipping %d' % (num_all_test_files - num_to_run)
        self._print_default(found_str + '.')

    def print_expected(self, run_results, tests_with_result_type_callback):
        self._print_expected_results_of_type(run_results, test_expectations.PASS, "passes", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FAIL, "failures", tests_with_result_type_callback)
        self._print_expected_results_of_type(run_results, test_expectations.FLAKY, "flaky", tests_with_result_type_callback)
        self._print_debug('')

    def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
        driver_name = self._port.driver_name()
        if num_workers == 1:
            self._print_default("Running 1 %s." % driver_name)
            self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
        else:
            self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
            self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
        self._print_default('')

    def _print_expected_results_of_type(self, run_results, result_type, result_type_str, tests_with_result_type_callback):
        tests = tests_with_result_type_callback(result_type)
        now = run_results.tests_by_timeline[test_expectations.NOW]
        wontfix = run_results.tests_by_timeline[test_expectations.WONTFIX]

        # We use a fancy format string in order to print the data out in a
        # nicely-aligned table.
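        # For example (illustrative counts only, not from a real run), with a
        # four-digit 'now' total and a two-digit 'wontfix' total the expanded
        # format string is "Expect: %5d %-8s (%4d now, %2d wontfix)", which
        # prints lines like "Expect:   500 passes   ( 480 now, 20 wontfix)".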
        fmtstr = ("Expect: %%5d %%-8s (%%%dd now, %%%dd wontfix)"
                  % (self._num_digits(now), self._num_digits(wontfix)))
        self._print_debug(fmtstr % (len(tests), result_type_str, len(tests & now), len(tests & wontfix)))

    def _num_digits(self, num):
        ndigits = 1
        if len(num):
            ndigits = int(math.log10(len(num))) + 1
        return ndigits

    def print_results(self, run_time, run_results, summarized_results):
        self._print_timing_statistics(run_time, run_results)
        self._print_one_line_summary(run_time, run_results)

    def _print_timing_statistics(self, total_time, run_results):
        self._print_debug("Test timing:")
        self._print_debug("  %6.2f total testing time" % total_time)
        self._print_debug("")

        self._print_worker_statistics(run_results, int(self._options.child_processes))
        self._print_aggregate_test_statistics(run_results)
        self._print_individual_test_times(run_results)
        self._print_directory_timings(run_results)

    def _print_worker_statistics(self, run_results, num_workers):
        self._print_debug("Thread timing:")
        stats = {}
        cuml_time = 0
        for result in run_results.results_by_name.values():
            stats.setdefault(result.worker_name, {'num_tests': 0, 'total_time': 0})
            stats[result.worker_name]['num_tests'] += 1
            stats[result.worker_name]['total_time'] += result.total_run_time
            cuml_time += result.total_run_time

        for worker_name in stats:
            self._print_debug("    %10s: %5d tests, %6.2f secs" % (worker_name, stats[worker_name]['num_tests'], stats[worker_name]['total_time']))
        self._print_debug("   %6.2f cumulative, %6.2f optimal" % (cuml_time, cuml_time / num_workers))
        self._print_debug("")

    def _print_aggregate_test_statistics(self, run_results):
        times_for_dump_render_tree = [result.test_run_time for result in run_results.results_by_name.values()]
        self._print_statistics_for_test_timings("PER TEST TIME IN TESTSHELL (seconds):", times_for_dump_render_tree)

    def _print_individual_test_times(self, run_results):
        # Reverse-sort by the time spent in the driver.
        individual_test_timings = sorted(run_results.results_by_name.values(), key=lambda result: result.test_run_time, reverse=True)

        num_printed = 0
        slow_tests = []
        timeout_or_crash_tests = []
        unexpected_slow_tests = []
        for test_tuple in individual_test_timings:
            test_name = test_tuple.test_name
            is_timeout_crash_or_slow = False
            if test_name in run_results.slow_tests:
                is_timeout_crash_or_slow = True
                slow_tests.append(test_tuple)

            if test_name in run_results.failures_by_name:
                result = run_results.results_by_name[test_name].type
                if (result == test_expectations.TIMEOUT or
                        result == test_expectations.CRASH):
                    is_timeout_crash_or_slow = True
                    timeout_or_crash_tests.append(test_tuple)

            if (not is_timeout_crash_or_slow and num_printed < NUM_SLOW_TESTS_TO_LOG):
                num_printed = num_printed + 1
                unexpected_slow_tests.append(test_tuple)

        self._print_debug("")
        if unexpected_slow_tests:
            self._print_test_list_timing("%s slowest tests that are not marked as SLOW and did not timeout/crash:" %
                                         NUM_SLOW_TESTS_TO_LOG, unexpected_slow_tests)
            self._print_debug("")

        if slow_tests:
            self._print_test_list_timing("Tests marked as SLOW:", slow_tests)
            self._print_debug("")

        if timeout_or_crash_tests:
            self._print_test_list_timing("Tests that timed out or crashed:", timeout_or_crash_tests)
            self._print_debug("")

    def _print_test_list_timing(self, title, test_list):
        self._print_debug(title)
        for test_tuple in test_list:
            test_run_time = round(test_tuple.test_run_time, 1)
            self._print_debug("  %s took %s seconds" % (test_tuple.test_name, test_run_time))

    def _print_directory_timings(self, run_results):
        stats = {}
        for result in run_results.results_by_name.values():
            stats.setdefault(result.shard_name, {'num_tests': 0, 'total_time': 0})
            stats[result.shard_name]['num_tests'] += 1
            stats[result.shard_name]['total_time'] += result.total_run_time

        min_seconds_to_print = 15

        timings = []
        for directory in stats:
            rounded_time = round(stats[directory]['total_time'], 1)
            if rounded_time > min_seconds_to_print:
                timings.append((directory, rounded_time, stats[directory]['num_tests']))

        if not timings:
            return

        timings.sort()

        self._print_debug("Time to process slowest subdirectories:")
        for timing in timings:
            self._print_debug("  %s took %s seconds to run %s tests." % timing)
        self._print_debug("")

    def _print_statistics_for_test_timings(self, title, timings):
        self._print_debug(title)
        timings.sort()

        num_tests = len(timings)
        if not num_tests:
            return
        percentile90 = timings[int(.9 * num_tests)]
        percentile99 = timings[int(.99 * num_tests)]

        if num_tests % 2 == 1:
            median = timings[(num_tests - 1) / 2]
        else:
            lower = timings[num_tests / 2 - 1]
            upper = timings[num_tests / 2]
            median = (float(lower + upper)) / 2

        mean = sum(timings) / num_tests

        sum_of_deviations = 0
        for timing in timings:
            sum_of_deviations += math.pow(timing - mean, 2)

        std_deviation = math.sqrt(sum_of_deviations / num_tests)
        self._print_debug("  Median:          %6.3f" % median)
        self._print_debug("  Mean:            %6.3f" % mean)
        self._print_debug("  90th percentile: %6.3f" % percentile90)
        self._print_debug("  99th percentile: %6.3f" % percentile99)
        self._print_debug("  Standard dev:    %6.3f" % std_deviation)
        self._print_debug("")

    def _print_one_line_summary(self, total_time, run_results):
        if self._options.timing:
            parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())

            # There is serial overhead in layout_test_runner.run() that we can't easily account for when
            # really running in parallel, but taking the min() ensures that in the worst case
            # (if parallel time is less than run_time) we do account for it.
            serial_time = total_time - min(run_results.run_time, parallel_time)

            speedup = (parallel_time + serial_time) / total_time
            timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
        else:
            timing_summary = ''
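
        # Illustrative numbers (made up, not from a real run): with 40.0s of
        # aggregate per-test time (parallel_time), 12.0s of wall-clock time in the
        # parallel phase (run_results.run_time) and 15.0s overall (total_time),
        # serial_time is 15.0 - min(12.0, 40.0) = 3.0s and the reported speedup is
        # (40.0 + 3.0) / 15.0, roughly 2.9x.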

        total = run_results.total - run_results.expected_skips
        expected = run_results.expected - run_results.expected_skips
        unexpected = run_results.unexpected
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        expected_summary_str = ''
        if run_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected - run_results.expected_failures, run_results.expected_failures)

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
                else:
                    summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
            else:
                summary = "%s ran as expected%s%s%s." % (grammar.pluralize('test', expected), expected_summary_str, incomplete_str, timing_summary)
        else:
            summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize('test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)

        self._print_quiet(summary)
        self._print_quiet("")

    def _test_status_line(self, test_name, suffix):
        format_string = '[%d/%d] %s%s'
        status_line = format_string % (self.num_completed, self.num_tests, test_name, suffix)
        if len(status_line) > self._meter.number_of_columns():
            overflow_columns = len(status_line) - self._meter.number_of_columns()
            ellipsis = '...'
            if len(test_name) < overflow_columns + len(ellipsis) + 2:
                # We don't have enough space even if we elide; just show the test filename.
                fs = self._port.host.filesystem
                test_name = fs.split(test_name)[1]
            else:
                new_length = len(test_name) - overflow_columns - len(ellipsis)
                prefix = int(new_length / 2)
                test_name = test_name[:prefix] + ellipsis + test_name[-(new_length - prefix):]
        return format_string % (self.num_completed, self.num_tests, test_name, suffix)
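
    # Illustrative behavior (example values, not taken from a real run): on a narrow
    # terminal a line such as '[967/1000] fast/dom/a-very-long-test-name.html failed'
    # keeps its prefix and suffix and has the middle of the test name replaced with
    # '...'; if even that would not fit, only the test's filename is shown.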

    def print_started_test(self, test_name):
        self._running_tests.append(test_name)
        if len(self._running_tests) > 1:
            suffix = ' (+%d)' % (len(self._running_tests) - 1)
        else:
            suffix = ''
        if self._options.verbose:
            write = self._meter.write_update
        else:
            write = self._meter.write_throttled_update
        write(self._test_status_line(test_name, suffix))

    def print_finished_test(self, result, expected, exp_str, got_str):
        self.num_completed += 1
        test_name = result.test_name

        result_message = self._result_message(result.type, result.failures, expected,
                                              self._options.timing, result.test_run_time)

        if self._options.details:
            self._print_test_trace(result, exp_str, got_str)
        elif self._options.verbose or not expected:
            self.writeln(self._test_status_line(test_name, result_message))
        elif self.num_completed == self.num_tests:
            self._meter.write_update('')
        else:
            if test_name == self._running_tests[0]:
                self._completed_tests.insert(0, [test_name, result_message])
            else:
                self._completed_tests.append([test_name, result_message])

            for test_name, result_message in self._completed_tests:
                self._meter.write_throttled_update(self._test_status_line(test_name, result_message))
            self._completed_tests = []
        self._running_tests.remove(test_name)

    def _result_message(self, result_type, failures, expected, timing, test_run_time):
        exp_string = ' unexpectedly' if not expected else ''
        timing_string = ' %.4fs' % test_run_time if timing else ''
        if result_type == test_expectations.PASS:
            return ' passed%s%s' % (exp_string, timing_string)
        else:
            return ' failed%s (%s)%s' % (exp_string, ', '.join(failure.message() for failure in failures), timing_string)

    def _print_test_trace(self, result, exp_str, got_str):
        test_name = result.test_name
        self._print_default(self._test_status_line(test_name, ''))

        base = self._port.lookup_virtual_test_base(test_name)
        if base:
            args = ' '.join(self._port.lookup_virtual_test_args(test_name))
            self._print_default(' base: %s' % base)
            self._print_default(' args: %s' % args)

        references = self._port.reference_files(test_name)
        if references:
            for _, filename in references:
                self._print_default(' ref: %s' % self._port.relative_test_filename(filename))
        else:
            for extension in ('.txt', '.png', '.wav'):
                self._print_baseline(test_name, extension)

        self._print_default(' exp: %s' % exp_str)
        self._print_default(' got: %s' % got_str)
        self._print_default(' took: %-.3f' % result.test_run_time)
        self._print_default('')

    def _print_baseline(self, test_name, extension):
        baseline = self._port.expected_filename(test_name, extension)
        if self._port._filesystem.exists(baseline):
            relpath = self._port.relative_test_filename(baseline)
        else:
            relpath = '<none>'
        self._print_default(' %s: %s' % (extension[1:], relpath))

    def _print_quiet(self, msg):
        self.writeln(msg)

    def _print_default(self, msg):
        if not self._options.quiet:
            self.writeln(msg)

    def _print_debug(self, msg):
        if self._options.debug_rwt_logging:
            self.writeln(msg)

    def write_throttled_update(self, msg):
        self._meter.write_throttled_update(msg)

    def write_update(self, msg):
        self._meter.write_update(msg)

    def writeln(self, msg):
        self._meter.writeln(msg)