# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""The Manager runs a series of tests (TestType interface) against a set
of test files. If a test file fails a TestType, it returns a list of TestFailure
objects to the Manager. The Manager then aggregates the TestFailures to
create a final report."""

import json
import logging
import random
import sys
import time

from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.layout_tests.controllers.layout_test_finder import LayoutTestFinder
from webkitpy.layout_tests.controllers.layout_test_runner import LayoutTestRunner
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.layout_package import json_results_generator
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.models.test_input import TestInput

_log = logging.getLogger(__name__)

# Builder base URL where we have the archived test results.
BUILDER_BASE_URL = "http://build.chromium.org/buildbot/layout_test_results/"

TestExpectations = test_expectations.TestExpectations
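

# A minimal usage sketch (illustrative only; `Host`, `printer`, `options` and
# the exit handling are assumptions, not part of this module). Entry points
# such as run-webkit-tests drive the Manager roughly like this:
#
#   host = Host()
#   port = host.port_factory.get(options.platform, options)
#   manager = Manager(port, options, printer)
#   details = manager.run(args)
#   sys.exit(details.exit_code)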
class Manager(object):
    """A class for managing the running of a series of tests on a series of
    layout test files."""

    def __init__(self, port, options, printer):
        """Initialize test runner data structures.

        Args:
          port: an object implementing port-specific functionality
          options: a dictionary of command line options
          printer: a Printer object to record updates to.
        """
        self._port = port
        self._filesystem = port.host.filesystem
        self._options = options
        self._printer = printer
        self._expectations = None

        self.HTTP_SUBDIR = 'http' + port.TEST_PATH_SEPARATOR
        self.PERF_SUBDIR = 'perf'
        self.WEBSOCKET_SUBDIR = 'websocket' + port.TEST_PATH_SEPARATOR
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'
        self._http_server_started = False
        self._websockets_server_started = False

        self._results_directory = self._port.results_directory()
        self._finder = LayoutTestFinder(self._port, self._options)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory, self._test_is_slow)

    def _collect_tests(self, args):
        return self._finder.find_tests(self._options, args)

    def _is_http_test(self, test):
        return self.HTTP_SUBDIR in test or self._is_websocket_test(test)

    def _is_websocket_test(self, test):
        return self.WEBSOCKET_SUBDIR in test

    def _http_tests(self, test_names):
        return set(test for test in test_names if self._is_http_test(test))

    def _is_perf_test(self, test):
        return self.PERF_SUBDIR == test or (self.PERF_SUBDIR + self._port.TEST_PATH_SEPARATOR) in test

    def _prepare_lists(self, paths, test_names):
        tests_to_skip = self._finder.skip_tests(paths, test_names, self._expectations, self._http_tests(test_names))
        tests_to_run = [test for test in test_names if test not in tests_to_skip]

        # Create a sorted list of test files so the subset chunk,
        # if used, contains alphabetically consecutive tests.
        if self._options.order == 'natural':
            tests_to_run.sort(key=self._port.test_key)
        elif self._options.order == 'random':
            random.shuffle(tests_to_run)
        elif self._options.order == 'random-seeded':
            rnd = random.Random()
            rnd.seed(4)  # http://xkcd.com/221/
            rnd.shuffle(tests_to_run)

        tests_to_run, tests_in_other_chunks = self._finder.split_into_chunks(tests_to_run)
        self._expectations.add_extra_skipped_tests(tests_in_other_chunks)
        tests_to_skip.update(tests_in_other_chunks)
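
        # Illustrative note (an assumption, not from the original source): with
        # a chunking option such as --run-chunk, split_into_chunks() keeps only
        # this instance's share of the sorted list; the tests belonging to the
        # other chunks are folded back into tests_to_skip above so that they
        # are reported as skips rather than silently dropped.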
        return tests_to_run, tests_to_skip

    def _test_input_for_file(self, test_file):
        return TestInput(test_file,
            self._options.slow_time_out_ms if self._test_is_slow(test_file) else self._options.time_out_ms,
            self._test_requires_lock(test_file),
            should_add_missing_baselines=(self._options.new_test_results and not self._test_is_expected_missing(test_file)))

    def _test_requires_lock(self, test_file):
        """Return True if the test needs to be locked when running multiple
        copies of NRWTs. Perf tests are locked because the heavy load caused
        by running other tests in parallel might cause some of them to time out."""
        return self._is_http_test(test_file) or self._is_perf_test(test_file)

    def _test_is_expected_missing(self, test_file):
        expectations = self._expectations.model().get_expectations(test_file)
        return test_expectations.MISSING in expectations or test_expectations.NEEDS_REBASELINE in expectations or test_expectations.NEEDS_MANUAL_REBASELINE in expectations

    def _test_is_slow(self, test_file):
        return test_expectations.SLOW in self._expectations.model().get_expectations(test_file)

    def needs_servers(self, test_names):
        return any(self._test_requires_lock(test_name) for test_name in test_names)

    def _set_up_run(self, test_names):
        self._printer.write_update("Checking build ...")
        if self._options.build:
            exit_code = self._port.check_build(self.needs_servers(test_names), self._printer)
            if exit_code:
                _log.error("Build check failed")
                return exit_code

        # This must be started before we check the system dependencies,
        # since the helper may do things to make the setup correct.
        if self._options.pixel_tests:
            self._printer.write_update("Starting pixel test helper ...")
            self._port.start_helper()

        # Check that the system dependencies (themes, fonts, ...) are correct.
        if not self._options.nocheck_sys_deps:
            self._printer.write_update("Checking system dependencies ...")
            exit_code = self._port.check_sys_deps(self.needs_servers(test_names))
            if exit_code:
                self._port.stop_helper()
                return exit_code

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        self._port.setup_test_run()
        return test_run_results.OK_EXIT_STATUS

    def run(self, args):
        """Run the tests and return a RunDetails object with the results."""
        start_time = time.time()
        self._printer.write_update("Collecting tests ...")
        try:
            paths, test_names = self._collect_tests(args)
        except IOError:
            # This is raised if --test-list doesn't exist.
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        self._printer.write_update("Parsing expectations ...")
        self._expectations = test_expectations.TestExpectations(self._port, test_names)

        tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)
        self._printer.print_found(len(test_names), len(tests_to_run), self._options.repeat_each, self._options.iterations)

        # Check to make sure we're not skipping every test.
        if not tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=test_run_results.NO_TESTS_EXIT_STATUS)

        exit_code = self._set_up_run(tests_to_run)
        if exit_code:
            return test_run_results.RunDetails(exit_code=exit_code)

        # Don't retry failures if an explicit list of tests was passed in.
        if self._options.retry_failures is None:
            should_retry_failures = len(paths) < len(test_names)
        else:
            should_retry_failures = self._options.retry_failures

        enabled_pixel_tests_in_retry = False
        try:
            self._start_servers(tests_to_run)

            initial_results = self._run_tests(tests_to_run, tests_to_skip, self._options.repeat_each, self._options.iterations,
                self._port.num_workers(int(self._options.child_processes)), retrying=False)

            # Don't retry failures when interrupted by the user or by a failures limit exception.
            should_retry_failures = should_retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)

            tests_to_retry = self._tests_to_retry(initial_results)
            if should_retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                _log.info("Retrying %d unexpected failure(s) ..." % len(tests_to_retry))
                retry_results = self._run_tests(tests_to_retry, tests_to_skip=set(), repeat_each=1, iterations=1,
                    num_workers=1, retrying=True)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._stop_servers()
            self._clean_up_run()

        # Some crash logs can take a long time to be written out, so look
        # for new logs after the test run finishes.
        self._printer.write_update("Looking for new crash logs ...")
        self._look_for_new_crash_logs(initial_results, start_time)
        if retry_results:
            self._look_for_new_crash_logs(retry_results, start_time)

        _log.debug("Summarizing results")
        summarized_full_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry)
        summarized_failing_results = test_run_results.summarize_results(self._port, self._expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=True)

        exit_code = summarized_failing_results['num_regressions']
        if not self._options.dry_run:
            self._write_json_files(summarized_full_results, summarized_failing_results, initial_results)
            self._upload_json_files()

            results_path = self._filesystem.join(self._results_directory, "results.html")
            self._copy_results_html_file(results_path)
            if initial_results.keyboard_interrupted:
                exit_code = test_run_results.INTERRUPTED_EXIT_STATUS
            else:
                if self._options.show_results and (exit_code or (self._options.full_results_html and initial_results.total_failures)):
                    self._port.show_results_html_file(results_path)
                self._printer.print_results(time.time() - start_time, initial_results, summarized_failing_results)

        return test_run_results.RunDetails(exit_code, summarized_full_results, summarized_failing_results, initial_results, retry_results, enabled_pixel_tests_in_retry)

    def _run_tests(self, tests_to_run, tests_to_skip, repeat_each, iterations, num_workers, retrying):
        test_inputs = []
        for _ in xrange(iterations):
            for test in tests_to_run:
                for _ in xrange(repeat_each):
                    test_inputs.append(self._test_input_for_file(test))
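
        # A hypothetical illustration (not in the original source): with
        # tests_to_run = ['a.html', 'b.html'], iterations=2 and repeat_each=2,
        # the inputs are scheduled as a, a, b, b, a, a, b, b -- repeat_each
        # groups the repeats of one test together, while iterations repeats
        # the whole list.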
        return self._runner.run_tests(self._expectations, test_inputs, tests_to_skip, num_workers, retrying)

    def _start_servers(self, tests_to_run):
        if self._port.requires_http_server() or any(self._is_http_test(test) for test in tests_to_run):
            self._printer.write_update('Starting HTTP server ...')
            self._port.start_http_server(number_of_servers=(2 * self._options.max_locked_shards))
            self._http_server_started = True

        if any(self._is_websocket_test(test) for test in tests_to_run):
            self._printer.write_update('Starting WebSocket server ...')
            self._port.start_websocket_server()
            self._websockets_server_started = True

    def _stop_servers(self):
        if self._http_server_started:
            self._printer.write_update('Stopping HTTP server ...')
            self._http_server_started = False
            self._port.stop_http_server()
        if self._websockets_server_started:
            self._printer.write_update('Stopping WebSocket server ...')
            self._websockets_server_started = False
            self._port.stop_websocket_server()

    def _clean_up_run(self):
        _log.debug("Flushing stdout")
        sys.stdout.flush()
        _log.debug("Flushing stderr")
        sys.stderr.flush()
        _log.debug("Stopping helper")
        self._port.stop_helper()
        _log.debug("Cleaning up port")
        self._port.clean_up_test_run()

    def _force_pixel_tests_if_needed(self):
        if self._options.pixel_tests:
            return False

        _log.debug("Restarting helper")
        self._port.stop_helper()
        self._options.pixel_tests = True
        self._port.start_helper()
        return True

    def _look_for_new_crash_logs(self, run_results, start_time):
        """Since crash logs can take a long time to be written out if the
        system is under stress, do a second pass at the end of the test run.

        Args:
          run_results: the results of the test run
          start_time: time the tests started at. We're looking for crash
            logs after that time.
        """
        crashed_processes = []
        for test, result in run_results.unexpected_results_by_name.iteritems():
            if result.type != test_expectations.CRASH:
                continue
            for failure in result.failures:
                if not isinstance(failure, test_failures.FailureCrash):
                    continue
                crashed_processes.append([test, failure.process_name, failure.pid])

        sample_files = self._port.look_for_new_samples(crashed_processes, start_time)
        if sample_files:
            for test, sample_file in sample_files.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.copy_sample_file(sample_file)

        crash_logs = self._port.look_for_new_crash_logs(crashed_processes, start_time)
        if crash_logs:
            for test, crash_log in crash_logs.iteritems():
                writer = TestResultWriter(self._port._filesystem, self._port, self._port.results_directory(), test)
                writer.write_crash_log(crash_log)

    def _clobber_old_results(self):
        # Just clobber the actual test results directories, since the other
        # files in the results directory are explicitly used for cross-run
        # tracking.
        self._printer.write_update("Clobbering old results in %s" % self._results_directory)
        layout_tests_dir = self._port.layout_tests_dir()
        possible_dirs = self._port.test_dirs()
        for dirname in possible_dirs:
            if self._filesystem.isdir(self._filesystem.join(layout_tests_dir, dirname)):
                self._filesystem.rmtree(self._filesystem.join(self._results_directory, dirname))

        # Port specific clean-up.
        self._port.clobber_old_port_specific_results()

    def _tests_to_retry(self, run_results):
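        # Descriptive note (added; not in the original source): anything whose
        # result was unexpected is retried, except unexpected passes -- a test
        # that passed when it was expected to fail does not need a second run.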
        return [result.test_name for result in run_results.unexpected_results_by_name.values() if result.type != test_expectations.PASS]

    def _write_json_files(self, summarized_full_results, summarized_failing_results, initial_results):
        _log.debug("Writing JSON files in %s." % self._results_directory)

        # FIXME: Upload stats.json to the server and delete times_ms.
        times_trie = json_results_generator.test_timings_trie(self._port, initial_results.results_by_name.values())
        times_json_path = self._filesystem.join(self._results_directory, "times_ms.json")
        json_results_generator.write_json(self._filesystem, times_trie, times_json_path)

        stats_trie = self._stats_trie(initial_results)
        stats_path = self._filesystem.join(self._results_directory, "stats.json")
        self._filesystem.write_text_file(stats_path, json.dumps(stats_trie))

        full_results_path = self._filesystem.join(self._results_directory, "full_results.json")
        json_results_generator.write_json(self._filesystem, summarized_full_results, full_results_path)

        full_results_path = self._filesystem.join(self._results_directory, "failing_results.json")
        # We write failing_results.json out as jsonp because we need to load it
        # from a file url for results.html, and Chromium doesn't allow that.
        json_results_generator.write_json(self._filesystem, summarized_failing_results, full_results_path, callback="ADD_RESULTS")
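        # Illustrative (an assumption about the jsonp wrapper): the file body
        # looks like ADD_RESULTS({"tests": ...});, so results.html can pull it
        # in with a <script> tag and receive the data through its callback.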

        _log.debug("Finished writing JSON files.")

    def _upload_json_files(self):
        if not self._options.test_results_server:
            return

        if not self._options.master_name:
            _log.error("--test-results-server was set, but --master-name was not. Not uploading JSON files.")
            return

        _log.debug("Uploading JSON files for builder: %s", self._options.builder_name)
        attrs = [("builder", self._options.builder_name),
                 ("testtype", "layout-tests"),
                 ("master", self._options.master_name)]

        files = [(file, self._filesystem.join(self._results_directory, file)) for file in ["failing_results.json", "full_results.json", "times_ms.json"]]

        url = "http://%s/testfile/upload" % self._options.test_results_server
        # Set an upload timeout in case the appengine server is having problems;
        # 120 seconds is more than enough to upload test results.
        uploader = FileUploader(url, 120)
        try:
            response = uploader.upload_as_multipart_form_data(self._filesystem, files, attrs)
            if response:
                if response.code == 200:
                    _log.debug("JSON uploaded.")
                else:
                    _log.debug("JSON upload failed, %d: '%s'" % (response.code, response.read()))
            else:
                _log.error("JSON upload failed; no response returned")
        except Exception as err:
            _log.error("Upload failed: %s" % err)

    def _copy_results_html_file(self, destination_path):
        base_dir = self._port.path_from_webkit_base('LayoutTests', 'fast', 'harness')
        results_file = self._filesystem.join(base_dir, 'results.html')
        # Note that the results.html template file won't exist when we're using a
        # MockFileSystem during unit tests, so make sure it exists before we try
        # to copy it.
        if self._filesystem.exists(results_file):
            self._filesystem.copyfile(results_file, destination_path)

    def _stats_trie(self, initial_results):
        def _worker_number(worker_name):
            return int(worker_name.split('/')[1]) if worker_name else -1
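        # Descriptive note (added; the name format is an assumption): worker
        # names are expected to look like 'worker/3', so
        # _worker_number('worker/3') yields 3, and -1 is used when no worker
        # name was recorded for the result.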

        stats = {}
        for result in initial_results.results_by_name.values():
            if result.type != test_expectations.SKIP:
                stats[result.test_name] = {'results': (_worker_number(result.worker_name), result.test_number, result.pid, int(result.test_run_time * 1000), int(result.total_run_time * 1000))}

        stats_trie = {}
        for name, value in stats.iteritems():
            json_results_generator.add_path_to_trie(name, value, stats_trie)
        return stats_trie
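
# An illustrative example of the trie shape _stats_trie produces (hypothetical
# test name and numbers, not taken from a real run): a result for
# 'fast/forms/button.html' handled by worker/1 would nest as
#
#   {'fast': {'forms': {'button.html': {'results': (1, 7, 4242, 10, 12)}}}}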