# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json
import os
import StringIO
import sys

import webkitpy.thirdparty.unittest2 as unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions


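# parse_args() builds the command line for a run: it forces the mock 'test'
# platform and a single child process unless the caller overrides them, and
# appends a default set of test directories when none are given explicitly.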
def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if not '--child-processes' in extra_args:
        args.extend(['--child-processes', 1])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)


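# passing_run() performs a complete run against the mock port and reports
# only whether it exited cleanly (exit code 0).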
def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


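# logging_run() is like passing_run() but captures the log stream, returning
# (run_details, log stream, mock user) so callers can assert on the output.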
def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


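# run_and_capture() optionally pins the port factory so every (mock) worker
# shares one port object, then runs with stdout/stderr captured.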
def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None, port_obj=None):
    results = get_test_results(args, host=host, port_obj=port_obj)
    return [result.test_name for result in results]


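# get_test_batches() groups the results by the worker pid that produced them,
# so tests can assert on batch sizes (e.g. for --batch-size and --run-singly).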
def get_test_batches(args, host=None):
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None, port_obj=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


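# full_results.json is written as a JSONP-style payload; parse_full_results()
# strips the wrapper and decodes the JSON, e.g.
# parse_full_results('ADD_RESULTS({"num_flaky":0});') -> {u'num_flaky': 0}.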
def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


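# RunTest drives run_webkit_tests end-to-end against the mock 'test' port
# (see webkitpy.layout_tests.port.test) and asserts on exit codes, logging,
# and the JSON results files written to the mock filesystem.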
class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)

        expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return

        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are raised in-process or out. Inline exceptions work as
        # normal, which allows us to get the full stack trace and traceback
        # from the worker. The downside to this is that it could be any error,
        # but this is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed but doesn't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_device_failure(self):
        # Test that we handle a device going offline during a test properly.
        details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('worker/0 has failed' in regular_output.getvalue())

    def test_full_results_html(self):
        host = MockHost()
        details, _, _ = logging_run(['--full-results-html'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_daily_seed_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations.
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_smoke_test(self):
        host = MockHost()
        smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')

        # Test the default smoke testing.
        tests_run = get_tests_run(['--smoke'], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

        # Test running the smoke tests plus some manually-specified tests.
        tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)

        # Test that --no-smoke runs only the manually-specified tests.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html'], tests_run)

        # Test that we don't run just the smoke tests by default on a normal test port.
        tests_run = get_tests_run([], host=host)
        self.assertNotEqual(['passes/text.html'], tests_run)

        # Create a port that does run only the smoke tests by default, and verify that works as expected.
        port_obj = host.port_factory.get('test')
        port_obj.default_smoke_test_only = lambda: True
        tests_run = get_tests_run([], host=host, port_obj=port_obj)
        self.assertEqual(['passes/text.html'], tests_run)

        # Verify that --no-smoke continues to work on a smoke-by-default port.
        tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
        self.assertNotEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results', '--retry-failures',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 2)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"num_regressions":2') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)

    def test_different_failure_on_retry(self):
        # This tests that if a test fails two different ways -- both unexpected
        # -- we treat it as a failure rather than a flaky result. We use the
        # initial failure for simplicity and consistency w/ the flakiness
        # dashboard, even if the second failure is worse.
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
                         'TEXT CRASH')

        # If we get a test that fails two different ways -- but the second one is expected --
        # we should treat it as a flaky result and report the initial unexpected failure type
        # to the dashboard. However, the test should be considered passing.
        details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
                         'CRASH TEXT')

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have a failing checksum. We include only the first in the
        # pixel-test directories, so only that one should fail.
        args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results were generated during the run and then
        # deleted, so the file should no longer exist.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        # This checks that we report only the number of tests that actually failed.
        self.assertEqual(details.exit_code, 1)

        # This checks that passes/text.html is considered skipped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_default_value(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
        self.assertEqual(len(host.user.opened_urls), 0)

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
                         {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests test that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
        self.assertEqual(results["num_regressions"], 5)
        self.assertEqual(results["num_flaky"], 0)

    def test_reftest_crash(self):
        test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
        # The list of references should be empty since the test crashed and we didn't run any references.
        self.assertEqual(test_results[0].references, [])

    def test_reftest_with_virtual_reference(self):
        _, err, _ = logging_run(['--details', 'virtual/passes/reftest.html'], tests_included=True)
        self.assertTrue('ref: virtual/passes/reftest-expected.html' in err.getvalue())

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

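    # Helper: filter a list of test names down to those whose names contain
    # the given substring (e.g. 'http').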
    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if
        # wdiff and PrettyPatch aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # By using a port_name for a different platform than the one we're running on, the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'linux-x86'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def disabled_test_driver_logging(self):
        # FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
        host = Host()
        _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
                                tests_included=True, host=host)
        self.assertTrue('OUT:' in err.getvalue())


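# EndToEndTest checks the summarized JSON that a full run produces for
# reftests that have multiple references.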
class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])

        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're
    # not supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 3)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_missing_results_not_added_if_expected_missing(self):
        # Test that we don't write new baselines for tests whose expectations
        # already mark the result as missing or needing a rebaseline.
        options, parsed_args = run_webkit_tests.parse_args([])
        host = MockHost()
        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 7)
        self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))

    def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
        # With --reset-results, new baselines are written even for tests whose
        # expectations mark them as missing or needing a rebaseline.
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])
        host = MockHost()
        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--pixel-tests', '--reset-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


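# MainTest swaps run_webkit_tests.run() for stubs to verify that main()
# maps interrupts and unexpected exceptions to the right exit codes.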
class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False  # Simulate an unexpected error escaping run().

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn