# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import os
import StringIO
import sys
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.test.skip import skip_if
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if not '--child-processes' in extra_args:
        args.extend(['--child-processes', 1])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)
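

# Illustrative sketch (not part of the original suite): parse_args() defaults
# to the mock 'test' platform unless the caller overrides it, so a bare call
# produces options aimed at the fake port used throughout these tests.
def _example_parse_args_defaults():
    options, parsed_args = parse_args(tests_included=True)
    assert options.platform == 'test'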


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None, port_obj=None):
    results = get_test_results(args, host=host, port_obj=port_obj)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
    results = get_test_results(args, host)

    # Group the test names into batches, one batch per worker process
    # (as identified by each result's pid).
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None, port_obj=None):
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results
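

# Illustrative sketch (not part of the original suite): full_results.json is
# written as JSONP, so parse_full_results() strips the ADD_RESULTS(...)
# wrapper before handing the payload to json.loads().
def _example_parse_full_results():
    wrapped = 'ADD_RESULTS({"num_regressions": 2, "num_flaky": 0});'
    assert parse_full_results(wrapped) == {"num_regressions": 2, "num_flaky": 0}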


class StreamTestingMixin(object):
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)

        expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return

        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_enable_sanitizer(self):
        self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are in-process or out. In-process exceptions work as
        # normal, which allows us to get the full stack trace and traceback
        # from the worker. The downside is that it could be any error, but
        # that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the stack that can be printed, but they don't display
        # properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)
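
    def _example_worker_exception_repackaging(self):
        # Illustrative sketch (not part of the original suite, and not
        # webkitpy's actual implementation): a traceback from a child process
        # can be captured as a string and re-raised in the parent wrapped in a
        # new exception, which is the idea behind WorkerException above.
        import traceback

        class SketchWorkerException(BaseException):
            pass

        try:
            raise ValueError('boom in worker')
        except ValueError:
            stack_string = traceback.format_exc()
        try:
            raise SketchWorkerException(stack_string)
        except SketchWorkerException as e:
            self.assertTrue('boom in worker' in str(e))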

    def test_device_failure(self):
        # Test that we handle a device going offline during a test properly.
        details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('worker/0 has failed' in regular_output.getvalue())

    def test_full_results_html(self):
        host = MockHost()
        details, _, _ = logging_run(['--full-results-html'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_3(self):
        details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_daily_seed_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests should be: number_of_tests *
        # repeat_each * iterations.
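        # Worked example (comment added for illustration): the run below uses
        # 2 tests with --repeat-each 4 and --iterations 2, so 2 * 4 * 2 = 16
        # total runs, matching the "All 16 tests" summary asserted on.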
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
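        # Worked example (comment added for illustration): with the four tests
        # below in their listed order, chunk '1:3' starts at index 1 * 3 = 3
        # and wraps, yielding tests[3], tests[0], tests[1].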
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the part size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keyboard.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_smoke_test(self):
        host = MockHost()
        smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')

        # Test the default smoke testing.
        tests_run = get_tests_run(['--smoke'], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

        # Test running the smoke tests plus some manually-specified tests.
        tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)

        # Test running some manually-specified tests with the smoke tests explicitly disabled.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html'], tests_run)

        # Test that we don't run just the smoke tests by default on a normal test port.
        tests_run = get_tests_run([], host=host)
        self.assertNotEqual(['passes/text.html'], tests_run)

        # Create a port that does run only the smoke tests by default, and verify that works as expected.
        port_obj = host.port_factory.get('test')
        port_obj.default_smoke_test_only = lambda: True
        tests_run = get_tests_run([], host=host, port_obj=port_obj)
        self.assertEqual(['passes/text.html'], tests_run)

        # Verify that --no-smoke continues to work on a smoke-by-default port.
        tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
        self.assertNotEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results', '--retry-failures',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 2)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"num_regressions":2') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)

    def test_different_failure_on_retry(self):
        # This tests that if a test fails two different ways -- both unexpected
        # -- we treat it as a failure rather than a flaky result. We use the
        # initial failure for simplicity and consistency w/ the flakiness
        # dashboard, even if the second failure is worse.
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
                         'TEXT CRASH')

        # If we get a test that fails two different ways -- but the second one is expected --
        # we should treat it as a flaky result and report the initial unexpected failure type
        # to the dashboard. However, the test should be considered passing.
        details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
                         'CRASH TEXT')

    def test_pixel_test_directories(self):
        host = MockHost()

        # Both tests have a failing checksum. We include only the first in the
        # pixel test directories, so only that one should fail.
        args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # If this file doesn't exist, the incremental results were generated and then deleted.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_default_value(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
        self.assertEqual(len(host.user.opened_urls), 0)

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_leak_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        json = parse_full_results(json_string)
        self.assertEqual(json["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
                         {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
        self.assertFalse(json["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These next tests test that we run the tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other tests,
        # so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/virtual_passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
        self.assertEqual(results["num_regressions"], 5)
        self.assertEqual(results["num_flaky"], 0)

    def test_reftest_crash(self):
        test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
        # The list of references should be empty since the test crashed and we didn't run any references.
        self.assertEqual(test_results[0].references, [])

    def test_reftest_with_virtual_reference(self):
        _, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
        self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # By using a port_name for a different platform than the one we're running on, the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'linux-x86'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() because using
        # outputcapture to capture stdout and stderr later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def disabled_test_driver_logging(self):
        # FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
        host = Host()
        _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
                                tests_included=True, host=host)
        self.assertTrue('OUT:' in err.getvalue())

    def test_write_full_results_to(self):
        host = MockHost()
        details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in json["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in json["tests"]["reftests"]["foo"])

        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
        self.assertEqual(json["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
                         {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 3)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_missing_results_not_added_if_expected_missing(self):
        # Test that we don't write new baselines for tests that are expected
        # to be missing or that are marked as needing a rebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args([])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 7)
        self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))

    def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
        # As above, but with --reset-results: the baselines should be written
        # even for tests that are expected to be missing.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--pixel-tests', '--reset-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=[]):
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            raise ValueError('testing')

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn

    def test_buildbot_results_are_printed_on_early_exit(self):
        # unused args pylint: disable=W0613
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
                                     'failures/unexpected/missing_text.html',
                                     'failures/unexpected/missing_image.html'],
                                    stdout, stderr)
        self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(),
                         ('Regressions: Unexpected missing results (1)\n'
                          '  failures/unexpected/missing_image.html [ Missing ]\n\n'))