# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 """Unit tests for run_webkit_tests."""
34 from __future__ import with_statement

import itertools
import sys
import unittest

try:
    import multiprocessing
except ImportError:
    multiprocessing = None

try:
    import json
except ImportError:
    # python 2.5 compatibility
    import webkitpy.thirdparty.simplejson as json

# FIXME: remove this when we fix test-webkitpy to work properly on cygwin
SHOULD_TEST_PROCESSES = multiprocessing and sys.platform not in ('cygwin', 'win32')

from webkitpy.common import array_stream
from webkitpy.common.system import outputcapture
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.port.test import TestPort, TestDriver, unit_test_filesystem
from webkitpy.layout_tests.port.test_files import is_reference_html_file
from webkitpy.python24.versioning import compare_version
from webkitpy.test.skip import skip_if


def parse_args(extra_args=None, record_results=False, tests_included=False, new_results=False, print_nothing=True):
    extra_args = extra_args or []
    if print_nothing:
        args = ['--print', 'nothing']
    else:
        args = []
    if not '--platform' in extra_args:
        args.extend(['--platform', 'test'])
    if not record_results:
        args.append('--no-record-results')
    if not new_results:
        args.append('--no-new-test-results')

    if not '--child-processes' in extra_args and not '--worker-model' in extra_args:
        args.extend(['--worker-model', 'inline'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)
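

# Illustrative helper (not invoked by the suite): a minimal sketch showing the
# defaults parse_args() injects when the caller supplies no flags of its own.
# Only options.platform is asserted here, since that is the one attribute the
# helpers below already rely on.
def _example_parse_args_defaults():
    options, parsed_args = parse_args(tests_included=True)
    # No --platform was passed, so parse_args() injected '--platform test'.
    assert options.platform == 'test'
    return options, parsed_args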


def passing_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None):
    options, parsed_args = parse_args(extra_args, record_results, tests_included)
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options, filesystem=filesystem)
    buildbot_output = array_stream.ArrayStream()
    regular_output = array_stream.ArrayStream()
    res = run_webkit_tests.run(port_obj, options, parsed_args, buildbot_output=buildbot_output, regular_output=regular_output)
    return res == 0 and regular_output.empty() and buildbot_output.empty()


def logging_run(extra_args=None, port_obj=None, record_results=False, tests_included=False, filesystem=None, new_results=False):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      record_results=record_results,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = MockHost()
    filesystem = filesystem or unit_test_filesystem()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options, filesystem=filesystem)

    res, buildbot_output, regular_output = run_and_capture(port_obj, options, parsed_args)
    return (res, buildbot_output, regular_output, host.user)


def run_and_capture(port_obj, options, parsed_args):
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        buildbot_output = array_stream.ArrayStream()
        regular_output = array_stream.ArrayStream()
        res = run_webkit_tests.run(port_obj, options, parsed_args,
                                   buildbot_output=buildbot_output,
                                   regular_output=regular_output)
    finally:
        oc.restore_output()
    return (res, buildbot_output, regular_output)


def get_tests_run(extra_args=None, tests_included=False, flatten_batches=False,
                  filesystem=None, include_reference_html=False):
    extra_args = extra_args or []
    if not tests_included:
        # Not including http tests since they get run out of order (that
        # behavior has its own test, see test_get_test_file_queue).
        extra_args = ['passes', 'failures'] + extra_args
    options, parsed_args = parse_args(extra_args, tests_included=True)

    host = MockHost()
    filesystem = filesystem or unit_test_filesystem()

    test_batches = []

    class RecordingTestDriver(TestDriver):
        def __init__(self, port, worker_number):
            TestDriver.__init__(self, port, worker_number, pixel_tests=port.get_option('pixel_test'))
            self._current_test_batch = None

        def stop(self):
            self._current_test_batch = None

        def run_test(self, test_input):
            if self._current_test_batch is None:
                self._current_test_batch = []
                test_batches.append(self._current_test_batch)
            test_name = test_input.test_name
            # A reftest calls the driver's run_test() twice, once for the test
            # and once for its reference html. Don't record the reference html
            # unless the include_reference_html parameter is explicitly given.
            if include_reference_html or not is_reference_html_file(test_input.test_name):
                self._current_test_batch.append(test_name)
            return TestDriver.run_test(self, test_input)

    class RecordingTestPort(TestPort):
        def create_driver(self, worker_number):
            return RecordingTestDriver(self, worker_number)

    recording_port = RecordingTestPort(options=options, host=host, filesystem=filesystem)
    run_and_capture(recording_port, options, parsed_args)

    if flatten_batches:
        return list(itertools.chain(*test_batches))

    return test_batches
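

# Illustrative helper (not invoked by the suite): a minimal sketch of the
# flattening step get_tests_run() performs, using hypothetical test names.
# With flatten_batches=False, callers get one list per driver batch; with
# flatten_batches=True, the batches are chained into a single flat list.
def _example_flatten_batches():
    test_batches = [['passes/a.html', 'passes/b.html'], ['passes/c.html']]
    flat = list(itertools.chain(*test_batches))
    assert flat == ['passes/a.html', 'passes/b.html', 'passes/c.html']
    return flat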


class MainTest(unittest.TestCase):
    def test_accelerated_video(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-video']))
        self.assertTrue(passing_run(['--no-accelerated-video']))

    def test_accelerated_2d_canvas(self):
        # This just tests that we recognize the command line args
        self.assertTrue(passing_run(['--accelerated-2d-canvas']))
        self.assertTrue(passing_run(['--no-accelerated-2d-canvas']))

    def test_basic(self):
        self.assertTrue(passing_run())

    def test_batch_size(self):
        batch_tests_run = get_tests_run(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_child_process_1(self):
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '1'])
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_child_processes_2(self):
        # This test seems to fail on win32.
        if sys.platform == 'win32':
            return
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2'])
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.get()]))

    def test_child_processes_min(self):
        if SHOULD_TEST_PROCESSES:
            _, _, regular_output, _ = logging_run(
                ['--print', 'config', '--worker-model', 'processes', '--child-processes', '2', 'passes'],
                tests_included=True)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.get()]))

    def test_dryrun(self):
        batch_tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(batch_tests_run, [])

        batch_tests_run = get_tests_run(['-n'])
        self.assertEqual(batch_tests_run, [])

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether they are in-process or out. Inline exceptions work as normal,
        # which allows us to get the full stack trace and traceback from the
        # worker. The downside to this is that it could be any error, but this
        # is actually useful in testing, which is what --worker-model=inline is
        # usually used for.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions, which have a string capture of the stack which can
        # be printed, but don't display properly in the unit test exception handlers.
        self.assertRaises(ValueError, logging_run,
            ['failures/expected/exception.html'], tests_included=True)

        if SHOULD_TEST_PROCESSES:
            self.assertRaises(run_webkit_tests.WorkerException, logging_run,
                ['--worker-model', 'processes', 'failures/expected/exception.html'], tests_included=True)

    def test_full_results_html(self):
        # FIXME: verify html?
        res, out, err, user = logging_run(['--full-results-html'])
        self.assertEqual(res, 0)

    def test_help_printing(self):
        res, out, err, user = logging_run(['--help-printing'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertFalse(err.empty())

    def test_hung_thread(self):
        res, out, err, user = logging_run(['--run-singly', '--time-out-ms=50',
                                           'failures/expected/hang.html'],
                                          tests_included=True)
        self.assertEqual(res, 0)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html'], tests_included=True)

    def test_keyboard_interrupt_inline_worker_model(self):
        self.assertRaises(KeyboardInterrupt, logging_run,
            ['failures/expected/keyboard.html', '--worker-model', 'inline'],
            tests_included=True)

    def test_lint_test_files(self):
        res, out, err, user = logging_run(['--lint-test-files'])
        self.assertEqual(res, 0)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint succeeded' in msg for msg in err.get()]))

    def test_lint_test_files__errors(self):
        options, parsed_args = parse_args(['--lint-test-files'])
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options=options)
        port_obj.test_expectations = lambda: "# syntax error"
        res, out, err = run_and_capture(port_obj, options, parsed_args)

        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue(any(['Lint failed' in msg for msg in err.get()]))

    def test_no_tests_found(self):
        res, out, err, user = logging_run(['resources'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_no_tests_found_2(self):
        res, out, err, user = logging_run(['foo'], tests_included=True)
        self.assertEqual(res, -1)
        self.assertTrue(out.empty())
        self.assertTrue('No tests to run.\n' in err.get())

    def test_randomize_order(self):
        # FIXME: verify order was shuffled
        self.assertTrue(passing_run(['--randomize-order']))

    def test_gc_between_tests(self):
        self.assertTrue(passing_run(['--gc-between-tests']))

    def test_complex_text(self):
        self.assertTrue(passing_run(['--complex-text']))

    def test_threaded(self):
        self.assertTrue(passing_run(['--threaded']))

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(flatten_batches=True)
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4'], flatten_batches=True)
        self.assertEquals(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible
        # by the chunk size (sketched in _example_run_chunk_arithmetic() below).
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)
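
    def _example_run_chunk_arithmetic(self):
        # Not run by the harness (no test_ prefix); a hedged sketch of the
        # chunk selection asserted in test_run_chunk() above. It restates the
        # observed behavior, not the scheduler's implementation:
        # '--run-chunk i:s' takes s tests starting at offset i * s, wrapping
        # past the end of the list when necessary.
        tests = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk, size = 1, 3
        start = chunk * size
        selected = (tests * 2)[start:start + size]
        self.assertEquals(selected, ['passes/text.html', 'passes/error.html', 'passes/image.html'])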

    def test_run_force(self):
        # This raises an exception because we run
        # failures/expected/exception.html, which is normally SKIPped.
        #
        # See also the comments in test_exception_raised() about ValueError vs. WorkerException.
        self.assertRaises(ValueError, logging_run, ['--force'])

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests; see the arithmetic sketch below).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run, tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/error.html', 'passes/image.html'], chunk_tests_run)
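
    def _example_run_part_arithmetic(self):
        # Not run by the harness; a hedged sketch of the part arithmetic the
        # comment in test_run_part() describes: '--run-part i:n' splits the
        # list into n parts of ceil(len / n) tests and selects part i
        # (1-based), wrapping like --run-chunk when the last part comes up
        # short. Again a restatement of observed behavior, not the
        # implementation.
        tests = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        part, n_parts = 3, 3
        size = (len(tests) + n_parts - 1) // n_parts  # ceil(4 / 3) == 2
        start = (part - 1) * size
        selected = (tests * 2)[start:start + size]
        self.assertEquals(selected, ['passes/error.html', 'passes/image.html'])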

    def test_run_singly(self):
        batch_tests_run = get_tests_run(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEquals(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        batches = get_tests_run(['--skip-failing-tests'])
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_run_singly_actually_runs_tests(self):
        res, _, _, _ = logging_run(['--run-singly', 'failures/unexpected'])
        self.assertEquals(res, 7)

    def test_single_file(self):
        # FIXME: We should consider replacing more of the get_tests_run()-style tests
        # with tests that read the tests_run* files, like this one.
        fs = unit_test_filesystem()
        tests_run = passing_run(['passes/text.html'], tests_included=True, filesystem=fs)
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/tests_run0.txt'),
                          'passes/text.html\n')

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_stderr_is_saved(self):
        fs = unit_test_filesystem()
        self.assertTrue(passing_run(filesystem=fs))
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                          'stuff going to stderr')

    def test_test_list(self):
        fs = unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)
        fs.remove(filename)
        res, out, err, user = logging_run(['--test-list=%s' % filename],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(res, -1)
        self.assertFalse(err.empty())

    def test_test_list_with_prefix(self):
        fs = unit_test_filesystem()
        filename = '/tmp/foo.txt'
        fs.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], tests_included=True, flatten_batches=True, filesystem=fs)
        self.assertEquals(['passes/text.html'], tests_run)

    def test_unexpected_failures(self):
        # Run tests including the unexpected failures.
        self._url_opened = None
        res, out, err, user = logging_run(tests_included=True)

        # Update this magic number if you add an unexpected test to webkitpy.layout_tests.port.test.
        # FIXME: It would be nice to have a routine in port/test.py that returns this number.
        unexpected_tests_count = 7
        self.assertEqual(res, unexpected_tests_count)
        self.assertFalse(out.empty())
        self.assertFalse(err.empty())
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_missing_and_unexpected_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, filesystem=fs, record_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 1)
        expected_token = '"unexpected":{"text-image-checksum.html":{"expected":"PASS","actual":"TEXT"},"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING"}'
        json_string = fs.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)
        self.assertTrue(json_string.find('"num_regression":1') == -1)
        self.assertTrue(json_string.find('"num_flaky":1') == -1)
        self.assertTrue(json_string.find('"num_missing":1') != -1)

    def test_missing_and_unexpected_results_with_custom_exit_code(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()

        class CustomExitCodePort(TestPort):
            def exit_code_from_summarized_results(self, unexpected_results):
                return unexpected_results['num_regressions'] + unexpected_results['num_missing']

        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--no-new-test-results'])
        test_port = CustomExitCodePort(options=options)
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, filesystem=fs, record_results=True, port_obj=test_port)
        self.assertEquals(res, 2)

    def test_crash_with_stderr(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true}}') != -1)

    def test_no_image_failure_with_image_diff(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/checksum-with-matching-image.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue(fs.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_crash_log(self):
        mock_crash_report = 'mock-crash-report'
        fs = unit_test_filesystem()
        fs.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/DumpRenderTree_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        expected_crash_log = mock_crash_report
        # Currently CrashLog uploading only works on Darwin.
        if sys.platform != "darwin":
            expected_crash_log = "mock-std-error-output"
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/failures/unexpected/crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_web_process_crash_log(self):
        mock_crash_report = 'mock-crash-report'
        fs = unit_test_filesystem()
        fs.write_text_file('/Users/mock/Library/Logs/DiagnosticReports/WebProcess_2011-06-13-150719_quadzen.crash', mock_crash_report)
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/web-process-crash-with-stderr.html',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        expected_crash_log = mock_crash_report
        # Currently CrashLog uploading only works on Darwin.
        if sys.platform != "darwin":
            expected_crash_log = "mock-std-error-output"
        self.assertEquals(fs.read_text_file('/tmp/layout-test-results/failures/unexpected/web-process-crash-with-stderr-crash-log.txt'), expected_crash_log)

    def test_exit_after_n_failures_upload(self):
        fs = unit_test_filesystem()
        res, buildbot_output, regular_output, user = logging_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            record_results=True,
            filesystem=fs)
        self.assertTrue('/tmp/layout-test-results/incremental_results.json' in fs.files)

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/text-image-checksum.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/text.html',
                'passes/text.html',
                '--exit-after-n-failures', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run([
                'failures/unexpected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run([
                'failures/expected/crash.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes_inline_worker_model(self):
        tests_run = get_tests_run([
                'failures/unexpected/timeout.html',
                'passes/text.html',
                '--exit-after-n-crashes-or-timeouts', '1',
                '--worker-model', 'inline',
            ],
            tests_included=True,
            flatten_batches=True)
        self.assertEquals(['failures/unexpected/timeout.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        fs = unit_test_filesystem()
        with fs.mkdtemp() as tmpdir:
            res, out, err, user = logging_run(['--results-directory=' + str(tmpdir)],
                                              tests_included=True, filesystem=fs)
            self.assertEqual(user.opened_urls, [fs.join(tmpdir, 'results.html')])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        res, out, err, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, ['/tmp/layout-test-results/results.html'])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        fs = unit_test_filesystem()
        fs.maybe_make_directory('/tmp/cwd')
        fs.chdir('/tmp/cwd')
        res, out, err, user = logging_run(['--results-directory=foo'],
                                          tests_included=True, filesystem=fs)
        self.assertEqual(user.opened_urls, ['/tmp/cwd/foo/results.html'])

    # These next tests test that we run the tests in ascending alphabetical
    # order per directory. HTTP tests are sharded separately from other tests,
    # so we have to test both.
    def assert_run_order(self, worker_model, child_processes='1'):
        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['--worker-model', worker_model,
            '--child-processes', child_processes, 'http/tests/passes'],
            tests_included=True, flatten_batches=True)
        self.assertEquals(tests_run, sorted(tests_run))

    def test_run_order__inline(self):
        self.assert_run_order('inline')

    def test_tolerance(self):
        class ImageDiffTestPort(TestPort):
            def diff_image(self, expected_contents, actual_contents):
                self.tolerance_used_for_diff_image = self._options.tolerance
                return (True, 1)

        def get_port_for_run(args):
            options, parsed_args = run_webkit_tests.parse_args(args)
            test_port = ImageDiffTestPort(options=options)
            res = passing_run(args, port_obj=test_port, tests_included=True)
            self.assertTrue(res)
            return test_port

        base_args = ['--pixel-tests', '--no-new-test-results', 'failures/expected/*']

        # If we pass in an explicit tolerance argument, then that will be used.
        test_port = get_port_for_run(base_args + ['--tolerance', '.1'])
        self.assertEqual(0.1, test_port.tolerance_used_for_diff_image)
        test_port = get_port_for_run(base_args + ['--tolerance', '0'])
        self.assertEqual(0, test_port.tolerance_used_for_diff_image)

        # Otherwise the port's default tolerance behavior (including ignoring it)
        # should be used.
        test_port = get_port_for_run(base_args)
        self.assertEqual(None, test_port.tolerance_used_for_diff_image)

    def test_worker_model__inline(self):
        self.assertTrue(passing_run(['--worker-model', 'inline']))

    def test_worker_model__inline_with_child_processes(self):
        res, out, err, user = logging_run(['--worker-model', 'inline',
                                           '--child-processes', '2'])
        self.assertEqual(res, 0)
        self.assertTrue('--worker-model=inline overrides --child-processes\n' in err.get())

    def test_worker_model__processes(self):
        if SHOULD_TEST_PROCESSES:
            self.assertTrue(passing_run(['--worker-model', 'processes']))

    def test_worker_model__processes_and_dry_run(self):
        if SHOULD_TEST_PROCESSES:
            self.assertTrue(passing_run(['--worker-model', 'processes', '--dry-run']))

    def test_worker_model__unknown(self):
        self.assertRaises(ValueError, logging_run, ['--worker-model', 'unknown'])

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals(['passes/reftest.html'], tests_run)

    def test_reftest_skip_reftests_if_no_ref_tests(self):
        tests_run = get_tests_run(['--no-ref-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)
        tests_run = get_tests_run(['--no-ref-tests', '--no-pixel-tests', 'passes/reftest.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'], tests_included=True, flatten_batches=True)
        self.assertEquals([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_tests_run(['passes/reftest.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/reftest.html', 'passes/reftest-expected.html'], tests_run)

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_tests_run(['passes/mismatch.html'], tests_included=True, flatten_batches=True, include_reference_html=True)
        self.assertEquals(['passes/mismatch.html', 'passes/mismatch-expected-mismatch.html'], tests_run)

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))

        res, buildbot_output, regular_output, user = logging_run(['--additional-platform-directory', 'foo'])
        self.assertTrue('--additional-platform-directory=foo is ignored since it is not absolute\n' in regular_output.get())

    def test_no_http_and_force(self):
        # See test_run_force; using --force raises an exception.
        # FIXME: We would like to check the warnings generated.
        self.assertRaises(ValueError, logging_run, ['--force', '--no-http'])

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_no_http_tests(self):
        batch_tests_dryrun = get_tests_run(['LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_dryrun, 'websocket'))

        batch_tests_run_no_http = get_tests_run(['--no-http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'http'))
        self.assertFalse(MainTest.has_test_of_type(batch_tests_run_no_http, 'websocket'))

        batch_tests_run_http = get_tests_run(['--http', 'LayoutTests/http', 'websocket/'], flatten_batches=True)
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'http'))
        self.assertTrue(MainTest.has_test_of_type(batch_tests_run_http, 'websocket'))


MainTest = skip_if(MainTest, sys.platform == 'cygwin' and compare_version(sys, '2.6')[0] < 0, 'new-run-webkit-tests tests hang on Cygwin Python 2.5.2')


class EndToEndTest(unittest.TestCase):
    def parse_full_results(self, full_results_text):
        json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        compressed_results = json.loads(json_to_eval)
        return compressed_results
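
    def _example_parse_full_results(self):
        # Not run by the harness; illustrative only. full_results.json is
        # written as a JSONP-style payload, so parse_full_results() peels off
        # the ADD_RESULTS(...) envelope before handing the body to json.loads().
        wrapped = 'ADD_RESULTS({"tests":{}});'
        self.assertEquals(self.parse_full_results(wrapped), {'tests': {}})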

    def test_end_to_end(self):
        fs = unit_test_filesystem()
        res, out, err, user = logging_run(record_results=True, tests_included=True, filesystem=fs)

        # Seven tests should fail, so the return code should be 7.
        self.assertEquals(res, 7)
        results = self.parse_full_results(fs.files['/tmp/layout-test-results/full_results.json'])

        # Check to ensure we're passing back the image diff percentage correctly.
        self.assertEquals(results['tests']['failures']['expected']['image.html']['image_diff_percent'], 1)

        # Check that we attempted to display the results page in a browser.
        self.assertTrue(user.opened_urls)


class RebaselineTest(unittest.TestCase):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the expected baselines."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline[1:]
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertTrue(baseline_msg in err.get())

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--pixel-tests',
            '--reset-results',
            'passes/image.html',
            'failures/expected/missing_image.html'],
            tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertTrue(out.empty())
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list, "/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "/failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that we update expectations in place. If the expectation
        # is missing, update the expected generic location.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertFalse(out.empty())
        self.assertEqual(len(file_list), 6)
        self.assertBaselines(file_list, "/failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "/platform/test-mac-leopard/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations. If the expectation
        # is missing, then create a new expectation in the platform dir.
        fs = unit_test_filesystem()
        res, out, err, _ = logging_run(['--pixel-tests',
            '--new-baseline',
            'passes/image.html',
            'failures/expected/missing_image.html'],
            tests_included=True, filesystem=fs, new_results=True)
        file_list = fs.written_files.keys()
        file_list.remove('/tmp/layout-test-results/tests_run0.txt')
        self.assertEquals(res, 0)
        self.assertTrue(out.empty())
        self.assertEqual(len(file_list), 4)
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "/platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class DryrunTest(unittest.TestCase):
    # FIXME: it's hard to know which platforms are safe to test; the
    # chromium platforms require a chromium checkout, and the mac platform
    # requires fcntl, so it can't be tested on win32, etc. There is
    # probably a better way of handling this.
    def disabled_test_darwin(self):
        if sys.platform != "darwin":
            return

        self.assertTrue(passing_run(['--platform', 'dryrun', 'fast/html'], tests_included=True))
        self.assertTrue(passing_run(['--platform', 'dryrun-mac', 'fast/html'], tests_included=True))

    def test_test(self):
        self.assertTrue(passing_run(['--platform', 'dryrun-test', '--pixel-tests']))


if __name__ == '__main__':
    unittest.main()