src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests_unittest.py
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
# Copyright (C) 2011 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import Queue
import StringIO
import codecs
import json
import logging
import os
import platform
import re
import sys
import thread
import threading
import time
import unittest

from webkitpy.common.system import outputcapture, path
from webkitpy.common.system.crashlogs_unittest import make_mock_crash_report_darwin
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost

from webkitpy.layout_tests import port
from webkitpy.layout_tests import run_webkit_tests
from webkitpy.layout_tests.models import test_run_results
from webkitpy.layout_tests.port import Port
from webkitpy.layout_tests.port import test
from webkitpy.tool import grammar
from webkitpy.tool.mocktool import MockOptions


def parse_args(extra_args=None, tests_included=False, new_results=False, print_nothing=True):
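    """Build an argument list for run-webkit-tests and parse it.

    Defaults to the mock 'test' platform, a single child process, and
    '--no-new-test-results' unless new_results is True."""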
    extra_args = extra_args or []
    args = []
    if '--platform' not in extra_args:
        args.extend(['--platform', 'test'])
    if not new_results:
        args.append('--no-new-test-results')

    if '--child-processes' not in extra_args:
        # Pass the value as a string, as optparse would see it on a real command line.
        args.extend(['--child-processes', '1'])
    args.extend(extra_args)
    if not tests_included:
        # We use the glob to test that globbing works.
        args.extend(['passes',
                     'http/tests',
                     'websocket/tests',
                     'failures/expected/*'])
    return run_webkit_tests.parse_args(args)
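
# A minimal usage sketch of the helper above (illustrative only; the 'passes'
# directory and the 'test' platform are fixtures provided by webkitpy's mock
# test port):
#
#   options, parsed_args = parse_args(['passes/text.html'], tests_included=True)
#   # options.platform == 'test'; parsed_args == ['passes/text.html']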


def passing_run(extra_args=None, port_obj=None, tests_included=False, host=None, shared_port=True):
    options, parsed_args = parse_args(extra_args, tests_included)
    if not port_obj:
        host = host or MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj

    logging_stream = StringIO.StringIO()
    run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    return run_details.exit_code == 0


def logging_run(extra_args=None, port_obj=None, tests_included=False, host=None, new_results=False, shared_port=True):
    options, parsed_args = parse_args(extra_args=extra_args,
                                      tests_included=tests_included,
                                      print_nothing=False, new_results=new_results)
    host = host or MockHost()
    if not port_obj:
        port_obj = host.port_factory.get(port_name=options.platform, options=options)

    run_details, output = run_and_capture(port_obj, options, parsed_args, shared_port)
    return (run_details, output, host.user)


def run_and_capture(port_obj, options, parsed_args, shared_port=True):
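    """Run the tests while capturing stdout/stderr and return
    (run_details, logging_stream)."""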
    if shared_port:
        port_obj.host.port_factory.get = lambda *args, **kwargs: port_obj
    oc = outputcapture.OutputCapture()
    try:
        oc.capture_output()
        logging_stream = StringIO.StringIO()
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()
    return (run_details, logging_stream)


def get_tests_run(args, host=None, port_obj=None):
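    """Return the names of the tests that were run, in the order they ran."""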
    results = get_test_results(args, host=host, port_obj=port_obj)
    return [result.test_name for result in results]


def get_test_batches(args, host=None):
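    """Group the tests that were run into batches by worker pid.

    The result is a list of lists of test names, e.g. (hypothetical values)
    [['passes/a.html', 'passes/b.html'], ['passes/c.html']], with one inner
    list per contiguous run of results that share a pid."""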
    results = get_test_results(args, host)
    batches = []
    batch = []
    current_pid = None
    for result in results:
        if batch and result.pid != current_pid:
            batches.append(batch)
            batch = []
        batch.append(result.test_name)
        # Remember which worker produced this result so the next iteration
        # can detect a pid change; without this, every result after the
        # first would start a new batch.
        current_pid = result.pid
    if batch:
        batches.append(batch)
    return batches


def get_test_results(args, host=None, port_obj=None):
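    """Run the tests and return the TestResult objects from the initial run,
    plus any results from the retry run."""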
    options, parsed_args = parse_args(args, tests_included=True)

    host = host or MockHost()
    port_obj = port_obj or host.port_factory.get(port_name=options.platform, options=options)

    oc = outputcapture.OutputCapture()
    oc.capture_output()
    logging_stream = StringIO.StringIO()
    try:
        run_details = run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
    finally:
        oc.restore_output()

    all_results = []
    if run_details.initial_results:
        all_results.extend(run_details.initial_results.all_results)

    if run_details.retry_results:
        all_results.extend(run_details.retry_results.all_results)
    return all_results


def parse_full_results(full_results_text):
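    """Strip the JSONP wrapper from a results file and parse the JSON.

    For example (hypothetical payload), 'ADD_RESULTS({"num_flaky":0});'
    parses to {u'num_flaky': 0}."""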
    json_to_eval = full_results_text.replace("ADD_RESULTS(", "").replace(");", "")
    compressed_results = json.loads(json_to_eval)
    return compressed_results


class StreamTestingMixin(object):
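    """Assertion helpers for checking the contents of StringIO-backed log streams."""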
    def assertContains(self, stream, string):
        self.assertTrue(string in stream.getvalue())

    def assertEmpty(self, stream):
        self.assertFalse(stream.getvalue())

    def assertNotEmpty(self, stream):
        self.assertTrue(stream.getvalue())


class RunTest(unittest.TestCase, StreamTestingMixin):
    def setUp(self):
        # A real PlatformInfo object is used here instead of a
        # MockPlatformInfo because we need to actually check for
        # Windows and Mac to skip some tests.
        self._platform = SystemHost().platform

        # FIXME: Remove this when we fix test-webkitpy to work
        # properly on cygwin (bug 63846).
        self.should_test_processes = not self._platform.is_win()

    def test_basic(self):
        options, args = parse_args(tests_included=True)
        logging_stream = StringIO.StringIO()
        host = MockHost()
        port_obj = host.port_factory.get(options.platform, options)
        details = run_webkit_tests.run(port_obj, options, args, logging_stream)

        # These numbers will need to be updated whenever we add new tests.
        self.assertEqual(details.initial_results.total, test.TOTAL_TESTS)
        self.assertEqual(details.initial_results.expected_skips, test.TOTAL_SKIPS)
        self.assertEqual(len(details.initial_results.unexpected_results_by_name), test.UNEXPECTED_PASSES + test.UNEXPECTED_FAILURES)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES)
        self.assertEqual(details.retry_results.total, test.UNEXPECTED_FAILURES)

        expected_tests = details.initial_results.total - details.initial_results.expected_skips - len(details.initial_results.unexpected_results_by_name)
        expected_summary_str = ''
        if details.initial_results.expected_failures > 0:
            expected_summary_str = " (%d passed, %d didn't)" % (expected_tests - details.initial_results.expected_failures, details.initial_results.expected_failures)
        one_line_summary = "%d tests ran as expected%s, %d didn't:\n" % (
            expected_tests,
            expected_summary_str,
            len(details.initial_results.unexpected_results_by_name))
        self.assertTrue(one_line_summary in logging_stream.buflist)

        # Ensure the results were summarized properly.
        self.assertEqual(details.summarized_failing_results['num_regressions'], details.exit_code)

        # Ensure the results were written out and displayed.
        failing_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        json_to_eval = failing_results_text.replace("ADD_RESULTS(", "").replace(");", "")
        self.assertEqual(json.loads(json_to_eval), details.summarized_failing_results)

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertEqual(json.loads(full_results_text), details.summarized_full_results)

        self.assertEqual(host.user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_batch_size(self):
        batch_tests_run = get_test_batches(['--batch-size', '2'])
        for batch in batch_tests_run:
            self.assertTrue(len(batch) <= 2, '%s had too many tests' % ', '.join(batch))

    def test_max_locked_shards(self):
        # Tests for the default of using one locked shard even in the case of more than one child process.
        if not self.should_test_processes:
            return
        save_env_webkit_test_max_locked_shards = None
        if "WEBKIT_TEST_MAX_LOCKED_SHARDS" in os.environ:
            save_env_webkit_test_max_locked_shards = os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
            del os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"]
        _, regular_output, _ = logging_run(['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
        try:
            self.assertTrue(any(['1 locked' in line for line in regular_output.buflist]))
        finally:
            if save_env_webkit_test_max_locked_shards:
                os.environ["WEBKIT_TEST_MAX_LOCKED_SHARDS"] = save_env_webkit_test_max_locked_shards

    def test_child_processes_2(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2'], shared_port=False)
            self.assertTrue(any(['Running 2 ' in line for line in regular_output.buflist]))

    def test_child_processes_min(self):
        if self.should_test_processes:
            _, regular_output, _ = logging_run(
                ['--debug-rwt-logging', '--child-processes', '2', '-i', 'passes/virtual_passes', 'passes'],
                tests_included=True, shared_port=False)
            self.assertTrue(any(['Running 1 ' in line for line in regular_output.buflist]))

    def test_dryrun(self):
        tests_run = get_tests_run(['--dry-run'])
        self.assertEqual(tests_run, [])

        tests_run = get_tests_run(['-n'])
        self.assertEqual(tests_run, [])

    def test_enable_sanitizer(self):
        self.assertTrue(passing_run(['--enable-sanitizer', 'failures/expected/text.html']))

    def test_exception_raised(self):
        # Exceptions raised by a worker are treated differently depending on
        # whether the worker runs in-process or out-of-process. In-process
        # exceptions propagate normally, which gives us the full stack trace
        # and traceback from the worker. The downside is that the exception
        # could be of any type, but that is actually useful in testing.
        #
        # Exceptions raised in a separate process are re-packaged into
        # WorkerExceptions (a subclass of BaseException), which carry a string
        # capture of the worker's stack that can be printed but doesn't
        # display properly in the unit test exception handlers.
        self.assertRaises(BaseException, logging_run,
            ['failures/expected/exception.html', '--child-processes', '1'], tests_included=True)

        if self.should_test_processes:
            self.assertRaises(BaseException, logging_run,
                ['--child-processes', '2', '--skipped=ignore', 'failures/expected/exception.html', 'passes/text.html'], tests_included=True, shared_port=False)

    def test_device_failure(self):
        # Test that we handle a device going offline during a test properly.
        details, regular_output, _ = logging_run(['failures/expected/device_failure.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('worker/0 has failed' in regular_output.getvalue())

    def test_full_results_html(self):
        host = MockHost()
        details, _, _ = logging_run(['--full-results-html'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_keyboard_interrupt(self):
        # Note that this also tests running a test marked as SKIP if
        # you specify it explicitly.
        details, _, _ = logging_run(['failures/expected/keyboard.html', '--child-processes', '1'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.INTERRUPTED_EXIT_STATUS)

        if self.should_test_processes:
            _, regular_output, _ = logging_run(['failures/expected/keyboard.html', 'passes/text.html', '--child-processes', '2', '--skipped=ignore'], tests_included=True, shared_port=False)
            self.assertTrue(any(['Interrupted, exiting' in line for line in regular_output.buflist]))

    def test_no_tests_found(self):
        details, err, _ = logging_run(['resources'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_2(self):
        details, err, _ = logging_run(['foo'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_no_tests_found_3(self):
        details, err, _ = logging_run(['--run-chunk', '5:400', 'foo/bar.html'], tests_included=True)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertContains(err, 'No tests to run.\n')

    def test_natural_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['failures/expected/missing_text.html', 'failures/expected/text.html', 'passes/args.html', 'passes/audio.html'], tests_run)

    def test_natural_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=natural'] + tests_to_run)
        self.assertEqual(['passes/args.html', 'passes/args.html', 'passes/audio.html', 'passes/audio.html'], tests_run)

    def test_random_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_daily_seed_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random-seeded'] + tests_to_run)
        self.assertEqual(sorted(tests_to_run), sorted(tests_run))

    def test_random_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=random'] + tests_to_run)
        self.assertEqual(tests_run.count('passes/audio.html'), 2)
        self.assertEqual(tests_run.count('passes/args.html'), 2)

    def test_no_order(self):
        tests_to_run = ['passes/audio.html', 'failures/expected/text.html', 'failures/expected/missing_text.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_test_specified_multiple_times(self):
        tests_to_run = ['passes/args.html', 'passes/audio.html', 'passes/audio.html', 'passes/args.html']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_to_run, tests_run)

    def test_no_order_with_directory_entries_in_natural_order(self):
        tests_to_run = ['http/tests/ssl', 'perf/foo', 'http/tests/passes']
        tests_run = get_tests_run(['--order=none'] + tests_to_run)
        self.assertEqual(tests_run, ['http/tests/ssl/text.html', 'perf/foo/test.html', 'http/tests/passes/image.html', 'http/tests/passes/text.html'])

    def test_repeat_each(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--repeat-each', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/image.html', 'passes/text.html', 'passes/text.html'])

    def test_ignore_flag(self):
        # Note that passes/image.html is expected to be run since we specified it directly.
        tests_run = get_tests_run(['-i', 'passes', 'passes/image.html'])
        self.assertFalse('passes/text.html' in tests_run)
        self.assertTrue('passes/image.html' in tests_run)

    def test_skipped_flag(self):
        tests_run = get_tests_run(['passes'])
        self.assertFalse('passes/skipped/skip.html' in tests_run)
        num_tests_run_by_default = len(tests_run)

        # Check that nothing changes when we specify skipped=default.
        self.assertEqual(len(get_tests_run(['--skipped=default', 'passes'])),
                         num_tests_run_by_default)

        # Now check that we run one more test (the skipped one).
        tests_run = get_tests_run(['--skipped=ignore', 'passes'])
        self.assertTrue('passes/skipped/skip.html' in tests_run)
        self.assertEqual(len(tests_run), num_tests_run_by_default + 1)

        # Now check that we only run the skipped test.
        self.assertEqual(get_tests_run(['--skipped=only', 'passes']), ['passes/skipped/skip.html'])

        # Now check that we don't run anything.
        self.assertEqual(get_tests_run(['--skipped=always', 'passes/skipped/skip.html']), [])

    def test_iterations(self):
        tests_to_run = ['passes/image.html', 'passes/text.html']
        tests_run = get_tests_run(['--iterations', '2'] + tests_to_run)
        self.assertEqual(tests_run, ['passes/image.html', 'passes/text.html', 'passes/image.html', 'passes/text.html'])

    def test_repeat_each_iterations_num_tests(self):
        # The total number of tests run should be: number_of_tests *
        # repeat_each * iterations. Here that is 2 tests * 4 repeats *
        # 2 iterations = 16 runs, half passing and half expected failures.
        host = MockHost()
        _, err, _ = logging_run(
            ['--iterations', '2', '--repeat-each', '4', '--debug-rwt-logging', 'passes/text.html', 'failures/expected/text.html'],
            tests_included=True, host=host)
        self.assertContains(err, "All 16 tests ran as expected (8 passed, 8 didn't).\n")

    def test_run_chunk(self):
        # Test that we actually select the right chunk.
        all_tests_run = get_tests_run(['passes', 'failures'])
        chunk_tests_run = get_tests_run(['--run-chunk', '1:4', 'passes', 'failures'])
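        # '--run-chunk 1:4' means chunk index 1 with four tests per chunk,
        # i.e. all_tests_run[4:8] (chunk indices are 0-based).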
        self.assertEqual(all_tests_run[4:8], chunk_tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        chunk_tests_run = get_tests_run(['--run-chunk', '1:3'] + tests_to_run)
        self.assertEqual(['passes/text.html', 'passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_part(self):
        # Test that we actually select the right part.
        tests_to_run = ['passes/error.html', 'passes/image.html', 'passes/platform_image.html', 'passes/text.html']
        tests_run = get_tests_run(['--run-part', '1:2'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], tests_run)

        # Test that we wrap around if the number of tests is not evenly divisible by the chunk size
        # (here we end up with 3 parts, each with 2 tests, and we only have 4 tests total, so the
        # last part repeats the first two tests).
        chunk_tests_run = get_tests_run(['--run-part', '3:3'] + tests_to_run)
        self.assertEqual(['passes/error.html', 'passes/image.html'], chunk_tests_run)

    def test_run_singly(self):
        batch_tests_run = get_test_batches(['--run-singly'])
        for batch in batch_tests_run:
            self.assertEqual(len(batch), 1, '%s had too many tests' % ', '.join(batch))

    def test_skip_failing_tests(self):
        # This tests that we skip both known failing and known flaky tests. Because there are
        # no known flaky tests in the default test_expectations, we add additional expectations.
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) passes/image.html [ ImageOnlyFailure Pass ]\n')

        batches = get_test_batches(['--skip-failing-tests', '--additional-expectations', '/tmp/overrides.txt'], host=host)
        has_passes_text = False
        for batch in batches:
            self.assertFalse('failures/expected/text.html' in batch)
            self.assertFalse('passes/image.html' in batch)
            has_passes_text = has_passes_text or ('passes/text.html' in batch)
        self.assertTrue(has_passes_text)

    def test_single_file(self):
        tests_run = get_tests_run(['passes/text.html'])
        self.assertEqual(tests_run, ['passes/text.html'])

    def test_single_file_with_prefix(self):
        tests_run = get_tests_run(['LayoutTests/passes/text.html'])
        self.assertEqual(['passes/text.html'], tests_run)

    def test_single_skipped_file(self):
        tests_run = get_tests_run(['failures/expected/keybaord.html'])
        self.assertEqual([], tests_run)

    def test_stderr_is_saved(self):
        host = MockHost()
        self.assertTrue(passing_run(host=host))
        self.assertEqual(host.filesystem.read_text_file('/tmp/layout-test-results/passes/error-stderr.txt'),
                         'stuff going to stderr')

    def test_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)
        host.filesystem.remove(filename)
        details, err, user = logging_run(['--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test_run_results.NO_TESTS_EXIT_STATUS)
        self.assertNotEmpty(err)

    def test_test_list_with_prefix(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'LayoutTests/passes/text.html')
        tests_run = get_tests_run(['--test-list=%s' % filename], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

    def test_smoke_test(self):
        host = MockHost()
        smoke_test_filename = test.LAYOUT_TEST_DIR + '/SmokeTests'
        host.filesystem.write_text_file(smoke_test_filename, 'passes/text.html\n')

        # Test the default smoke testing.
        tests_run = get_tests_run(['--smoke'], host=host)
        self.assertEqual(['passes/text.html'], tests_run)

        # Test running the smoke tests plus some manually-specified tests.
        tests_run = get_tests_run(['--smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html', 'passes/text.html'], tests_run)

        # Test that --no-smoke runs only the manually-specified tests.
        tests_run = get_tests_run(['--no-smoke', 'passes/image.html'], host=host)
        self.assertEqual(['passes/image.html'], tests_run)

        # Test that we don't run just the smoke tests by default on a normal test port.
        tests_run = get_tests_run([], host=host)
        self.assertNotEqual(['passes/text.html'], tests_run)

        # Create a port that does run only the smoke tests by default, and verify that works as expected.
        port_obj = host.port_factory.get('test')
        port_obj.default_smoke_test_only = lambda: True
        tests_run = get_tests_run([], host=host, port_obj=port_obj)
        self.assertEqual(['passes/text.html'], tests_run)

        # Verify that --no-smoke continues to work on a smoke-by-default port.
        tests_run = get_tests_run(['--no-smoke'], host=host, port_obj=port_obj)
        self.assertNotEqual(['passes/text.html'], tests_run)

    def test_missing_and_unexpected_results(self):
        # Test that missing and unexpected results are both reported
        # correctly in full_results.json.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results', '--retry-failures',
            'failures/expected/missing_image.html',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/text-image-checksum.html'],
            tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 2)
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find('"text-image-checksum.html":{"expected":"PASS","actual":"IMAGE+TEXT","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"missing_text.html":{"expected":"PASS","is_missing_text":true,"actual":"MISSING","is_unexpected":true') != -1)
        self.assertTrue(json_string.find('"num_regressions":2') != -1)
        self.assertTrue(json_string.find('"num_flaky":0') != -1)

    def test_different_failure_on_retry(self):
        # This tests that if a test fails two different ways -- both unexpected
        # -- we treat it as a failure rather than a flaky result. We use the
        # initial failure for simplicity and consistency with the flakiness
        # dashboard, even if the second failure is worse.

        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/text_then_crash.html'], tests_included=True)
        self.assertEqual(details.exit_code, 1)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['unexpected']['text_then_crash.html']['actual'],
                         'TEXT CRASH')

        # If we get a test that fails two different ways -- but the second one is expected --
        # we should treat it as a flaky result and report the initial unexpected failure type
        # to the dashboard. However, the test should be considered passing.
        details, err, _ = logging_run(['--retry-failures', 'failures/expected/crash_then_text.html'], tests_included=True)
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(details.summarized_failing_results['tests']['failures']['expected']['crash_then_text.html']['actual'],
                         'CRASH FAIL')

    def test_pixel_test_directories(self):
        # Both tests have a failing checksum; only the first lives in a
        # directory listed with --pixel-test-directory, so only it should fail.
        host = MockHost()
        args = ['--pixel-tests', '--retry-failures', '--pixel-test-directory', 'failures/unexpected/pixeldir',
                'failures/unexpected/pixeldir/image_in_pixeldir.html',
                'failures/unexpected/image_not_in_pixeldir.html']
        details, err, _ = logging_run(extra_args=args, host=host, tests_included=True)

        self.assertEqual(details.exit_code, 1)
        expected_token = '"pixeldir":{"image_in_pixeldir.html":{"expected":"PASS","actual":"IMAGE","is_unexpected":true'
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        self.assertTrue(json_string.find(expected_token) != -1)

    def test_crash_with_stderr(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/crash-with-stderr.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('{"crash-with-stderr.html":{"expected":"PASS","actual":"CRASH","has_stderr":true,"is_unexpected":true') != -1)

    def test_no_image_failure_with_image_diff(self):
        host = MockHost()
        _, regular_output, _ = logging_run(['failures/unexpected/checksum-with-matching-image.html'], tests_included=True, host=host)
        self.assertTrue(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json').find('"num_regressions":0') != -1)

    def test_exit_after_n_failures_upload(self):
        host = MockHost()
        details, regular_output, user = logging_run(
            ['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'],
            tests_included=True, host=host)

        # The incremental results should have been written out and then
        # deleted, so the file must not exist once the run finishes.
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/incremental_results.json'))

        self.assertEqual(details.exit_code, test_run_results.EARLY_EXIT_STATUS)

        # This checks that passes/text.html is considered SKIPped.
        self.assertTrue('"skipped":1' in host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        # This checks that we told the user we bailed out.
        self.assertTrue('Exiting early after 1 failures. 1 tests run.\n' in regular_output.getvalue())

        # This checks that neither test ran as expected.
        # FIXME: This log message is confusing; tests that were skipped should be called out separately.
        self.assertTrue('0 tests ran as expected, 2 didn\'t:\n' in regular_output.getvalue())

    def test_exit_after_n_failures(self):
        # Unexpected failures should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/text-image-checksum.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/unexpected/text-image-checksum.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/text.html', 'passes/text.html', '--exit-after-n-failures', '1'])
        self.assertEqual(['failures/expected/text.html', 'passes/text.html'], tests_run)

    def test_exit_after_n_crashes(self):
        # Unexpected crashes should result in tests stopping.
        tests_run = get_tests_run(['failures/unexpected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/crash.html'], tests_run)

        # Same with timeouts.
        tests_run = get_tests_run(['failures/unexpected/timeout.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/unexpected/timeout.html'], tests_run)

        # But we'll keep going for expected ones.
        tests_run = get_tests_run(['failures/expected/crash.html', 'passes/text.html', '--exit-after-n-crashes-or-timeouts', '1'])
        self.assertEqual(['failures/expected/crash.html', 'passes/text.html'], tests_run)

    def test_results_directory_absolute(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        host = MockHost()
        with host.filesystem.mkdtemp() as tmpdir:
            _, _, user = logging_run(['--results-directory=' + str(tmpdir)], tests_included=True, host=host)
            self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, host.filesystem.join(tmpdir, 'results.html'))])

    def test_results_directory_default(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.

        # This is the default location.
        _, _, user = logging_run(tests_included=True)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(MockHost().platform, '/tmp/layout-test-results/results.html')])

    def test_results_directory_relative(self):
        # We run a configuration that should fail, to generate output, then
        # look for what the output results url was.
        host = MockHost()
        host.filesystem.maybe_make_directory('/tmp/cwd')
        host.filesystem.chdir('/tmp/cwd')
        _, _, user = logging_run(['--results-directory=foo'], tests_included=True, host=host)
        self.assertEqual(user.opened_urls, [path.abspath_to_uri(host.platform, '/tmp/cwd/foo/results.html')])

    def test_retrying_default_value(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', 'failures/unexpected'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)  # FIXME: This should be a constant in test.py.
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_default_value_test_list(self):
        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures/unexpected/text-image-checksum.html\nfailures/unexpected/crash.html')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 2)
        self.assertFalse('Retrying' in err.getvalue())

        host = MockHost()
        filename = '/tmp/foo.txt'
        host.filesystem.write_text_file(filename, 'failures')
        details, err, _ = logging_run(['--debug-rwt-logging', '--test-list=%s' % filename], tests_included=True, host=host)
        self.assertEqual(details.exit_code, test.UNEXPECTED_FAILURES - 7)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_and_flaky_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/retries/failures/flaky/text-actual.txt'))
        self.assertEqual(len(host.user.opened_urls), 0)

        # Now we test that --clobber-old-results does remove the old entries and the old retries,
        # and that we don't retry again.
        host = MockHost()
        details, err, _ = logging_run(['--no-retry-failures', '--clobber-old-results', 'failures/flaky'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Clobbering old results' in err.getvalue())
        self.assertTrue('flaky/text.html' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/flaky/text-actual.txt'))
        self.assertFalse(host.filesystem.exists('retries'))
        self.assertEqual(len(host.user.opened_urls), 1)

    def test_retrying_crashed_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/crash.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_leak_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--retry-failures', 'failures/unexpected/leak.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())

    def test_retrying_force_pixel_tests(self):
        host = MockHost()
        details, err, _ = logging_run(['--no-pixel-tests', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue('Retrying' in err.getvalue())
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertFalse(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.png'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.png'))
        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        # Use a name that doesn't shadow the json module imported above.
        full_results = parse_full_results(json_string)
        self.assertEqual(full_results["tests"]["failures"]["unexpected"]["text-image-checksum.html"],
            {"expected": "PASS", "actual": "TEXT IMAGE+TEXT", "is_unexpected": True})
        self.assertFalse(full_results["pixel_tests_enabled"])
        self.assertEqual(details.enabled_pixel_tests_in_retry, True)

    def test_retrying_uses_retries_directory(self):
        host = MockHost()
        details, err, _ = logging_run(['--debug-rwt-logging', '--retry-failures', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        self.assertEqual(details.exit_code, 1)
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/failures/unexpected/text-image-checksum-actual.txt'))
        self.assertTrue(host.filesystem.exists('/tmp/layout-test-results/retries/failures/unexpected/text-image-checksum-actual.txt'))

    def test_run_order__inline(self):
        # These tests check that we run tests in ascending alphabetical
        # order per directory. HTTP tests are sharded separately from other
        # tests, so we have to test both.
        tests_run = get_tests_run(['-i', 'passes/virtual_passes', 'passes'])
        self.assertEqual(tests_run, sorted(tests_run))

        tests_run = get_tests_run(['http/tests/passes'])
        self.assertEqual(tests_run, sorted(tests_run))

    def test_virtual(self):
        self.assertTrue(passing_run(['passes/text.html', 'passes/args.html',
                                     'virtual/passes/text.html', 'virtual/passes/args.html']))

    def test_reftest_run(self):
        tests_run = get_tests_run(['passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_run_reftests_if_pixel_tests_are_disabled(self):
        tests_run = get_tests_run(['--no-pixel-tests', 'passes/reftest.html'])
        self.assertEqual(['passes/reftest.html'], tests_run)

    def test_reftest_expected_html_should_be_ignored(self):
        tests_run = get_tests_run(['passes/reftest-expected.html'])
        self.assertEqual([], tests_run)

    def test_reftest_driver_should_run_expected_html(self):
        tests_run = get_test_results(['passes/reftest.html'])
        self.assertEqual(tests_run[0].references, ['passes/reftest-expected.html'])

    def test_reftest_driver_should_run_expected_mismatch_html(self):
        tests_run = get_test_results(['passes/mismatch.html'])
        self.assertEqual(tests_run[0].references, ['passes/mismatch-expected-mismatch.html'])

    def test_reftest_should_not_use_naming_convention_if_not_listed_in_reftestlist(self):
        host = MockHost()
        _, err, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        results = parse_full_results(host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json'))

        self.assertEqual(results["tests"]["reftests"]["foo"]["unlistedtest.html"]["actual"], "MISSING")
        self.assertEqual(results["num_regressions"], 5)
        self.assertEqual(results["num_flaky"], 0)

    def test_reftest_crash(self):
        test_results = get_test_results(['failures/unexpected/crash-reftest.html'])
        # The list of references should be empty since the test crashed and we didn't run any references.
        self.assertEqual(test_results[0].references, [])

    def test_reftest_with_virtual_reference(self):
        _, err, _ = logging_run(['--details', 'virtual/virtual_passes/passes/reftest.html'], tests_included=True)
        self.assertTrue('ref: virtual/virtual_passes/passes/reftest-expected.html' in err.getvalue())

    def test_additional_platform_directory(self):
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/../foo']))
        self.assertTrue(passing_run(['--additional-platform-directory', '/tmp/foo', '--additional-platform-directory', '/tmp/bar']))
        self.assertTrue(passing_run(['--additional-platform-directory', 'foo']))

    def test_additional_expectations(self):
        host = MockHost()
        host.filesystem.write_text_file('/tmp/overrides.txt', 'Bug(x) failures/unexpected/mismatch.html [ ImageOnlyFailure ]\n')
        self.assertTrue(passing_run(['--additional-expectations', '/tmp/overrides.txt', 'failures/unexpected/mismatch.html'],
                                    tests_included=True, host=host))

    @staticmethod
    def has_test_of_type(tests, type):
        return [test for test in tests if type in test]

    def test_platform_directories_ignored_when_searching_for_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_platform_directories_not_searched_for_additional_tests(self):
        tests_run = get_tests_run(['--platform', 'test-mac-leopard', 'http'])
        self.assertFalse('platform/test-mac-leopard/http/test.html' in tests_run)
        self.assertFalse('platform/test-win-win7/http/test.html' in tests_run)

    def test_output_diffs(self):
        # Test to ensure that we don't generate -wdiff.html or -pretty.html if wdiff and PrettyPatch
        # aren't available.
        host = MockHost()
        _, err, _ = logging_run(['--pixel-tests', 'failures/unexpected/text-image-checksum.html'], tests_included=True, host=host)
        written_files = host.filesystem.written_files
        self.assertTrue(any(path.endswith('-diff.txt') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-wdiff.html') for path in written_files.keys()))
        self.assertFalse(any(path.endswith('-pretty-diff.html') for path in written_files.keys()))

        full_results_text = host.filesystem.read_text_file('/tmp/layout-test-results/full_results.json')
        full_results = json.loads(full_results_text.replace("ADD_RESULTS(", "").replace(");", ""))
        self.assertEqual(full_results['has_wdiff'], False)
        self.assertEqual(full_results['has_pretty_patch'], False)

    def test_unsupported_platform(self):
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'foo'], stdout, stderr)

        self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(), '')
        self.assertTrue('unsupported platform' in stderr.getvalue())

    def test_build_check(self):
        # By using a port_name for a different platform than the one we're running on, the build check should always fail.
        if sys.platform == 'darwin':
            port_name = 'linux-x86'
        else:
            port_name = 'mac-lion'
        out = StringIO.StringIO()
        err = StringIO.StringIO()
        self.assertEqual(run_webkit_tests.main(['--platform', port_name, 'fast/harness/results.html'], out, err), test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

    def test_verbose_in_child_processes(self):
        # When we actually run multiple processes, we may have to reconfigure logging in the
        # child process (e.g., on win32) and we need to make sure that works and we still
        # see the verbose log output. However, we can't use logging_run() here, because
        # capturing stdout and stderr with outputcapture later results in a nonpicklable host.

        # Test is flaky on Windows: https://bugs.webkit.org/show_bug.cgi?id=98559
        if not self.should_test_processes:
            return

        options, parsed_args = parse_args(['--verbose', '--fully-parallel', '--child-processes', '2', 'passes/text.html', 'passes/image.html'], tests_included=True, print_nothing=False)
        host = MockHost()
        port_obj = host.port_factory.get(port_name=options.platform, options=options)
        logging_stream = StringIO.StringIO()
        run_webkit_tests.run(port_obj, options, parsed_args, logging_stream=logging_stream)
        self.assertTrue('text.html passed' in logging_stream.getvalue())
        self.assertTrue('image.html passed' in logging_stream.getvalue())

    def disabled_test_driver_logging(self):
        # FIXME: Figure out how to either use a mock-test port to
        # get output or make mock ports work again.
        host = Host()
        _, err, _ = logging_run(['--platform', 'mock-win', '--driver-logging', 'fast/harness/results.html'],
                                tests_included=True, host=host)
        self.assertTrue('OUT:' in err.getvalue())

    def test_write_full_results_to(self):
        host = MockHost()
        details, _, _ = logging_run(['--write-full-results-to', '/tmp/full_results.json'], host=host)
        self.assertEqual(details.exit_code, 0)
        self.assertTrue(host.filesystem.exists('/tmp/full_results.json'))


class EndToEndTest(unittest.TestCase):
    def test_reftest_with_two_notrefs(self):
        # Test that a reftest with multiple match (==) and mismatch (!=)
        # references is summarized correctly: successes are omitted from the
        # failing-results file, and failures report their reftest_type.
        host = MockHost()
        _, _, _ = logging_run(['--no-show-results', 'reftests/foo/'], tests_included=True, host=host)
        file_list = host.filesystem.written_files.keys()

        json_string = host.filesystem.read_text_file('/tmp/layout-test-results/failing_results.json')
        # Use a name that doesn't shadow the json module imported above.
        results = parse_full_results(json_string)
        self.assertTrue("multiple-match-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-mismatch-success.html" not in results["tests"]["reftests"]["foo"])
        self.assertTrue("multiple-both-success.html" not in results["tests"]["reftests"]["foo"])

        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-match-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["=="], "is_unexpected": True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-mismatch-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["!="], "is_unexpected": True})
        self.assertEqual(results["tests"]["reftests"]["foo"]["multiple-both-failure.html"],
            {"expected": "PASS", "actual": "IMAGE", "reftest_type": ["==", "!="], "is_unexpected": True})


class RebaselineTest(unittest.TestCase, StreamTestingMixin):
    def assertBaselines(self, file_list, file, extensions, err):
        """Assert that file_list contains the new baselines and that the log
        stream reports writing them."""
        for ext in extensions:
            baseline = file + "-expected" + ext
            baseline_msg = 'Writing new expected result "%s"\n' % baseline
            self.assertTrue(any(f.find(baseline) != -1 for f in file_list))
            self.assertContains(err, baseline_msg)

    # FIXME: Add tests to ensure that we're *not* writing baselines when we're not
    # supposed to be.

    def test_reset_results(self):
        # Test that --reset-results rewrites the baselines in place.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--reset-results', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list, "passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list, "failures/expected/missing_image", [".txt", ".png"], err)

    def test_missing_results(self):
        # Test that a missing result is written to the generic location by
        # default, or to the platform directory when the port says so.
        host = MockHost()
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 3)
        self.assertEqual(len(file_list), 10)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "platform/test/failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_missing_results_not_added_if_expected_missing(self):
        # Test that we do not write new baselines for tests whose results are
        # already expected to be missing or needing a rebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args([])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--no-show-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 7)
        self.assertFalse(any('failures/unexpected/missing_text-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_image-expected' in file for file in file_list))
        self.assertFalse(any('failures/unexpected/missing_render_tree_dump-expected' in file for file in file_list))

    def test_missing_results_not_added_if_expected_missing_and_reset_results(self):
        # Test that --reset-results overrides the expectations above: new
        # baselines are written even for tests marked Missing or
        # NeedsRebaseline.
        host = MockHost()
        options, parsed_args = run_webkit_tests.parse_args(['--pixel-tests', '--reset-results'])

        port = test.TestPort(host, options=options)
        host.filesystem.write_text_file(port.path_to_generic_test_expectations_file(), """
Bug(foo) failures/unexpected/missing_text.html [ Missing ]
Bug(foo) failures/unexpected/missing_image.html [ NeedsRebaseline ]
Bug(foo) failures/unexpected/missing_audio.html [ NeedsManualRebaseline ]
Bug(foo) failures/unexpected/missing_render_tree_dump.html [ Missing ]
""")
        details, err, _ = logging_run(['--pixel-tests', '--reset-results',
            'failures/unexpected/missing_text.html',
            'failures/unexpected/missing_image.html',
            'failures/unexpected/missing_audio.html',
            'failures/unexpected/missing_render_tree_dump.html'],
            tests_included=True, host=host, new_results=True, port_obj=port)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 11)
        self.assertBaselines(file_list, "failures/unexpected/missing_text", [".txt"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_image", [".png"], err)
        self.assertBaselines(file_list, "failures/unexpected/missing_render_tree_dump", [".txt"], err)

    def test_new_baseline(self):
        # Test that we update the platform expectations in the version-specific directories
        # for both existing and new baselines.
        host = MockHost()
        details, err, _ = logging_run(
            ['--pixel-tests', '--new-baseline', 'passes/image.html', 'failures/expected/missing_image.html'],
            tests_included=True, host=host, new_results=True)
        file_list = host.filesystem.written_files.keys()
        self.assertEqual(details.exit_code, 0)
        self.assertEqual(len(file_list), 8)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/passes/image", [".txt", ".png"], err)
        self.assertBaselines(file_list,
            "platform/test-mac-leopard/failures/expected/missing_image", [".txt", ".png"], err)


class PortTest(unittest.TestCase):
    def assert_mock_port_works(self, port_name, args=None):
        # Use None instead of a mutable [] default argument.
        args = args or []
        self.assertTrue(passing_run(args + ['--platform', 'mock-' + port_name, 'fast/harness/results.html'], tests_included=True, host=Host()))

    def disabled_test_mac_lion(self):
        self.assert_mock_port_works('mac-lion')


class MainTest(unittest.TestCase):
    def test_exception_handling(self):
        orig_run_fn = run_webkit_tests.run

        # unused args pylint: disable=W0613
        def interrupting_run(port, options, args, stderr):
            raise KeyboardInterrupt

        def successful_run(port, options, args, stderr):

            class FakeRunDetails(object):
                exit_code = test_run_results.UNEXPECTED_ERROR_EXIT_STATUS

            return FakeRunDetails()

        def exception_raising_run(port, options, args, stderr):
            assert False

        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        try:
            run_webkit_tests.run = interrupting_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.INTERRUPTED_EXIT_STATUS)

            run_webkit_tests.run = successful_run
            res = run_webkit_tests.main(['--platform', 'test'], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)

            run_webkit_tests.run = exception_raising_run
            res = run_webkit_tests.main([], stdout, stderr)
            self.assertEqual(res, test_run_results.UNEXPECTED_ERROR_EXIT_STATUS)
        finally:
            run_webkit_tests.run = orig_run_fn

    def test_buildbot_results_are_printed_on_early_exit(self):
        # unused args pylint: disable=W0613
        stdout = StringIO.StringIO()
        stderr = StringIO.StringIO()
        res = run_webkit_tests.main(['--platform', 'test', '--exit-after-n-failures', '1',
                                     'failures/unexpected/missing_text.html',
                                     'failures/unexpected/missing_image.html'],
                                    stdout, stderr)
        self.assertEqual(res, test_run_results.EARLY_EXIT_STATUS)
        self.assertEqual(stdout.getvalue(),
                ('\n'
                 'Regressions: Unexpected missing results (1)\n'
                 '  failures/unexpected/missing_image.html [ Missing ]\n\n'))