# Upstream version 9.38.198.0
# [platform/framework/web/crosswalk.git] / src/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/test.py
1 # Copyright (C) 2010 Google Inc. All rights reserved.
2 #
3 # Redistribution and use in source and binary forms, with or without
4 # modification, are permitted provided that the following conditions are
5 # met:
6 #
7 #     * Redistributions of source code must retain the above copyright
8 # notice, this list of conditions and the following disclaimer.
9 #     * Redistributions in binary form must reproduce the above
10 # copyright notice, this list of conditions and the following disclaimer
11 # in the documentation and/or other materials provided with the
12 # distribution.
13 #     * Neither the Google name nor the names of its
14 # contributors may be used to endorse or promote products derived from
15 # this software without specific prior written permission.
16 #
17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29 import base64
30 import copy
31 import sys
32 import time
33
34 from webkitpy.layout_tests.port import DeviceFailure, Driver, DriverOutput, Port
35 from webkitpy.layout_tests.port.base import VirtualTestSuite
36 from webkitpy.layout_tests.models.test_configuration import TestConfiguration
37 from webkitpy.layout_tests.models import test_run_results
38 from webkitpy.common.system.filesystem_mock import MockFileSystem
39 from webkitpy.common.system.crashlogs import CrashLogs
40
41
42 # This sets basic expectations for a test. Each individual expectation
43 # can be overridden by a keyword argument in TestList.add().
class TestInstance(object):
    """One synthetic layout test: its name, the output the fake driver
    should produce for it, and the baselines we claim are expected.

    Every attribute set here is a default; TestList.add() overrides them
    per-test via keyword arguments.
    """

    def __init__(self, name):
        self.name = name
        slash = name.rfind("/")
        dot = name.rfind(".")
        self.base = name[slash + 1:dot]

        # Flags describing how the fake driver should misbehave; all off
        # by default so a plain test "passes".
        for flag in ('crash', 'web_process_crash', 'exception', 'keyboard',
                     'timeout', 'is_reftest', 'device_failure', 'leak'):
            setattr(self, flag, False)
        self.error = ''

        # The values of each field are treated as raw byte strings. They
        # will be converted to unicode strings where appropriate using
        # FileSystem.read_text_file().
        self.actual_text = self.base + '-txt'
        self.actual_checksum = self.base + '-checksum'

        # The '\x8a' byte keeps the image payload from being valid UTF-8,
        # so nothing downstream mistakes it for text.
        self.actual_image = self.base + '\x8a' + '-png' + 'tEXtchecksum\x00' + self.actual_checksum

        # By default the expected baselines match the actual output.
        self.expected_text = self.actual_text
        self.expected_image = self.actual_image

        self.actual_audio = None
        self.expected_audio = None
73
74
75 # This is an in-memory list of tests, what we want them to produce, and
76 # what we want to claim are the expected results.
class TestList(object):
    """An in-memory collection of TestInstances, keyed by test name.

    Holds what we want each test to produce and what we claim the expected
    results are.  Adding a name twice replaces the earlier entry.
    """

    def __init__(self):
        self.tests = {}

    def add(self, name, **kwargs):
        """Registers a test; kwargs override TestInstance defaults."""
        test = TestInstance(name)
        test.__dict__.update(kwargs)
        self.tests[name] = test

    def add_reftest(self, name, reference_name, same_image, crash=False):
        """Registers a reftest and its reference; the reference's image
        matches the test's iff same_image is true."""
        self.add(name, actual_checksum='xxx', actual_image='XXX', is_reftest=True, crash=crash)
        checksum, image = ('xxx', 'XXX') if same_image else ('yyy', 'YYY')
        self.add(reference_name, actual_checksum=checksum, actual_image=image, is_reftest=True)

    def keys(self):
        return self.tests.keys()

    def __contains__(self, item):
        return item in self.tests

    def __getitem__(self, item):
        return self.tests[item]
102
#
# These numbers may need to be updated whenever we add or delete tests. This includes virtual tests.
#
# How many tests unit_test_list() yields overall and how many of them are
# skipped; unit tests assert against these.
TOTAL_TESTS = 117
TOTAL_SKIPS = 30

# Expected counts of surprising results when the whole suite is run.
UNEXPECTED_PASSES = 1
UNEXPECTED_FAILURES = 26
111
def unit_test_list():
    """Builds the canonical in-memory TestList used by the port unit tests.

    Each add() call declares a test plus the behavior the fake driver
    should simulate for it.  Note that add() keys on the test name, so a
    second add() with the same name silently replaces the first.
    """
    tests = TestList()
    tests.add('failures/expected/crash.html', crash=True)
    tests.add('failures/expected/exception.html', exception=True)
    tests.add('failures/expected/device_failure.html', device_failure=True)
    tests.add('failures/expected/timeout.html', timeout=True)
    tests.add('failures/expected/leak.html', leak=True)
    tests.add('failures/expected/missing_text.html', expected_text=None)
    tests.add('failures/expected/needsrebaseline.html', actual_text='needsrebaseline text')
    tests.add('failures/expected/needsmanualrebaseline.html', actual_text='needsmanualrebaseline text')
    tests.add('failures/expected/image.html',
              actual_image='image_fail-pngtEXtchecksum\x00checksum_fail',
              expected_image='image-pngtEXtchecksum\x00checksum-png')
    tests.add('failures/expected/image_checksum.html',
              actual_checksum='image_checksum_fail-checksum',
              actual_image='image_checksum_fail-png')
    tests.add('failures/expected/audio.html',
              actual_audio=base64.b64encode('audio_fail-wav'), expected_audio='audio-wav',
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('failures/expected/keyboard.html', keyboard=True)
    tests.add('failures/expected/missing_check.html',
              expected_image='missing_check-png')
    tests.add('failures/expected/missing_image.html', expected_image=None)
    tests.add('failures/expected/missing_audio.html', expected_audio=None,
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    # NOTE(review): duplicate of the missing_text.html add above; this
    # overwrites it with identical values.
    tests.add('failures/expected/missing_text.html', expected_text=None)
    tests.add('failures/expected/newlines_leading.html',
              expected_text="\nfoo\n", actual_text="foo\n")
    tests.add('failures/expected/newlines_trailing.html',
              expected_text="foo\n\n", actual_text="foo\n")
    tests.add('failures/expected/newlines_with_excess_CR.html',
              expected_text="foo\r\r\r\n", actual_text="foo\n")
    tests.add('failures/expected/testharness.html',
            actual_text='This is a testharness.js-based test.\nFAIL: assert fired\n.Harness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    # NOTE(review): same name as the add just above, so this replaces that
    # entry; only this variant is actually registered.
    tests.add('failures/expected/testharness.html',
            actual_text='RANDOM TEXT.\nThis is a testharness.js-based test.\nPASS: things are fine.\n.Harness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('failures/expected/text.html', actual_text='text_fail-png')
    tests.add('failures/expected/crash_then_text.html')
    tests.add('failures/expected/skip_text.html', actual_text='text diff')
    tests.add('failures/flaky/text.html')
    tests.add('failures/unexpected/missing_text.html', expected_text=None)
    tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
    tests.add('failures/unexpected/missing_image.html', expected_image=None)
    tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
  RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
  RenderBlock {HTML} at (0,0) size 800x34
    RenderBody {BODY} at (8,8) size 784x18
      RenderText {#text} at (0,0) size 133x18
        text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
    tests.add('failures/unexpected/crash.html', crash=True)
    tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
              error="mock-std-error-output")
    tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
              error="mock-std-error-output")
    tests.add('failures/unexpected/pass.html')
    tests.add('failures/unexpected/text-checksum.html',
              actual_text='text-checksum_fail-txt',
              actual_checksum='text-checksum_fail-checksum')
    tests.add('failures/unexpected/text-image-checksum.html',
              actual_text='text-image-checksum_fail-txt',
              actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
              actual_checksum='text-image-checksum_fail-checksum')
    tests.add('failures/unexpected/checksum-with-matching-image.html',
              actual_checksum='text-image-checksum_fail-checksum')
    tests.add('failures/unexpected/skip_pass.html')
    tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
    tests.add('failures/unexpected/text_then_crash.html')
    tests.add('failures/unexpected/timeout.html', timeout=True)
    tests.add('failures/unexpected/leak.html', leak=True)
    tests.add('http/tests/passes/text.html')
    tests.add('http/tests/passes/image.html')
    tests.add('http/tests/ssl/text.html')
    tests.add('passes/args.html')
    tests.add('passes/error.html', error='stuff going to stderr')
    tests.add('passes/image.html')
    tests.add('passes/audio.html',
              actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
              actual_text=None, expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('passes/platform_image.html')
    tests.add('passes/checksum_in_image.html',
              expected_image='tEXtchecksum\x00checksum_in_image-checksum')
    tests.add('passes/skipped/skip.html')
    # NOTE(review): the next four adds all use the same name, so each one
    # replaces the previous; only the final variant survives in the list.
    tests.add('passes/testharness.html',
            actual_text='CONSOLE LOG: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\n.Harness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('passes/testharness.html',
            actual_text='CONSOLE ERROR: error.\nThis is a testharness.js-based test.\nPASS: things are fine.\n.Harness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('passes/testharness.html',
            actual_text='  This is a testharness.js-based test.\nPASS: assert is fine\nHarness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)
    tests.add('passes/testharness.html',
            actual_text='This is a testharness.js-based test.\nPASS: assert is fine\nHarness: the test ran to completion.\n\n', expected_text=None,
              actual_image=None, expected_image=None,
              actual_checksum=None)

    # Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
    # See https://bugs.webkit.org/show_bug.cgi?id=69444 .
    tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')

    # Text output files contain "\r\n" on Windows.  This may be
    # helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
    tests.add('passes/text.html',
              expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')

    # For reftests.
    tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)

    # This adds a different virtual reference to ensure that that also works.
    tests.add('virtual/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)

    tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
    tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
    tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
    tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
    tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
    tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
    tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
    tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
    tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
    tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
    tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
    tests.add('reftests/foo/test.html')
    tests.add('reftests/foo/test-ref.html')

    # These are driven by the reftest.list file written in
    # add_unit_tests_to_mock_filesystem() below.
    tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')

    tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
    tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
    tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')

    # The following files shouldn't be treated as reftests
    tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
    tests.add('reftests/foo/reference/bar/common.html')
    tests.add('reftests/foo/reftest/bar/shared.html')

    tests.add('websocket/tests/passes/text.html')

    # For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
    tests.add('platform/test-mac-leopard/http/test.html')
    tests.add('platform/test-win-win7/http/test.html')

    # For testing if perf tests are running in a locked shard.
    tests.add('perf/foo/test.html')
    tests.add('perf/foo/test-ref.html')

    # For testing --pixel-test-directories.
    tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
        actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
        expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
    tests.add('failures/unexpected/image_not_in_pixeldir.html',
        actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
        expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')

    # For testing that virtual test suites don't expand names containing themselves
    # See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
    tests.add('passes/test-virtual-passes.html')
    tests.add('passes/passes/test-virtual-passes.html')

    return tests
292
293
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
# These paths exist only inside the mock filesystem.

LAYOUT_TEST_DIR = '/test.checkout/LayoutTests'
PERF_TEST_DIR = '/test.checkout/PerformanceTests'
300
301
302 # Here we synthesize an in-memory filesystem from the test list
303 # in order to fully control the test output and to demonstrate that
304 # we don't need a real filesystem to run the tests.
def add_unit_tests_to_mock_filesystem(filesystem):
    """Populates `filesystem` with the files backing unit_test_list().

    Writes the generic TestExpectations file, a reftest.list for the
    reftests/foo/ directory, each test file itself, and the expected
    baselines (-expected.txt/.png/.wav) where the test defines them.
    """
    # Add the test_expectations file.
    filesystem.maybe_make_directory('/mock-checkout/LayoutTests')
    if not filesystem.exists('/mock-checkout/LayoutTests/TestExpectations'):
        filesystem.write_text_file('/mock-checkout/LayoutTests/TestExpectations', """
Bug(test) failures/expected/crash.html [ Crash ]
Bug(test) failures/expected/crash_then_text.html [ Failure ]
Bug(test) failures/expected/image.html [ ImageOnlyFailure ]
Bug(test) failures/expected/needsrebaseline.html [ NeedsRebaseline ]
Bug(test) failures/expected/needsmanualrebaseline.html [ NeedsManualRebaseline ]
Bug(test) failures/expected/audio.html [ Failure ]
Bug(test) failures/expected/image_checksum.html [ ImageOnlyFailure ]
Bug(test) failures/expected/mismatch.html [ ImageOnlyFailure ]
Bug(test) failures/expected/missing_check.html [ Missing Pass ]
Bug(test) failures/expected/missing_image.html [ Missing Pass ]
Bug(test) failures/expected/missing_audio.html [ Missing Pass ]
Bug(test) failures/expected/missing_text.html [ Missing Pass ]
Bug(test) failures/expected/newlines_leading.html [ Failure ]
Bug(test) failures/expected/newlines_trailing.html [ Failure ]
Bug(test) failures/expected/newlines_with_excess_CR.html [ Failure ]
Bug(test) failures/expected/reftest.html [ ImageOnlyFailure ]
Bug(test) failures/expected/text.html [ Failure ]
Bug(test) failures/expected/testharness.html [ Failure ]
Bug(test) failures/expected/timeout.html [ Timeout ]
Bug(test) failures/expected/keyboard.html [ WontFix ]
Bug(test) failures/expected/exception.html [ WontFix ]
Bug(test) failures/expected/device_failure.html [ WontFix ]
Bug(test) failures/expected/leak.html [ Leak ]
Bug(test) failures/unexpected/pass.html [ Failure ]
Bug(test) passes/skipped/skip.html [ Skip ]
Bug(test) passes/text.html [ Pass ]
""")

    filesystem.maybe_make_directory(LAYOUT_TEST_DIR + '/reftests/foo')
    filesystem.write_text_file(LAYOUT_TEST_DIR + '/reftests/foo/reftest.list', """
== test.html test-ref.html

== multiple-match-success.html mismatching-ref.html
== multiple-match-success.html matching-ref.html
== multiple-match-failure.html mismatching-ref.html
== multiple-match-failure.html second-mismatching-ref.html
!= multiple-mismatch-success.html mismatching-ref.html
!= multiple-mismatch-success.html second-mismatching-ref.html
!= multiple-mismatch-failure.html mismatching-ref.html
!= multiple-mismatch-failure.html matching-ref.html
== multiple-both-success.html matching-ref.html
== multiple-both-success.html mismatching-ref.html
!= multiple-both-success.html second-mismatching-ref.html
== multiple-both-failure.html matching-ref.html
!= multiple-both-failure.html second-mismatching-ref.html
!= multiple-both-failure.html matching-ref.html
""")

    # FIXME: This test was only being ignored because of missing a leading '/'.
    # Fixing the typo causes several tests to assert, so disabling the test entirely.
    # Add in a file should be ignored by port.find_test_files().
    #files[LAYOUT_TEST_DIR + '/userscripts/resources/iframe.html'] = 'iframe'

    # Writes `contents` to <test dir>/<test base name><suffix>.
    def add_file(test, suffix, contents):
        dirname = filesystem.join(LAYOUT_TEST_DIR, test.name[0:test.name.rfind('/')])
        base = test.base
        filesystem.maybe_make_directory(dirname)
        filesystem.write_binary_file(filesystem.join(dirname, base + suffix), contents)

    # Add each test and the expected output, if any.
    test_list = unit_test_list()
    for test in test_list.tests.values():
        add_file(test, test.name[test.name.rfind('.'):], '')
        if test.is_reftest:
            continue
        if test.actual_audio:
            add_file(test, '-expected.wav', test.expected_audio)
            continue
        add_file(test, '-expected.txt', test.expected_text)
        add_file(test, '-expected.png', test.expected_image)

    filesystem.write_text_file(filesystem.join(LAYOUT_TEST_DIR, 'virtual', 'passes', 'args-expected.txt'), 'args-txt --virtual-arg')
    # Clear the list of written files so that we can watch what happens during testing.
    filesystem.clear_written_files()
384
385
class TestPort(Port):
    """Test implementation of the Port interface.

    Backed by the in-memory test list from unit_test_list() and a mock
    filesystem, so the layout-test machinery can be exercised without a
    real checkout or a real driver binary.
    """
    # NOTE: this docstring previously appeared *after* the class attributes
    # below, where it was a no-op string expression rather than the class
    # docstring; it has been moved to the top so TestPort.__doc__ is set.

    port_name = 'test'
    default_port_name = 'test-mac-leopard'

    # All fake ports whose baselines live in the mock checkout.
    ALL_BASELINE_VARIANTS = (
        'test-linux-x86_64',
        'test-mac-snowleopard', 'test-mac-leopard',
        'test-win-win7', 'test-win-xp',
    )

    # version -> ordered list of baseline search directories.
    FALLBACK_PATHS = {
        'xp':          ['test-win-win7', 'test-win-xp'],
        'win7':        ['test-win-win7'],
        'leopard':     ['test-mac-leopard', 'test-mac-snowleopard'],
        'snowleopard': ['test-mac-snowleopard'],
        'lucid':       ['test-linux-x86_64', 'test-win-win7'],
    }

    @classmethod
    def determine_full_port_name(cls, host, options, port_name):
        # A bare 'test' means "the default test port".
        if port_name == 'test':
            return TestPort.default_port_name
        return port_name

    def __init__(self, host, port_name=None, **kwargs):
        Port.__init__(self, host, port_name or TestPort.default_port_name, **kwargs)
        self._tests = unit_test_list()
        # Names of "flaky" tests that have already run once (see TestDriver).
        self._flakes = set()

        # FIXME: crbug.com/279494. This needs to be in the "real layout tests
        # dir" in a mock filesystem, rather than outside of the checkout, so
        # that tests that want to write to a TestExpectations file can share
        # this between "test" ports and "real" ports.  This is the result of
        # rebaseline_unittest.py having tests that refer to "real" port names
        # and real builders instead of fake builders that point back to the
        # test ports. rebaseline_unittest.py needs to not mix both "real" ports
        # and "test" ports

        self._generic_expectations_path = '/mock-checkout/LayoutTests/TestExpectations'
        self._results_directory = None

        # Derive OS and version from the port name.
        self._operating_system = 'mac'
        if self._name.startswith('test-win'):
            self._operating_system = 'win'
        elif self._name.startswith('test-linux'):
            self._operating_system = 'linux'

        version_map = {
            'test-win-xp': 'xp',
            'test-win-win7': 'win7',
            'test-mac-leopard': 'leopard',
            'test-mac-snowleopard': 'snowleopard',
            'test-linux-x86_64': 'lucid',
        }
        self._version = version_map[self._name]

    def repository_paths(self):
        """Returns a list of (repository_name, repository_path) tuples of its depending code base."""
        # FIXME: We override this just to keep the perf tests happy.
        return [('blink', self.layout_tests_dir())]

    def buildbot_archives_baselines(self):
        # test-win-xp is the one port pretending its bot does not archive.
        return self._name != 'test-win-xp'

    def default_pixel_tests(self):
        return True

    def _path_to_driver(self):
        # This routine shouldn't normally be called, but it is called by
        # the mock_drt Driver. We return something, but make sure it's useless.
        return 'MOCK _path_to_driver'

    def default_child_processes(self):
        return 1

    def check_build(self, needs_http, printer):
        return test_run_results.OK_EXIT_STATUS

    def check_sys_deps(self, needs_http):
        return test_run_results.OK_EXIT_STATUS

    def default_configuration(self):
        return 'Release'

    def diff_image(self, expected_contents, actual_contents):
        """Returns (diff, error); a None diff means the images match."""
        if not actual_contents and not expected_contents:
            return (None, None)
        if not actual_contents or not expected_contents:
            # One side is missing entirely; report a diff with no detail.
            return (True, None)
        if actual_contents != expected_contents:
            return ("< %s\n---\n> %s\n" % (expected_contents, actual_contents), None)
        return (None, None)

    def layout_tests_dir(self):
        return LAYOUT_TEST_DIR

    def perf_tests_dir(self):
        return PERF_TEST_DIR

    def webkit_base(self):
        return '/test.checkout'

    def _skipped_tests_for_unsupported_features(self, test_list):
        return set(['failures/expected/skip_text.html',
                    'failures/unexpected/skip_pass.html',
                    'virtual/skipped'])

    def name(self):
        return self._name

    def operating_system(self):
        return self._operating_system

    def _path_to_wdiff(self):
        return None

    def default_results_directory(self):
        return '/tmp/layout-test-results'

    def setup_test_run(self):
        pass

    def _driver_class(self):
        return TestDriver

    # The HTTP/websocket server and lock methods are all no-ops: nothing
    # real is started for the test port.
    def start_http_server(self, additional_dirs, number_of_drivers):
        pass

    def start_websocket_server(self):
        pass

    def acquire_http_lock(self):
        pass

    def stop_http_server(self):
        pass

    def stop_websocket_server(self):
        pass

    def release_http_lock(self):
        pass

    def path_to_apache(self):
        return "/usr/sbin/httpd"

    def path_to_apache_config_file(self):
        return self._filesystem.join(self.layout_tests_dir(), 'http', 'conf', 'httpd.conf')

    def path_to_generic_test_expectations_file(self):
        return self._generic_expectations_path

    def _port_specific_expectations_files(self):
        return [self._filesystem.join(self._webkit_baseline_path(d), 'TestExpectations') for d in ['test', 'test-win-xp']]

    def all_test_configurations(self):
        """Returns a sequence of the TestConfigurations the port supports."""
        # By default, we assume we want to test every graphics type in
        # every configuration on every system.
        test_configurations = []
        for version, architecture in self._all_systems():
            for build_type in self._all_build_types():
                test_configurations.append(TestConfiguration(
                    version=version,
                    architecture=architecture,
                    build_type=build_type))
        return test_configurations

    def _all_systems(self):
        return (('leopard', 'x86'),
                ('snowleopard', 'x86'),
                ('xp', 'x86'),
                ('win7', 'x86'),
                ('lucid', 'x86'),
                ('lucid', 'x86_64'))

    def _all_build_types(self):
        return ('debug', 'release')

    def configuration_specifier_macros(self):
        """To avoid surprises when introducing new macros, these are intentionally fixed in time."""
        return {'mac': ['leopard', 'snowleopard'], 'win': ['xp', 'win7'], 'linux': ['lucid']}

    def all_baseline_variants(self):
        return self.ALL_BASELINE_VARIANTS

    def virtual_test_suites(self):
        return [
            VirtualTestSuite('passes', 'passes', ['--virtual-arg'], use_legacy_naming=True),
            VirtualTestSuite('skipped', 'failures/expected', ['--virtual-arg2'], use_legacy_naming=True),
        ]
579
580
class TestDriver(Driver):
    """Test/Dummy implementation of the driver interface.

    Instead of launching a real driver binary, it synthesizes a
    DriverOutput from the TestInstance registered for the requested test.
    """
    # Class-level counter handing out a unique fake pid per started driver.
    next_pid = 1

    def __init__(self, *args, **kwargs):
        super(TestDriver, self).__init__(*args, **kwargs)
        self.started = False
        self.pid = 0

    def cmd_line(self, pixel_tests, per_test_args):
        """Returns the command line a real driver would have been run with."""
        pixel_tests_flag = '-p' if pixel_tests else ''
        return [self._port._path_to_driver()] + [pixel_tests_flag] + self._port.get_option('additional_drt_flag', []) + per_test_args

    def run_test(self, driver_input, stop_when_done):
        """Simulates one test run, returning a DriverOutput built from the
        in-memory TestInstance for driver_input.test_name.

        Raises KeyboardInterrupt, ValueError, or DeviceFailure when the
        test is configured to simulate those conditions.
        """
        if not self.started:
            self.started = True
            self.pid = TestDriver.next_pid
            TestDriver.next_pid += 1

        start_time = time.time()
        test_name = driver_input.test_name
        test_args = driver_input.args or []
        test = self._port._tests[test_name]
        if test.keyboard:
            raise KeyboardInterrupt
        if test.exception:
            raise ValueError('exception from ' + test_name)
        if test.device_failure:
            raise DeviceFailure('device failure in ' + test_name)

        audio = None
        actual_text = test.actual_text
        crash = test.crash
        web_process_crash = test.web_process_crash

        # Flaky tests behave differently on their first and later runs;
        # self._port._flakes records which have already run once.
        # (The crashed_process_name/crashed_pid assignments that used to sit
        # in these branches were dead code: they were unconditionally
        # recomputed from the crash flags below, so they have been removed.)
        if 'flaky/text.html' in test_name and not test_name in self._port._flakes:
            self._port._flakes.add(test_name)
            actual_text = 'flaky text failure'

        if 'crash_then_text.html' in test_name:
            if test_name in self._port._flakes:
                actual_text = 'text failure'
            else:
                self._port._flakes.add(test_name)
                crash = True

        if 'text_then_crash.html' in test_name:
            if test_name in self._port._flakes:
                crash = True
            else:
                self._port._flakes.add(test_name)
                actual_text = 'text failure'

        if actual_text and test_args and test_name == 'passes/args.html':
            actual_text = actual_text + ' ' + ' '.join(test_args)

        if test.actual_audio:
            audio = base64.b64decode(test.actual_audio)

        # Derive the crashed-process identity solely from the crash flags.
        crashed_process_name = None
        crashed_pid = None
        if crash:
            crashed_process_name = self._port.driver_name()
            crashed_pid = 1
        elif web_process_crash:
            crashed_process_name = 'WebProcess'
            crashed_pid = 2

        crash_log = ''
        if crashed_process_name:
            crash_logs = CrashLogs(self._port.host)
            crash_log = crash_logs.find_newest_log(crashed_process_name, None) or ''

        if stop_when_done:
            self.stop()

        # A matching checksum means the harness would not have requested
        # the image, so we return None for it.
        if test.actual_checksum == driver_input.image_hash:
            image = None
        else:
            image = test.actual_image
        return DriverOutput(actual_text, image, test.actual_checksum, audio,
            crash=(crash or web_process_crash), crashed_process_name=crashed_process_name,
            crashed_pid=crashed_pid, crash_log=crash_log,
            test_time=time.time() - start_time, timeout=test.timeout, error=test.error, pid=self.pid,
            leak=test.leak)

    def stop(self):
        self.started = False