# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""A helper class for reading in and dealing with test expectations
for layout tests.
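
Typical usage (an illustrative sketch; obtaining a Port object normally goes
through webkitpy.common.host.Host and its port factory, which is assumed here,
and the test paths are made up):

    expectations = TestExpectations(port, tests=test_paths)
    for test in test_paths:
        print expectations.get_expectations_string(test)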
"""

import logging
import re

from webkitpy.layout_tests.models.test_configuration import TestConfigurationConverter

_log = logging.getLogger(__name__)


# Test expectation and specifier constants.
#
# FIXME: range() starts with 0, which makes "if expectation:" checks
# error-prone because PASS is 0 (and therefore falsy).
(PASS, FAIL, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, TIMEOUT, CRASH, LEAK, SKIP, WONTFIX,
 SLOW, REBASELINE, NEEDS_REBASELINE, NEEDS_MANUAL_REBASELINE, MISSING, FLAKY, NOW, NONE) = range(19)

# FIXME: Perhaps this should be part of the Port instead?
BASELINE_SUFFIX_LIST = ('png', 'wav', 'txt')

WEBKIT_BUG_PREFIX = 'webkit.org/b/'
CHROMIUM_BUG_PREFIX = 'crbug.com/'
V8_BUG_PREFIX = 'code.google.com/p/v8/issues/detail?id='
NAMED_BUG_PREFIX = 'Bug('

MISSING_KEYWORD = 'Missing'
NEEDS_REBASELINE_KEYWORD = 'NeedsRebaseline'
NEEDS_MANUAL_REBASELINE_KEYWORD = 'NeedsManualRebaseline'

class ParseError(Exception):
    def __init__(self, warnings):
        super(ParseError, self).__init__()
        self.warnings = warnings

    def __str__(self):
        return '\n'.join(map(str, self.warnings))

    def __repr__(self):
        return 'ParseError(warnings=%s)' % self.warnings


class TestExpectationParser(object):
    """Provides parsing facilities for lines in the test_expectation.txt file."""

    # FIXME: Rename these to *_KEYWORD as in MISSING_KEYWORD above, but make the case studly-caps to match the actual file contents.
    REBASELINE_MODIFIER = 'rebaseline'
    NEEDS_REBASELINE_MODIFIER = 'needsrebaseline'
    NEEDS_MANUAL_REBASELINE_MODIFIER = 'needsmanualrebaseline'
    PASS_EXPECTATION = 'pass'
    SKIP_MODIFIER = 'skip'
    SLOW_MODIFIER = 'slow'
    WONTFIX_MODIFIER = 'wontfix'

    TIMEOUT_EXPECTATION = 'timeout'

    MISSING_BUG_WARNING = 'Test lacks BUG specifier.'

    def __init__(self, port, full_test_list, is_lint_mode):
        self._port = port
        self._test_configuration_converter = TestConfigurationConverter(set(port.all_test_configurations()), port.configuration_specifier_macros())
        self._full_test_list = full_test_list
        self._is_lint_mode = is_lint_mode

    def parse(self, filename, expectations_string):
        expectation_lines = []
        line_number = 0
        for line in expectations_string.split("\n"):
            line_number += 1
            test_expectation = self._tokenize_line(filename, line, line_number)
            self._parse_line(test_expectation)
            expectation_lines.append(test_expectation)
        return expectation_lines

    def _create_expectation_line(self, test_name, expectations, file_name):
        expectation_line = TestExpectationLine()
        expectation_line.original_string = test_name
        expectation_line.name = test_name
        expectation_line.filename = file_name
        expectation_line.expectations = expectations
        return expectation_line

    def expectation_line_for_test(self, test_name, expectations):
        expectation_line = self._create_expectation_line(test_name, expectations, '<Bot TestExpectations>')
        self._parse_line(expectation_line)
        return expectation_line


    def expectation_for_skipped_test(self, test_name):
        if not self._port.test_exists(test_name):
            _log.warning('The following test %s from the Skipped list doesn\'t exist' % test_name)
        expectation_line = self._create_expectation_line(test_name, [TestExpectationParser.PASS_EXPECTATION], '<Skipped file>')
        expectation_line.expectations = [TestExpectationParser.SKIP_MODIFIER, TestExpectationParser.WONTFIX_MODIFIER]
        expectation_line.is_skipped_outside_expectations_file = True
        self._parse_line(expectation_line)
        return expectation_line

    def _parse_line(self, expectation_line):
        if not expectation_line.name:
            return

        if not self._check_test_exists(expectation_line):
            return

        expectation_line.is_file = self._port.test_isfile(expectation_line.name)
        if expectation_line.is_file:
            expectation_line.path = expectation_line.name
        else:
            expectation_line.path = self._port.normalize_test_name(expectation_line.name)

        self._collect_matching_tests(expectation_line)

        self._parse_specifiers(expectation_line)
        self._parse_expectations(expectation_line)

    def _parse_specifiers(self, expectation_line):
        if self._is_lint_mode:
            self._lint_line(expectation_line)

        parsed_specifiers = set([specifier.lower() for specifier in expectation_line.specifiers])
        expectation_line.matching_configurations = self._test_configuration_converter.to_config_set(parsed_specifiers, expectation_line.warnings)

    def _lint_line(self, expectation_line):
        expectations = [expectation.lower() for expectation in expectation_line.expectations]
        if not expectation_line.bugs and self.WONTFIX_MODIFIER not in expectations:
            expectation_line.warnings.append(self.MISSING_BUG_WARNING)
        if self.REBASELINE_MODIFIER in expectations:
            expectation_line.warnings.append('REBASELINE should only be used for running rebaseline.py. Cannot be checked in.')

        if self.NEEDS_REBASELINE_MODIFIER in expectations or self.NEEDS_MANUAL_REBASELINE_MODIFIER in expectations:
            for test in expectation_line.matching_tests:
                if self._port.reference_files(test):
                    expectation_line.warnings.append('A reftest cannot be marked as NeedsRebaseline/NeedsManualRebaseline')

    def _parse_expectations(self, expectation_line):
        result = set()
        for part in expectation_line.expectations:
            expectation = TestExpectations.expectation_from_string(part)
            if expectation is None:  # Careful, PASS is currently 0.
                expectation_line.warnings.append('Unsupported expectation: %s' % part)
                continue
            result.add(expectation)
        expectation_line.parsed_expectations = result

    def _check_test_exists(self, expectation_line):
        # WebKit's way of skipping tests is to add a -disabled suffix.
        # So we should consider the path existing if the path or the
        # -disabled version exists.
        if not self._port.test_exists(expectation_line.name) and not self._port.test_exists(expectation_line.name + '-disabled'):
            # Log a warning here since you hit this case any
            # time you update TestExpectations without syncing
            # the LayoutTests directory.
            expectation_line.warnings.append('Path does not exist.')
            return False
        return True

    def _collect_matching_tests(self, expectation_line):
        """Collects the tests matching expectation_line.path into
        expectation_line.matching_tests."""
        # FIXME: full_test_list can quickly contain a large number of
        # elements. We should consider at some point using a more
        # efficient structure than a list, e.g. a dictionary of lists
        # representing the tree of tests, with leaves being test files
        # and inner nodes being categories.

        if not self._full_test_list:
            expectation_line.matching_tests = [expectation_line.path]
            return

        if not expectation_line.is_file:
            # This is a test category; return all the tests in the category.
            expectation_line.matching_tests = [test for test in self._full_test_list if test.startswith(expectation_line.path)]
            return

        # This is a test file; do a quick check whether it's in the
        # full test suite.
        if expectation_line.path in self._full_test_list:
            expectation_line.matching_tests.append(expectation_line.path)
    # FIXME: Update the original specifiers and remove this once the old syntax is gone.
    _configuration_tokens_list = [
        'Mac', 'SnowLeopard', 'Lion', 'Retina', 'MountainLion', 'Mavericks',
        'Win', 'XP', 'Win7',
        'Linux',
        'Android',
        'Release',
        'Debug',
    ]

    _configuration_tokens = dict((token, token.upper()) for token in _configuration_tokens_list)
    _inverted_configuration_tokens = dict((value, name) for name, value in _configuration_tokens.iteritems())

    # FIXME: Update the original specifiers list and remove this once the old syntax is gone.
    _expectation_tokens = {
        'Crash': 'CRASH',
        'Leak': 'LEAK',
        'Failure': 'FAIL',
        'ImageOnlyFailure': 'IMAGE',
        MISSING_KEYWORD: 'MISSING',
        'Pass': 'PASS',
        'Rebaseline': 'REBASELINE',
        NEEDS_REBASELINE_KEYWORD: 'NEEDSREBASELINE',
        NEEDS_MANUAL_REBASELINE_KEYWORD: 'NEEDSMANUALREBASELINE',
        'Skip': 'SKIP',
        'Slow': 'SLOW',
        'Timeout': 'TIMEOUT',
        'WontFix': 'WONTFIX',
    }

    _inverted_expectation_tokens = dict([(value, name) for name, value in _expectation_tokens.iteritems()] +
                                        [('TEXT', 'Failure'), ('IMAGE+TEXT', 'Failure'), ('AUDIO', 'Failure')])

    # FIXME: Seems like these should be classmethods on TestExpectationLine instead of TestExpectationParser.
    @classmethod
    def _tokenize_line(cls, filename, expectation_string, line_number):
        """Tokenizes a line from TestExpectations and returns an unparsed TestExpectationLine instance using the old format.

        The new format for a test expectation line is:

        [ <bugs> ] [ "[" <configuration specifiers> "]" ] <name> [ "[" <expectations> "]" ] [ "#" <comment> ]

        Any errant whitespace is not preserved.

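        For example, the following line (the bug number and test name are
        illustrative):

            crbug.com/12345 [ Win Debug ] fast/js/example.html [ Timeout Failure ]  # still flaky

        tokenizes to bugs=['crbug.com/12345'], specifiers=['WIN', 'DEBUG'],
        name='fast/js/example.html', expectations=['TIMEOUT', 'FAIL'] and
        comment=' still flaky'.
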
        """
        expectation_line = TestExpectationLine()
        expectation_line.original_string = expectation_string
        expectation_line.filename = filename
        expectation_line.line_numbers = str(line_number)

        comment_index = expectation_string.find("#")
        if comment_index == -1:
            comment_index = len(expectation_string)
        else:
            expectation_line.comment = expectation_string[comment_index + 1:]

        remaining_string = re.sub(r"\s+", " ", expectation_string[:comment_index].strip())
        if len(remaining_string) == 0:
            return expectation_line

        # special-case parsing this so that we fail immediately instead of treating this as a test name
        if remaining_string.startswith('//'):
            expectation_line.warnings = ['use "#" instead of "//" for comments']
            return expectation_line

        bugs = []
        specifiers = []
        name = None
        expectations = []
        warnings = []
        has_unrecognized_expectation = False

        tokens = remaining_string.split()
        state = 'start'
        for token in tokens:
            if (token.startswith(WEBKIT_BUG_PREFIX) or
                token.startswith(CHROMIUM_BUG_PREFIX) or
                token.startswith(V8_BUG_PREFIX) or
                token.startswith(NAMED_BUG_PREFIX)):
                if state != 'start':
                    warnings.append('"%s" is not at the start of the line.' % token)
                    break
                if token.startswith(WEBKIT_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(CHROMIUM_BUG_PREFIX):
                    bugs.append(token)
                elif token.startswith(V8_BUG_PREFIX):
                    bugs.append(token)
                else:
                    match = re.match(r'Bug\((\w+)\)$', token)
                    if not match:
                        warnings.append('unrecognized bug identifier "%s"' % token)
                        break
                    else:
                        bugs.append(token)
            elif token == '[':
                if state == 'start':
                    state = 'configuration'
                elif state == 'name_found':
                    state = 'expectations'
                else:
                    warnings.append('unexpected "["')
                    break
            elif token == ']':
                if state == 'configuration':
                    state = 'name'
                elif state == 'expectations':
                    state = 'done'
                else:
                    warnings.append('unexpected "]"')
                    break
            elif token in ('//', ':', '='):
                warnings.append('"%s" is not legal in the new TestExpectations syntax.' % token)
                break
            elif state == 'configuration':
                specifiers.append(cls._configuration_tokens.get(token, token))
            elif state == 'expectations':
                if token not in cls._expectation_tokens:
                    has_unrecognized_expectation = True
                    warnings.append('Unrecognized expectation "%s"' % token)
                else:
                    expectations.append(cls._expectation_tokens.get(token, token))
            elif state == 'name_found':
                warnings.append('expecting "[", "#", or end of line instead of "%s"' % token)
                break
            else:
                name = token
                state = 'name_found'

        if not warnings:
            if not name:
                warnings.append('Did not find a test name.')
            elif state not in ('name_found', 'done'):
                warnings.append('Missing a "]"')

        if 'WONTFIX' in expectations and 'SKIP' not in expectations:
            expectations.append('SKIP')

        if ('SKIP' in expectations or 'WONTFIX' in expectations) and len(set(expectations) - set(['SKIP', 'WONTFIX'])):
            warnings.append('A test marked Skip or WontFix must not have other expectations.')

        if not expectations and not has_unrecognized_expectation:
            warnings.append('Missing expectations.')

        expectation_line.bugs = bugs
        expectation_line.specifiers = specifiers
        expectation_line.expectations = expectations
        expectation_line.name = name
        expectation_line.warnings = warnings
        return expectation_line

    @classmethod
    def _split_space_separated(cls, space_separated_string):
        """Splits a space-separated string into an array."""
        return [part.strip() for part in space_separated_string.strip().split(' ')]


class TestExpectationLine(object):
    """Represents a line in a test expectations file."""

    def __init__(self):
        """Initializes a blank-line equivalent of an expectation."""
        self.original_string = None
        self.filename = None  # this is the path to the expectations file for this line
        self.line_numbers = "0"
        self.name = None  # this is the path in the line itself
        self.path = None  # this is the normpath of self.name
        self.bugs = []
        self.specifiers = []
        self.parsed_specifiers = []
        self.matching_configurations = set()
        self.expectations = []
        self.parsed_expectations = set()
        self.comment = None
        self.matching_tests = []
        self.warnings = []
        self.is_skipped_outside_expectations_file = False

    def __eq__(self, other):
        return (self.original_string == other.original_string
            and self.filename == other.filename
            and self.line_numbers == other.line_numbers
            and self.name == other.name
            and self.path == other.path
            and self.bugs == other.bugs
            and self.specifiers == other.specifiers
            and self.parsed_specifiers == other.parsed_specifiers
            and self.matching_configurations == other.matching_configurations
            and self.expectations == other.expectations
            and self.parsed_expectations == other.parsed_expectations
            and self.comment == other.comment
            and self.matching_tests == other.matching_tests
            and self.warnings == other.warnings
            and self.is_skipped_outside_expectations_file == other.is_skipped_outside_expectations_file)

    def is_invalid(self):
        return bool(self.warnings and self.warnings != [TestExpectationParser.MISSING_BUG_WARNING])

    def is_flaky(self):
        return len(self.parsed_expectations) > 1

    def is_whitespace_or_comment(self):
        return bool(re.match(r"^\s*$", self.original_string.split('#')[0]))

    @staticmethod
    def create_passing_expectation(test):
        expectation_line = TestExpectationLine()
        expectation_line.name = test
        expectation_line.path = test
        expectation_line.parsed_expectations = set([PASS])
        expectation_line.expectations = set(['PASS'])
        expectation_line.matching_tests = [test]
        return expectation_line

    @staticmethod
    def merge_expectation_lines(line1, line2, model_all_expectations):
        """Merges the expectations of line2 into line1 and returns a fresh object."""
        if line1 is None:
            return line2
        if line2 is None:
            return line1
        if model_all_expectations and line1.filename != line2.filename:
            return line2

        # Don't merge original_string or comment.
        result = TestExpectationLine()
        # We only care about filenames when we're linting, in which case the filenames are the same.
        # Not clear that there's anything better to do when not linting and the filenames are different.
        if model_all_expectations:
            result.filename = line2.filename
        result.line_numbers = line1.line_numbers + "," + line2.line_numbers
        result.name = line1.name
        result.path = line1.path
        result.parsed_expectations = set(line1.parsed_expectations) | set(line2.parsed_expectations)
        result.expectations = list(set(line1.expectations) | set(line2.expectations))
        result.bugs = list(set(line1.bugs) | set(line2.bugs))
        result.specifiers = list(set(line1.specifiers) | set(line2.specifiers))
        result.parsed_specifiers = list(set(line1.parsed_specifiers) | set(line2.parsed_specifiers))
        result.matching_configurations = set(line1.matching_configurations) | set(line2.matching_configurations)
        result.matching_tests = list(set(line1.matching_tests) | set(line2.matching_tests))
        result.warnings = list(set(line1.warnings) | set(line2.warnings))
        result.is_skipped_outside_expectations_file = line1.is_skipped_outside_expectations_file or line2.is_skipped_outside_expectations_file
        return result

    def to_string(self, test_configuration_converter, include_specifiers=True, include_expectations=True, include_comment=True):
        parsed_expectation_to_string = dict([[parsed_expectation, expectation_string] for expectation_string, parsed_expectation in TestExpectations.EXPECTATIONS.items()])

        if self.is_invalid():
            return self.original_string or ''

        if self.name is None:
            return '' if self.comment is None else "#%s" % self.comment

        if test_configuration_converter and self.bugs:
            specifiers_list = test_configuration_converter.to_specifiers_list(self.matching_configurations)
            result = []
            for specifiers in specifiers_list:
                # FIXME: this is silly that we join the specifiers and then immediately split them.
                specifiers = self._serialize_parsed_specifiers(test_configuration_converter, specifiers).split()
                expectations = self._serialize_parsed_expectations(parsed_expectation_to_string).split()
                result.append(self._format_line(self.bugs, specifiers, self.name, expectations, self.comment))
            return "\n".join(result) if result else None

        return self._format_line(self.bugs, self.specifiers, self.name, self.expectations, self.comment,
            include_specifiers, include_expectations, include_comment)

    def to_csv(self):
        # Note that this doesn't include the comments.
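        # Illustrative example: a line tokenized from
        # 'crbug.com/12345 [ Win ] fast/js/example.html [ Timeout ]' serializes to
        # 'fast/js/example.html,crbug.com/12345,WIN,TIMEOUT'.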
        return '%s,%s,%s,%s' % (self.name, ' '.join(self.bugs), ' '.join(self.specifiers), ' '.join(self.expectations))

    def _serialize_parsed_expectations(self, parsed_expectation_to_string):
        result = []
        for index in TestExpectations.EXPECTATIONS.values():
            if index in self.parsed_expectations:
                result.append(parsed_expectation_to_string[index])
        return ' '.join(result)

    def _serialize_parsed_specifiers(self, test_configuration_converter, specifiers):
        result = []
        result.extend(sorted(self.parsed_specifiers))
        result.extend(test_configuration_converter.specifier_sorter().sort_specifiers(specifiers))
        return ' '.join(result)

    @staticmethod
    def _filter_redundant_expectations(expectations):
        if set(expectations) == set(['Pass', 'Skip']):
            return ['Skip']
        if set(expectations) == set(['Pass', 'Slow']):
            return ['Slow']
        return expectations

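    # Illustrative example (all values made up): _format_line(['crbug.com/12345'], ['WIN'],
    # 'fast/js/example.html', ['TIMEOUT'], None) returns
    # 'crbug.com/12345 [ Win ] fast/js/example.html [ Timeout ]'.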
    @staticmethod
    def _format_line(bugs, specifiers, name, expectations, comment, include_specifiers=True, include_expectations=True, include_comment=True):
        new_specifiers = []
        new_expectations = []
        for specifier in specifiers:
            # FIXME: Make this all work with the mixed-cased specifiers (e.g. WontFix, Slow, etc).
            specifier = specifier.upper()
            new_specifiers.append(TestExpectationParser._inverted_configuration_tokens.get(specifier, specifier))

        for expectation in expectations:
            expectation = expectation.upper()
            new_expectations.append(TestExpectationParser._inverted_expectation_tokens.get(expectation, expectation))

        result = ''
        if include_specifiers and (bugs or new_specifiers):
            if bugs:
                result += ' '.join(bugs) + ' '
            if new_specifiers:
                result += '[ %s ] ' % ' '.join(new_specifiers)
        result += name
        if include_expectations and new_expectations:
            new_expectations = TestExpectationLine._filter_redundant_expectations(new_expectations)
            result += ' [ %s ]' % ' '.join(sorted(set(new_expectations)))
        if include_comment and comment is not None:
            result += " #%s" % comment
        return result


# FIXME: Refactor API to be a proper CRUD.
class TestExpectationsModel(object):
    """Represents a relational store of all expectations and provides CRUD semantics to manage it."""

    def __init__(self, shorten_filename=None):
        # Maps a test to its list of expectations.
        self._test_to_expectations = {}

        # Maps a test to the list of its specifiers (string values).
        self._test_to_specifiers = {}

        # Maps a test to a TestExpectationLine instance.
        self._test_to_expectation_line = {}

        self._expectation_to_tests = self._dict_of_sets(TestExpectations.EXPECTATIONS)
        self._timeline_to_tests = self._dict_of_sets(TestExpectations.TIMELINES)
        self._result_type_to_tests = self._dict_of_sets(TestExpectations.RESULT_TYPES)

        self._shorten_filename = shorten_filename or (lambda x: x)

    def _merge_test_map(self, self_map, other_map):
        for test in other_map:
            new_expectations = set(other_map[test])
            if test in self_map:
                new_expectations |= set(self_map[test])
            self_map[test] = list(new_expectations) if isinstance(other_map[test], list) else new_expectations

    def _merge_dict_of_sets(self, self_dict, other_dict):
        for key in other_dict:
            self_dict[key] |= other_dict[key]

    def merge_model(self, other):
        self._merge_test_map(self._test_to_expectations, other._test_to_expectations)

        for test, line in other._test_to_expectation_line.items():
            if test in self._test_to_expectation_line:
                line = TestExpectationLine.merge_expectation_lines(self._test_to_expectation_line[test], line, model_all_expectations=False)
            self._test_to_expectation_line[test] = line

        self._merge_dict_of_sets(self._expectation_to_tests, other._expectation_to_tests)
        self._merge_dict_of_sets(self._timeline_to_tests, other._timeline_to_tests)
        self._merge_dict_of_sets(self._result_type_to_tests, other._result_type_to_tests)

    def _dict_of_sets(self, strings_to_constants):
        """Takes a dict of strings->constants and returns a dict mapping
        each constant to an empty set."""
        d = {}
        for c in strings_to_constants.values():
            d[c] = set()
        return d

    def get_test_set(self, expectation, include_skips=True):
        tests = self._expectation_to_tests[expectation]
        if not include_skips:
            tests = tests - self.get_test_set(SKIP)
        return tests

    def get_test_set_for_keyword(self, keyword):
        expectation_enum = TestExpectations.EXPECTATIONS.get(keyword.lower(), None)
        if expectation_enum is not None:
            return self._expectation_to_tests[expectation_enum]

        matching_tests = set()
        for test, specifiers in self._test_to_specifiers.iteritems():
            if keyword.lower() in specifiers:
                matching_tests.add(test)
        return matching_tests

    def get_tests_with_result_type(self, result_type):
        return self._result_type_to_tests[result_type]

    def get_tests_with_timeline(self, timeline):
        return self._timeline_to_tests[timeline]

    def has_test(self, test):
        return test in self._test_to_expectation_line

    def get_expectation_line(self, test):
        return self._test_to_expectation_line.get(test)

    def get_expectations(self, test):
        return self._test_to_expectations[test]

    def get_expectations_string(self, test):
        """Returns the expectations for the given test as an uppercase string.
        If there are no expectations for the test, then "PASS" is returned."""
        if self.get_expectation_line(test).is_skipped_outside_expectations_file:
            return 'NOTRUN'

        expectations = self.get_expectations(test)
        retval = []

        # FIXME: WontFix should cause the test to get skipped without artificially adding SKIP to the expectations list.
        if WONTFIX in expectations and SKIP in expectations:
            expectations.remove(SKIP)

        for expectation in expectations:
            retval.append(self.expectation_to_string(expectation))

        return " ".join(retval)

    def expectation_to_string(self, expectation):
        """Returns the uppercased string equivalent of a given expectation."""
        for item in TestExpectations.EXPECTATIONS.items():
            if item[1] == expectation:
                return item[0].upper()
        raise ValueError(expectation)

    def remove_expectation_line(self, test):
        if not self.has_test(test):
            return
        self._clear_expectations_for_test(test)
        del self._test_to_expectation_line[test]

    def add_expectation_line(self, expectation_line,
                             model_all_expectations=False):
        """Adds the expectation line to the model for each matching test,
        recording any specifier-matching warnings on the line itself."""

        if expectation_line.is_invalid():
            return

        for test in expectation_line.matching_tests:
            if self._already_seen_better_match(test, expectation_line):
                continue

            if model_all_expectations:
                expectation_line = TestExpectationLine.merge_expectation_lines(self.get_expectation_line(test), expectation_line, model_all_expectations)

            self._clear_expectations_for_test(test)
            self._test_to_expectation_line[test] = expectation_line
            self._add_test(test, expectation_line)

    def _add_test(self, test, expectation_line):
        """Sets the expected state for a given test.

        This routine assumes the test has not been added before. If it has,
        use _clear_expectations_for_test() to reset the state prior to
        calling this."""
        self._test_to_expectations[test] = expectation_line.parsed_expectations
        for expectation in expectation_line.parsed_expectations:
            self._expectation_to_tests[expectation].add(test)

        self._test_to_specifiers[test] = expectation_line.specifiers

        if WONTFIX in expectation_line.parsed_expectations:
            self._timeline_to_tests[WONTFIX].add(test)
        else:
            self._timeline_to_tests[NOW].add(test)

        if SKIP in expectation_line.parsed_expectations:
            self._result_type_to_tests[SKIP].add(test)
        elif expectation_line.parsed_expectations == set([PASS]):
            self._result_type_to_tests[PASS].add(test)
        elif expectation_line.is_flaky():
            self._result_type_to_tests[FLAKY].add(test)
        else:
            # FIXME: What is this?
            self._result_type_to_tests[FAIL].add(test)
    def _clear_expectations_for_test(self, test):
        """Removes preexisting expectations for this test.
        This happens if we are seeing a more precise path
        than a previous listing.
        """
        if self.has_test(test):
            self._test_to_expectations.pop(test, '')
            self._remove_from_sets(test, self._expectation_to_tests)
            self._remove_from_sets(test, self._timeline_to_tests)
            self._remove_from_sets(test, self._result_type_to_tests)

    def _remove_from_sets(self, test, dict_of_sets_of_tests):
        """Removes the given test from the sets in the dictionary.

        Args:
          test: test to look for
          dict_of_sets_of_tests: dict of sets of tests"""
        for set_of_tests in dict_of_sets_of_tests.itervalues():
            if test in set_of_tests:
                set_of_tests.remove(test)
    def _already_seen_better_match(self, test, expectation_line):
        """Returns whether we've seen a better match already in the file.

        Returns True if we've already seen an expectation_line.name that matches more of the test
            than this path does.
        """
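        # Illustrative example: if both 'fast/js' and 'fast/js/example.html' have
        # entries in the same file, the entry with the longer (more precise) path is
        # considered the better match for fast/js/example.html.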
        # FIXME: See comment below about matching test configs and specificity.
        if not self.has_test(test):
            # We've never seen this test before.
            return False

        prev_expectation_line = self._test_to_expectation_line[test]

        if prev_expectation_line.filename != expectation_line.filename:
            # We've moved on to a new expectation file, which overrides older ones.
            return False

        if len(prev_expectation_line.path) > len(expectation_line.path):
            # The previous path matched more of the test.
            return True

        if len(prev_expectation_line.path) < len(expectation_line.path):
            # This path matches more of the test.
            return False

        # At this point we know we have seen a previous exact match on this
        # base path, so we need to check the two sets of specifiers.

        # FIXME: This code was originally designed to allow lines that matched
        # more specifiers to override lines that matched fewer specifiers.
        # However, we currently view these as errors.
        #
        # To use the "more specifiers wins" policy, change the errors for overrides
        # to be warnings and return False.

        if prev_expectation_line.matching_configurations == expectation_line.matching_configurations:
            expectation_line.warnings.append('Duplicate or ambiguous entry lines %s:%s and %s:%s.' % (
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True

        if prev_expectation_line.matching_configurations >= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            # FIXME: return False if we want more specific to win.
            return True

        if prev_expectation_line.matching_configurations <= expectation_line.matching_configurations:
            expectation_line.warnings.append('More specific entry for %s on line %s:%s overrides line %s:%s.' % (expectation_line.name,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers))
            return True

        if prev_expectation_line.matching_configurations & expectation_line.matching_configurations:
            expectation_line.warnings.append('Entries for %s on lines %s:%s and %s:%s match overlapping sets of configurations.' % (expectation_line.name,
                self._shorten_filename(prev_expectation_line.filename), prev_expectation_line.line_numbers,
                self._shorten_filename(expectation_line.filename), expectation_line.line_numbers))
            return True

        # Configuration sets are disjoint, then.
        return False


class TestExpectations(object):
    """Test expectations consist of lines with specifications of what
    to expect from layout test cases. The test cases can be directories
    in which case the expectations apply to all test cases in that
    directory and any subdirectory. The format is along the lines of:

      LayoutTests/fast/js/fixme.js [ Failure ]
      LayoutTests/fast/js/flaky.js [ Failure Pass ]
      LayoutTests/fast/js/crash.js [ Crash Failure Pass Timeout ]
      ...

    To add specifiers:
      LayoutTests/fast/js/no-good.js
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Timeout ]
      [ Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Debug ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]
      [ Linux Win ] LayoutTests/fast/js/no-good.js [ Pass Skip Timeout ]

    Skip: Doesn't run the test.
    Slow: The test takes a long time to run, but does not time out indefinitely.
    WontFix: For tests that we never intend to pass on a given platform (treated like Skip).

    Notes:
      - A test cannot be both SLOW and TIMEOUT
      - A test can be included twice, but not via the same path.
      - If a test is included twice, then the more precise path wins.
      - CRASH tests cannot be WONTFIX
    """

    # FIXME: Update to new syntax once the old format is no longer supported.
    EXPECTATIONS = {'pass': PASS,
                    'audio': AUDIO,
                    'fail': FAIL,
                    'image': IMAGE,
                    'image+text': IMAGE_PLUS_TEXT,
                    'text': TEXT,
                    'timeout': TIMEOUT,
                    'crash': CRASH,
                    'leak': LEAK,
                    'missing': MISSING,
                    TestExpectationParser.SKIP_MODIFIER: SKIP,
                    TestExpectationParser.NEEDS_REBASELINE_MODIFIER: NEEDS_REBASELINE,
                    TestExpectationParser.NEEDS_MANUAL_REBASELINE_MODIFIER: NEEDS_MANUAL_REBASELINE,
                    TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                    TestExpectationParser.SLOW_MODIFIER: SLOW,
                    TestExpectationParser.REBASELINE_MODIFIER: REBASELINE,
    }

    EXPECTATIONS_TO_STRING = dict((k, v) for (v, k) in EXPECTATIONS.iteritems())

    # (aggregated by category, pass/fail/skip, type)
    EXPECTATION_DESCRIPTIONS = {SKIP: 'skipped',
                                PASS: 'passes',
                                FAIL: 'failures',
                                IMAGE: 'image-only failures',
                                TEXT: 'text-only failures',
                                IMAGE_PLUS_TEXT: 'image and text failures',
                                AUDIO: 'audio failures',
                                CRASH: 'crashes',
                                LEAK: 'leaks',
                                TIMEOUT: 'timeouts',
                                MISSING: 'missing results'}

    NON_TEST_OUTCOME_EXPECTATIONS = (REBASELINE, SKIP, SLOW, WONTFIX)

    BUILD_TYPES = ('debug', 'release')

    TIMELINES = {TestExpectationParser.WONTFIX_MODIFIER: WONTFIX,
                 'now': NOW}

    RESULT_TYPES = {'skip': SKIP,
                    'pass': PASS,
                    'fail': FAIL,
                    'flaky': FLAKY}

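    # Illustrative example: expectation_from_string('Timeout') returns the TIMEOUT
    # constant, while an unrecognized keyword returns None; callers must check
    # "is None" rather than truthiness because PASS is 0.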
    @classmethod
    def expectation_from_string(cls, string):
        assert(' ' not in string)  # This only handles one expectation at a time.
        return cls.EXPECTATIONS.get(string.lower())

    @staticmethod
    def result_was_expected(result, expected_results, test_needs_rebaselining):
        """Returns whether we got a result we were expecting.
        Args:
            result: actual result of a test execution
            expected_results: set of results listed in test_expectations
            test_needs_rebaselining: whether test was marked as REBASELINE"""
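        # Illustrative example: result=TEXT with expected_results=set([FAIL]) is
        # treated as expected, since a text-only failure counts as a generic FAIL.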
        if not (set(expected_results) - set(TestExpectations.NON_TEST_OUTCOME_EXPECTATIONS)):
            expected_results = set([PASS])

        if result in expected_results:
            return True
        if result in (PASS, TEXT, IMAGE, IMAGE_PLUS_TEXT, AUDIO, MISSING) and (NEEDS_REBASELINE in expected_results or NEEDS_MANUAL_REBASELINE in expected_results):
            return True
        if result in (TEXT, IMAGE_PLUS_TEXT, AUDIO) and (FAIL in expected_results):
            return True
        if result == MISSING and test_needs_rebaselining:
            return True
        if result == SKIP:
            return True
        return False

    @staticmethod
    def remove_pixel_failures(expected_results):
        """Returns a copy of the expected results for a test, except that we
        drop any pixel failures and return the remaining expectations. For example,
        if we're not running pixel tests, then tests expected to fail as IMAGE
        will PASS."""
        expected_results = expected_results.copy()
        if IMAGE in expected_results:
            expected_results.remove(IMAGE)
            expected_results.add(PASS)
        return expected_results

    @staticmethod
    def remove_non_sanitizer_failures(expected_results):
        """Returns a copy of the expected results for a test, except that we
        drop any failures that the sanitizers don't care about."""
        expected_results = expected_results.copy()
        for result in (IMAGE, FAIL, IMAGE_PLUS_TEXT):
            if result in expected_results:
                expected_results.remove(result)
                expected_results.add(PASS)
        return expected_results

    @staticmethod
    def has_pixel_failures(actual_results):
        return IMAGE in actual_results or FAIL in actual_results

    @staticmethod
    def suffixes_for_expectations(expectations):
        suffixes = set()
        if IMAGE in expectations:
            suffixes.add('png')
        if FAIL in expectations:
            suffixes.add('txt')
            suffixes.add('png')
            suffixes.add('wav')
        return set(suffixes)

    @staticmethod
    def suffixes_for_actual_expectations_string(expectations):
        suffixes = set()
        if 'TEXT' in expectations:
            suffixes.add('txt')
        if 'IMAGE' in expectations:
            suffixes.add('png')
        if 'AUDIO' in expectations:
            suffixes.add('wav')
        if 'MISSING' in expectations:
            suffixes.add('txt')
            suffixes.add('png')
            suffixes.add('wav')
        return suffixes

    # FIXME: This constructor does too much work. We should move the actual parsing of
    # the expectations into separate routines so that linting and handling overrides
    # can be controlled separately, and the constructor can be more of a no-op.
    def __init__(self, port, tests=None, include_overrides=True, expectations_dict=None, model_all_expectations=False, is_lint_mode=False):
        self._full_test_list = tests
        self._test_config = port.test_configuration()
        self._is_lint_mode = is_lint_mode
        self._model_all_expectations = self._is_lint_mode or model_all_expectations
        self._model = TestExpectationsModel(self._shorten_filename)
        self._parser = TestExpectationParser(port, tests, self._is_lint_mode)
        self._port = port
        self._skipped_tests_warnings = []
        self._expectations = []

        if not expectations_dict:
            expectations_dict = port.expectations_dict()

        # Always parse the generic expectations (the generic file is required
        # to be the first one in the expectations_dict, which must be an OrderedDict).
        generic_path, generic_exps = expectations_dict.items()[0]
        expectations = self._parser.parse(generic_path, generic_exps)
        self._add_expectations(expectations, self._model)
        self._expectations += expectations

        # Now add the overrides if so requested.
        if include_overrides:
            for path, contents in expectations_dict.items()[1:]:
                expectations = self._parser.parse(path, contents)
                model = TestExpectationsModel(self._shorten_filename)
                self._add_expectations(expectations, model)
                self._expectations += expectations
                self._model.merge_model(model)

        # FIXME: move ignore_tests into port.skipped_layout_tests()
        self.add_extra_skipped_tests(port.skipped_layout_tests(tests).union(set(port.get_option('ignore_tests', []))))
        self.add_expectations_from_bot()

        self._has_warnings = False
        self._report_warnings()
        self._process_tests_without_expectations()

    # TODO(ojan): Allow for removing skipped tests when getting the list of
    # tests to run, but not when getting metrics.
    def model(self):
        return self._model

    def get_needs_rebaseline_failures(self):
        return self._model.get_test_set(NEEDS_REBASELINE)

    def get_rebaselining_failures(self):
        return self._model.get_test_set(REBASELINE)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_expectations(self, test):
        return self._model.get_expectations(test)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_result_type(self, result_type):
        return self._model.get_tests_with_result_type(result_type)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_test_set(self, expectation, include_skips=True):
        return self._model.get_test_set(expectation, include_skips)

    # FIXME: Change the callsites to use TestExpectationsModel and remove.
    def get_tests_with_timeline(self, timeline):
        return self._model.get_tests_with_timeline(timeline)

    def get_expectations_string(self, test):
        return self._model.get_expectations_string(test)

    def expectation_to_string(self, expectation):
        return self._model.expectation_to_string(expectation)

    def matches_an_expected_result(self, test, result, pixel_tests_are_enabled, sanitizer_is_enabled):
        expected_results = self._model.get_expectations(test)
        if sanitizer_is_enabled:
            expected_results = self.remove_non_sanitizer_failures(expected_results)
        elif not pixel_tests_are_enabled:
            expected_results = self.remove_pixel_failures(expected_results)
        return self.result_was_expected(result, expected_results, self.is_rebaselining(test))

    def is_rebaselining(self, test):
        return REBASELINE in self._model.get_expectations(test)

    def _shorten_filename(self, filename):
        if filename.startswith(self._port.path_from_webkit_base()):
            return self._port.host.filesystem.relpath(filename, self._port.path_from_webkit_base())
        return filename

    def _report_warnings(self):
        warnings = []
        for expectation in self._expectations:
            for warning in expectation.warnings:
                warnings.append('%s:%s %s %s' % (self._shorten_filename(expectation.filename), expectation.line_numbers,
                                warning, expectation.name if expectation.expectations else expectation.original_string))

        if warnings:
            self._has_warnings = True
            if self._is_lint_mode:
                raise ParseError(warnings)
            _log.warning('--lint-test-files warnings:')
            for warning in warnings:
                _log.warning(warning)
            _log.warning('')

    def _process_tests_without_expectations(self):
        if self._full_test_list:
            for test in self._full_test_list:
                if not self._model.has_test(test):
                    self._model.add_expectation_line(TestExpectationLine.create_passing_expectation(test))

    def has_warnings(self):
        return self._has_warnings

    def remove_configurations(self, removals):
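        # Illustrative example: `removals` is an iterable of (test name, TestConfiguration)
        # pairs, e.g. ('fast/js/example.html', <win7/x86/release configuration>), whose
        # matching expectation entries should be dropped when the file is re-serialized.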
        expectations_to_remove = []
        modified_expectations = []

        for test, test_configuration in removals:
            for expectation in self._expectations:
                if expectation.name != test or not expectation.parsed_expectations:
                    continue
                if test_configuration not in expectation.matching_configurations:
                    continue

                expectation.matching_configurations.remove(test_configuration)
                if expectation.matching_configurations:
                    modified_expectations.append(expectation)
                else:
                    expectations_to_remove.append(expectation)

        for expectation in expectations_to_remove:
            index = self._expectations.index(expectation)
            self._expectations.remove(expectation)

            if index == len(self._expectations) or self._expectations[index].is_whitespace_or_comment():
                while index and self._expectations[index - 1].is_whitespace_or_comment():
                    index = index - 1
                    self._expectations.pop(index)

        return self.list_to_string(self._expectations, self._parser._test_configuration_converter, modified_expectations)

    def _add_expectations(self, expectation_list, model):
        for expectation_line in expectation_list:
            if not expectation_line.expectations:
                continue

            if self._model_all_expectations or self._test_config in expectation_line.matching_configurations:
                model.add_expectation_line(expectation_line, model_all_expectations=self._model_all_expectations)

    def add_extra_skipped_tests(self, tests_to_skip):
        if not tests_to_skip:
            return
        for test in self._expectations:
            if test.name and test.name in tests_to_skip:
                test.warnings.append('%s:%s %s is also in a Skipped file.' % (test.filename, test.line_numbers, test.name))

        model = TestExpectationsModel(self._shorten_filename)
        for test_name in tests_to_skip:
            expectation_line = self._parser.expectation_for_skipped_test(test_name)
            model.add_expectation_line(expectation_line)
        self._model.merge_model(model)

    def add_expectations_from_bot(self):
        # FIXME: With mode 'very-flaky' and 'maybe-flaky', this will show the expectations entry in the flakiness
        # dashboard rows for each test to be whatever the bot thinks they should be. Is this a good thing?
        bot_expectations = self._port.bot_expectations()
        model = TestExpectationsModel(self._shorten_filename)
        for test_name in bot_expectations:
            expectation_line = self._parser.expectation_line_for_test(test_name, bot_expectations[test_name])

            # Unexpected results are merged into existing expectations.
            merge = self._port.get_option('ignore_flaky_tests') == 'unexpected'
            model.add_expectation_line(expectation_line)
        self._model.merge_model(model)

    def add_expectation_line(self, expectation_line):
        self._model.add_expectation_line(expectation_line)
        self._expectations += [expectation_line]

    def remove_expectation_line(self, test):
        if not self._model.has_test(test):
            return
        self._expectations.remove(self._model.get_expectation_line(test))
        self._model.remove_expectation_line(test)

    @staticmethod
    def list_to_string(expectation_lines, test_configuration_converter=None, reconstitute_only_these=None):
        def serialize(expectation_line):
            # If reconstitute_only_these is an empty list, we want to return original_string.
            # So we need to compare reconstitute_only_these to None, not just check if it's falsey.
            if reconstitute_only_these is None or expectation_line in reconstitute_only_these:
                return expectation_line.to_string(test_configuration_converter)
            return expectation_line.original_string

        def nones_out(expectation_line):
            return expectation_line is not None

        return "\n".join(filter(nones_out, map(serialize, expectation_lines)))