# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import json
import logging
import optparse
import re
import sys
import time
import traceback
import urllib
import urllib2

from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST, SKIP
from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.port import factory
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand


_log = logging.getLogger(__name__)


# FIXME: Should TestResultWriter know how to compute this string?
def _baseline_name(fs, test_name, suffix):
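    """Return the baseline (expected-result) file name for test_name, with the
    test's extension replaced by the expected-result suffix for the given file type."""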
    return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix


class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    # not overriding execute() - pylint: disable=W0223

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
              'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
        help="Comma-separated list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST


class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
    def __init__(self):
        super(BaseInternalRebaselineCommand, self).__init__(options=[
            self.results_directory_option,
            self.suffixes_option,
            optparse.make_option("--builder", help="Builder to pull new baselines from"),
            optparse.make_option("--test", help="Test to rebaseline"),
            ])
        self._scm_changes = {'add': [], 'remove-lines': []}

    def _add_to_scm(self, path):
        self._scm_changes['add'].append(path)

    def _baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        override_dir = builders.rebaseline_override_dir(builder_name)
        if override_dir:
            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
        return port.baseline_version_dir()

    def _test_root(self, test_name):
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        return "%s-actual.%s" % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        return "%s-expected.%s" % (self._test_root(test_name), suffix)


class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
    name = "copy-existing-baselines-internal"
    help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."

    @memoized
    def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
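        """Return the basenames of the baseline directories that fall back directly
        to path_to_rebaseline, i.e. the directory that comes immediately before it
        in each port's baseline search path."""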
        port_names = self._tool.port_factory.all_port_names()
        immediate_predecessors_in_fallback = []
        for port_name in port_names:
            port = self._tool.port_factory.get(port_name)
            if not port.buildbot_archives_baselines():
                continue
            baseline_search_path = port.baseline_search_path()
            try:
                index = baseline_search_path.index(path_to_rebaseline)
                if index:
                    immediate_predecessors_in_fallback.append(self._tool.filesystem.basename(baseline_search_path[index - 1]))
            except ValueError:
                # index() throws a ValueError if the item isn't in the list.
                pass
        return immediate_predecessors_in_fallback

    def _port_for_primary_baseline(self, baseline):
        for port in [self._tool.port_factory.get(port_name) for port_name in self._tool.port_factory.all_port_names()]:
            if self._tool.filesystem.basename(port.baseline_version_dir()) == baseline:
                return port
        raise Exception("Failed to find port for primary baseline %s." % baseline)

    def _copy_existing_baseline(self, builder_name, test_name, suffix):
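        """Copy the baseline that each immediate-predecessor port currently sees for
        test_name into that port's own baseline directory, so that rebaselining the
        builder's directory doesn't change results for those currently-passing platforms."""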
        baseline_directory = self._baseline_directory(builder_name)
        ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm(new_baseline)

    def execute(self, options, args, tool):
        for suffix in options.suffixes.split(','):
            self._copy_existing_baseline(options.builder, options.test, suffix)
        print json.dumps(self._scm_changes)


class RebaselineTest(BaseInternalRebaselineCommand):
    name = "rebaseline-test-internal"
    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."

    def _results_url(self, builder_name):
        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()

    def _save_baseline(self, data, target_baseline, baseline_directory, test_name, suffix):
        if not data:
            _log.debug("No baseline data to save.")
            return

        filesystem = self._tool.filesystem
        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
        filesystem.write_binary_file(target_baseline, data)
        if not self._tool.scm().exists(target_baseline):
            self._add_to_scm(target_baseline)

    def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
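        """Download the actual result for test_name/suffix from results_url and
        write it as the new expected file in the builder's baseline directory."""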
        baseline_directory = self._baseline_directory(builder_name)

        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))

        _log.debug("Retrieving %s." % source_baseline)
        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline, baseline_directory, test_name, suffix)

    def _rebaseline_test_and_update_expectations(self, options):
        port = self._tool.port_factory.get_from_builder_name(options.builder)
        if port.reference_files(options.test):
            _log.warning("Cannot rebaseline reftest: %s", options.test)
            return

        if options.results_directory:
            results_url = 'file://' + options.results_directory
        else:
            results_url = self._results_url(options.builder)
        self._baseline_suffix_list = options.suffixes.split(',')

        for suffix in self._baseline_suffix_list:
            self._rebaseline_test(options.builder, options.test, suffix, results_url)
        self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})

    def execute(self, options, args, tool):
        self._rebaseline_test_and_update_expectations(options)
        print json.dumps(self._scm_changes)


class OptimizeBaselines(AbstractRebaseliningCommand):
    name = "optimize-baselines"
    help_text = "Reshuffles the baselines for the given tests to use as little space on disk as possible."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(OptimizeBaselines, self).__init__(options=[self.suffixes_option] + self.platform_options)

    def _optimize_baseline(self, optimizer, test_name):
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            if not optimizer.optimize(baseline_name):
                print "Heuristics failed to optimize %s" % baseline_name

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return

        optimizer = BaselineOptimizer(tool, port_names)
        port = tool.port_factory.get(port_names[0])
        for test_name in port.tests(args):
            _log.info("Optimizing %s" % test_name)
            self._optimize_baseline(optimizer, test_name)


class AnalyzeBaselines(AbstractRebaseliningCommand):
    name = "analyze-baselines"
    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(AnalyzeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
            ] + self.platform_options)
        self._optimizer_class = BaselineOptimizer  # overridable for testing
        self._baseline_optimizer = None
        self._port = None

    def _write(self, msg):
        print msg

    def _analyze_baseline(self, options, test_name):
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
            if results_by_directory:
                self._write("%s:" % baseline_name)
                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
            elif options.missing:
                self._write("%s: (no baselines found)" % baseline_name)

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return

        self._baseline_optimizer = self._optimizer_class(tool, port_names)
        self._port = tool.port_factory.get(port_names[0])
        for test_name in self._port.tests(args):
            self._analyze_baseline(options, test_name)


class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
    # not overriding execute() - pylint: disable=W0223

    def __init__(self, options=None):
        super(AbstractParallelRebaselineCommand, self).__init__(options=options)
        self._builder_data = {}

    def builder_data(self):
        if not self._builder_data:
            for builder_name in self._release_builders():
                builder = self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name)
                self._builder_data[builder_name] = builder.latest_layout_test_results()
        return self._builder_data

    # The release builders cycle much faster than the debug ones and cover all the platforms.
    def _release_builders(self):
        release_builders = []
        for builder_name in builders.all_builder_names():
            port = self._tool.port_factory.get_from_builder_name(builder_name)
            if port.test_configuration().build_type == 'release':
                release_builders.append(builder_name)
        return release_builders

    def _run_webkit_patch(self, args, verbose):
        try:
            verbose_args = ['--verbose'] if verbose else []
            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
            for line in stderr.splitlines():
                _log.warning(line)
        except ScriptError, e:
            _log.error(e)

    def _builders_to_fetch_from(self, builders_to_check):
        # This routine returns the subset of builders that will cover all of the baseline search paths
        # used in the input list. In particular, if the input list contains both Release and Debug
        # versions of a configuration, we *only* return the Release version (since we don't save
        # debug versions of baselines).
        release_builders = set()
        debug_builders = set()
        builders_to_fallback_paths = {}
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return builders_to_fallback_paths.keys()

    def _rebaseline_commands(self, test_prefix_list, options):
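        """Build the webkit-patch command lines for rebaselining.

        test_prefix_list maps a test (or test prefix) to a dict of builder name ->
        list of suffixes, for example (illustrative names only):
            {'fast/dom/some-test.html': {'SOME_BUILDER_NAME': ['txt', 'png']}}

        Returns two lists of (command, cwd) tuples: the copy-existing-baselines-internal
        commands, which are run before the rebaseline-test-internal commands."""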
        path_to_webkit_patch = self._tool.path()
        cwd = self._tool.scm().checkout_root
        copy_baseline_commands = []
        rebaseline_commands = []
        port = self._tool.port_factory.get()

        for test_prefix in test_prefix_list:
            for test in port.tests([test_prefix]):
                for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
                    actual_failures_suffixes = self._suffixes_for_actual_failures(test, builder, test_prefix_list[test_prefix][builder])
                    if not actual_failures_suffixes:
                        continue

                    suffixes = ','.join(actual_failures_suffixes)
                    cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
                    if options.results_directory:
                        cmd_line.extend(['--results-directory', options.results_directory])
                    if options.verbose:
                        cmd_line.append('--verbose')
                    copy_baseline_commands.append(tuple([[path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
                    rebaseline_commands.append(tuple([[path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))
        return copy_baseline_commands, rebaseline_commands

    def _files_to_add(self, command_results):
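        """Parse the JSON that the internal rebaseline commands print to stdout.

        Returns (files_to_add, lines_to_remove), where files_to_add is a list of new
        baseline paths to add to the SCM and lines_to_remove maps a test name to the
        list of builders whose expectation lines should be removed for that test."""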
        files_to_add = set()
        lines_to_remove = {}
        for output in [result[1].split('\n') for result in command_results]:
            file_added = False
            for line in output:
                try:
                    if line:
                        parsed_line = json.loads(line)
                        if 'add' in parsed_line:
                            files_to_add.update(parsed_line['add'])
                        if 'remove-lines' in parsed_line:
                            for line_to_remove in parsed_line['remove-lines']:
                                test = line_to_remove['test']
                                builder = line_to_remove['builder']
                                if test not in lines_to_remove:
                                    lines_to_remove[test] = []
                                lines_to_remove[test].append(builder)
                        file_added = True
                except ValueError:
                    _log.debug('"%s" is not a JSON object, ignoring' % line)

            if not file_added:
                _log.debug('Could not add file based off output "%s"' % output)

        return list(files_to_add), lines_to_remove

    def _optimize_baselines(self, test_prefix_list, verbose=False):
        # We don't run this in parallel because modifying the SCM in parallel is unreliable.
        for test in test_prefix_list:
            all_suffixes = set()
            for builder in self._builders_to_fetch_from(test_prefix_list[test]):
                all_suffixes.update(self._suffixes_for_actual_failures(test, builder, test_prefix_list[test][builder]))
            # FIXME: We should propagate the platform options as well.
            self._run_webkit_patch(['optimize-baselines', '--suffixes', ','.join(all_suffixes), test], verbose)

    def _update_expectations_files(self, lines_to_remove):
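        """Remove rebaselined expectation lines from the TestExpectations files.

        lines_to_remove maps a test name to the builders whose configurations
        should no longer carry an expectation line for that test."""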
        # FIXME: This routine is way too expensive. We're creating N ports and N TestExpectations
        # objects and (re-)writing the actual expectations file N times, for each test we update.
        # We should be able to update everything in memory, once, and then write the file out a single time.
        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                path = port.path_to_generic_test_expectations_file()
                expectations = TestExpectations(port, include_overrides=False)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        expectationsString = expectations.remove_configuration_from_test(test, test_configuration)
                self._tool.filesystem.write_text_file(path, expectationsString)

            for port_name in self._tool.port_factory.all_port_names():
                port = self._tool.port_factory.get(port_name)
                generic_expectations = TestExpectations(port, tests=[test], include_overrides=False)
                if self._port_skips_test(port, test, generic_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            expectationsString = generic_expectations.remove_configuration_from_test(test, test_configuration)
                    generic_path = port.path_to_generic_test_expectations_file()
                    self._tool.filesystem.write_text_file(generic_path, expectationsString)

    def _port_skips_test(self, port, test, generic_expectations):
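        """Return True if the port skips the test via its SmokeTests file or via
        override expectations, even though the generic expectations do not skip it."""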
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
            if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
                return True

        full_expectations = TestExpectations(port, tests=[test], include_overrides=True)
        return (SKIP in full_expectations.get_expectations(test) and
                SKIP not in generic_expectations.get_expectations(test))

    def _run_in_parallel_and_update_scm(self, commands):
        command_results = self._tool.executive.run_in_parallel(commands)
        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
        for line in log_output.split('\n'):
            if line:
                print >> sys.stderr, line  # FIXME: Figure out how to log properly.

        files_to_add, lines_to_remove = self._files_to_add(command_results)
        if files_to_add:
            self._tool.scm().add_list(list(files_to_add))
        if lines_to_remove:
            self._update_expectations_files(lines_to_remove)

    def _rebaseline(self, options, test_prefix_list):
        for test, builders_to_check in sorted(test_prefix_list.items()):
            _log.info("Rebaselining %s" % test)
            for builder, suffixes in sorted(builders_to_check.items()):
                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))

        copy_baseline_commands, rebaseline_commands = self._rebaseline_commands(test_prefix_list, options)
        if copy_baseline_commands:
            self._run_in_parallel_and_update_scm(copy_baseline_commands)
        if rebaseline_commands:
            self._run_in_parallel_and_update_scm(rebaseline_commands)

        if options.optimize:
            self._optimize_baselines(test_prefix_list, options.verbose)

    def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
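        """Return the subset of existing_suffixes whose file types actually failed for
        this test on the builder's latest run (an empty set if nothing failed)."""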
        actual_results = self.builder_data()[builder_name].actual_results(test)
        if not actual_results:
            return set()
        return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)


class RebaselineJson(AbstractParallelRebaselineCommand):
    name = "rebaseline-json"
    help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."

    def __init__(self):
        super(RebaselineJson, self).__init__(options=[
            self.no_optimize_option,
            self.results_directory_option,
            ])

    def execute(self, options, args, tool):
        self._rebaseline(options, json.loads(sys.stdin.read()))


class RebaselineExpectations(AbstractParallelRebaselineCommand):
    name = "rebaseline-expectations"
    help_text = "Rebaselines the tests indicated in TestExpectations."
    show_in_main_help = True

    def __init__(self):
        super(RebaselineExpectations, self).__init__(options=[
            self.no_optimize_option,
            ] + self.platform_options)
        self._test_prefix_list = None

    def _tests_to_rebaseline(self, port):
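        """Return a dict mapping each test marked Rebaseline in the port's expectations
        files to the suffixes to rebaseline (defaulting to all baseline suffixes)."""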
        tests_to_rebaseline = {}
        for path, value in port.expectations_dict().items():
            expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
            for test in expectations.get_rebaselining_failures():
                suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
                tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
        return tests_to_rebaseline

    def _add_tests_to_rebaseline_for_port(self, port_name):
        builder_name = builders.builder_name_for_port_name(port_name)
        if not builder_name:
            return
        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()

        if tests:
            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))

        for test_name, suffixes in tests:
            _log.info("    %s (%s)" % (test_name, ','.join(suffixes)))
            if test_name not in self._test_prefix_list:
                self._test_prefix_list[test_name] = {}
            self._test_prefix_list[test_name][builder_name] = suffixes

    def execute(self, options, args, tool):
        options.results_directory = None
        self._test_prefix_list = {}
        port_names = tool.port_factory.all_port_names(options.platform)
        for port_name in port_names:
            self._add_tests_to_rebaseline_for_port(port_name)
        if not self._test_prefix_list:
            _log.warning("Did not find any tests marked Rebaseline.")
            return

        self._rebaseline(options, self._test_prefix_list)


class Rebaseline(AbstractParallelRebaselineCommand):
    name = "rebaseline"
    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
    show_in_main_help = True
    argument_names = "[TEST_NAMES]"

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.suffixes_option,
            self.results_directory_option,
            optparse.make_option("--builders", default=None, action="append", help="Comma-separated list of builders to pull new baselines from (can also be provided multiple times)"),
            ])

    def _builders_to_pull_from(self):
        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", self._release_builders(), can_choose_multiple=True)
        return [self._builder_with_name(name) for name in chosen_names]

    def _builder_with_name(self, name):
        return self._tool.buildbot_for_builder_name(name).builder_with_name(name)

    def execute(self, options, args, tool):
        if not args:
            _log.error("Must list tests to rebaseline.")
            return

        if options.builders:
            builders_to_check = []
            for builder_names in options.builders:
                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
        else:
            builders_to_check = self._builders_to_pull_from()

        test_prefix_list = {}
        suffixes_to_update = options.suffixes.split(",")

        for builder in builders_to_check:
            for test in args:
                if test not in test_prefix_list:
                    test_prefix_list[test] = {}
                test_prefix_list[test][builder.name()] = suffixes_to_update

        if options.verbose:
            _log.debug("rebaseline-json: " + str(test_prefix_list))

        self._rebaseline(options, test_prefix_list)


class AutoRebaseline(AbstractParallelRebaselineCommand):
    name = "auto-rebaseline"
    help_text = "Rebaselines any NeedsRebaseline lines in TestExpectations that have cycled through all the bots."
    AUTO_REBASELINE_BRANCH_NAME = "auto-rebaseline-temporary-branch"

    # Rietveld uploader stinks. Limit the number of rebaselines in a given patch to keep upload from failing.
    # FIXME: http://crbug.com/263676 Obviously we should fix the uploader here.
    MAX_LINES_TO_REBASELINE = 200

    def __init__(self):
        super(AutoRebaseline, self).__init__(options=[
            # FIXME: Remove this option.
            self.no_optimize_option,
            # FIXME: Remove this option.
            self.results_directory_option,
            optparse.make_option("--log-server", help="Server to send logs to.")
            ])

    def _log_to_server(self, log_server, query):
        if not log_server:
            return
        urllib2.urlopen("http://" + log_server + "/updatelog", data=urllib.urlencode(query))

    # Logs when there are no NeedsRebaseline lines in TestExpectations.
    # These entries overwrite the existing log entry if the existing
    # entry is also a noneedsrebaseline entry. This is special-cased
    # so that the log doesn't get bloated with entries like this
    # when there are no tests that need rebaselining.
    def _log_no_needs_rebaseline_lines(self, log_server):
        self._log_to_server(log_server, {
            "noneedsrebaseline": "on",
        })

    # Uploaded log entries append to the existing entry unless the
    # newentry flag is set. In that case it starts a new entry to
    # start appending to. So, we need to call this on any fresh run
    # that is going to end up logging stuff (i.e. any run that isn't
    # a noneedsrebaseline run).
    def _start_new_log_entry(self, log_server):
        self._log_to_server(log_server, {
            "log": "",
            "newentry": "on",
        })

    def _configure_logging(self, log_server):
        if not log_server:
            return

        def _log_alias(query):
            self._log_to_server(log_server, query)

        class LogHandler(logging.Handler):
            def __init__(self):
                logging.Handler.__init__(self)
                self._records = []

            # Since this does not have the newentry flag, it will append
            # to the most recent log entry (i.e. the one created by
            # _start_new_log_entry).
            def emit(self, record):
                _log_alias({
                    "log": record.getMessage(),
                })

        handler = LogHandler()
        _log.setLevel(logging.DEBUG)
        handler.setLevel(logging.DEBUG)
        _log.addHandler(handler)

    def bot_revision_data(self, log_server):
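        """Return a list of {"builder": name, "revision": blink revision} dicts for the
        latest run on each release builder, or [] if any of those runs exited early."""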
        revisions = []
        for result in self.builder_data().values():
            if result.run_was_interrupted():
                self._start_new_log_entry(log_server)
                _log.error("Can't rebaseline because the latest run on %s exited early." % result.builder_name())
                return []
            revisions.append({
                "builder": result.builder_name(),
                "revision": result.blink_revision(),
            })
        return revisions

    def tests_to_rebaseline(self, tool, min_revision, print_revisions, log_server):
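        """Scan the blame output of the generic TestExpectations file for NeedsRebaseline lines.

        Each matching blame line is parsed for its commit hash, author, and test name.
        Only lines whose commit has cycled through every bot (svn revision <= min_revision)
        are kept, and all returned tests come from a single such commit.
        Returns (tests, revision, author, bugs, has_any_needs_rebaseline_lines)."""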
        port = tool.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()

        tests = set()
        revision = None
        author = None
        bugs = set()
        has_any_needs_rebaseline_lines = False

        for line in tool.scm().blame(expectations_file_path).split("\n"):
            comment_index = line.find("#")
            if comment_index == -1:
                comment_index = len(line)
            line_without_comments = re.sub(r"\s+", " ", line[:comment_index].strip())

            if "NeedsRebaseline" not in line_without_comments:
                continue

            if not has_any_needs_rebaseline_lines:
                self._start_new_log_entry(log_server)
            has_any_needs_rebaseline_lines = True

            parsed_line = re.match("^(\S*)[^(]*\((\S*).*?([^ ]*)\ \[[^[]*$", line_without_comments)

            commit_hash = parsed_line.group(1)
            svn_revision = tool.scm().svn_revision_from_git_commit(commit_hash)

            test = parsed_line.group(3)
            if print_revisions:
                _log.info("%s is waiting for r%s" % (test, svn_revision))

            if not svn_revision or svn_revision > min_revision:
                continue

            if revision and svn_revision != revision:
                continue

            if not revision:
                revision = svn_revision
                author = parsed_line.group(2)

            bugs.update(re.findall("crbug\.com\/(\d+)", line_without_comments))
            tests.add(test)

            if len(tests) >= self.MAX_LINES_TO_REBASELINE:
                _log.info("Too many tests to rebaseline in one patch. Doing the first %d." % self.MAX_LINES_TO_REBASELINE)
                break

        return tests, revision, author, bugs, has_any_needs_rebaseline_lines

    def link_to_patch(self, revision):
        return "http://src.chromium.org/viewvc/blink?view=revision&revision=" + str(revision)

    def commit_message(self, author, revision, bugs):
        bug_string = ""
        if bugs:
            bug_string = "BUG=%s\n" % ",".join(bugs)

        return """Auto-rebaseline for r%s

%s

%sTBR=%s
""" % (revision, self.link_to_patch(revision), bug_string, author)

    def get_test_prefix_list(self, tests):
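        """For every release builder, find which of the given tests are marked
        NeedsRebaseline in its expectations. Returns (test_prefix_list, lines_to_remove),
        where test_prefix_list maps test -> {builder: BASELINE_SUFFIX_LIST} and
        lines_to_remove maps test -> [builder names]."""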
        test_prefix_list = {}
        lines_to_remove = {}

        for builder_name in self._release_builders():
            port_name = builders.port_name_for_builder_name(builder_name)
            port = self._tool.port_factory.get(port_name)
            expectations = TestExpectations(port, include_overrides=True)
            for test in expectations.get_needs_rebaseline_failures():
                if test not in tests:
                    continue

                if test not in test_prefix_list:
                    lines_to_remove[test] = []
                    test_prefix_list[test] = {}
                lines_to_remove[test].append(builder_name)
                test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST

        return test_prefix_list, lines_to_remove

    def _run_git_cl_command(self, options, command):
        subprocess_command = ['git', 'cl'] + command
        if options.verbose:
            subprocess_command.append('--verbose')
        # Use call instead of run_command so that stdout doesn't get swallowed.
        self._tool.executive.call(subprocess_command)

    # FIXME: Move this somewhere more general.
    def tree_status(self):
        blink_tree_status_url = "http://blink-status.appspot.com/status"
        status = urllib2.urlopen(blink_tree_status_url).read().lower()
        if status.find('closed') != -1 or status == "0":
            return 'closed'
        elif status.find('open') != -1 or status == "1":
            return 'open'
        return 'unknown'

    def execute(self, options, args, tool):
        if tool.scm().executable_name == "svn":
            _log.error("Auto rebaseline only works with a git checkout.")
            return

        if tool.scm().has_working_directory_changes():
            _log.error("Cannot proceed with working directory changes. Clean working directory first.")
            return

        self._configure_logging(options.log_server)

        revision_data = self.bot_revision_data(options.log_server)
        if not revision_data:
            return

        min_revision = int(min([item["revision"] for item in revision_data]))
        tests, revision, author, bugs, has_any_needs_rebaseline_lines = self.tests_to_rebaseline(tool, min_revision, print_revisions=options.verbose, log_server=options.log_server)

        if not has_any_needs_rebaseline_lines:
            self._log_no_needs_rebaseline_lines(options.log_server)
            return

        if options.verbose:
            _log.info("Min revision across all bots is %s." % min_revision)
            for item in revision_data:
                _log.info("%s: r%s" % (item["builder"], item["revision"]))

        if not tests:
            _log.debug('No tests to rebaseline.')
            return

        if self.tree_status() == 'closed':
            _log.info('Cannot proceed. Tree is closed.')
            return

        _log.info('Rebaselining %s for r%s by %s.' % (list(tests), revision, author))

        test_prefix_list, lines_to_remove = self.get_test_prefix_list(tests)

        try:
            old_branch_name = tool.scm().current_branch()
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
            tool.scm().create_clean_branch(self.AUTO_REBASELINE_BRANCH_NAME)

            # If the tests are passing everywhere, then this list will be empty. We don't need
            # to rebaseline, but we'll still need to update TestExpectations.
            if test_prefix_list:
                self._rebaseline(options, test_prefix_list)
            # If a test is not failing on the bot, we don't try to rebaseline it, but we still
            # want to remove the NeedsRebaseline line.
            self._update_expectations_files(lines_to_remove)

            tool.scm().commit_locally_with_message(self.commit_message(author, revision, bugs))

            # FIXME: Log the upload, pull and dcommit stdout/stderr to the log-server.

            # FIXME: It would be nice if we could dcommit the patch without uploading, but still
            # go through all the precommit hooks. For rebaselines with lots of files, uploading
            # takes a long time and sometimes fails, but we don't want to commit if, e.g. the
            # tree is closed.
            self._run_git_cl_command(options, ['upload', '-f'])

            # Uploading can take a very long time. Do another pull to make sure TestExpectations is up to date,
            # so the dcommit can go through.
            tool.executive.run_command(['git', 'pull'])

            self._run_git_cl_command(options, ['dcommit', '-f'])
        finally:
            self._run_git_cl_command(options, ['set_close'])
            tool.scm().ensure_cleanly_tracking_remote_master()
            tool.scm().checkout_branch(old_branch_name)
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)


class RebaselineOMatic(AbstractDeclarativeCommand):
    name = "rebaseline-o-matic"
    help_text = "Calls webkit-patch auto-rebaseline in a loop."
    show_in_main_help = True

    SLEEP_TIME_IN_SECONDS = 30

    def execute(self, options, args, tool):
        while True:
            try:
                tool.executive.run_command(['git', 'pull'])
                rebaseline_command = [tool.filesystem.join(tool.scm().checkout_root, 'Tools', 'Scripts', 'webkit-patch'), 'auto-rebaseline', '--log-server', 'blinkrebaseline.appspot.com']
                if options.verbose:
                    rebaseline_command.append('--verbose')
                # Use call instead of run_command so that stdout doesn't get swallowed.
                tool.executive.call(rebaseline_command)
            except:
                traceback.print_exc(file=sys.stderr)

            time.sleep(self.SLEEP_TIME_IN_SECONDS)