# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import Queue
import json
import logging
import optparse
import re
import sys
import threading
import time
import traceback
import urllib
import urllib2

from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST, SKIP
from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.port import factory
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand


_log = logging.getLogger(__name__)


# FIXME: Should TestResultWriter know how to compute this string?
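# For example (assuming TestResultWriter.FILENAME_SUFFIX_EXPECTED is "-expected"),
# _baseline_name(fs, "fast/dom/foo.html", "txt") yields "fast/dom/foo-expected.txt".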
def _baseline_name(fs, test_name, suffix):
    return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix


class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    # not overriding execute() - pylint: disable=W0223

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
              'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
        help="Comma-separated list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
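        # SCM changes are accumulated here; the *-internal commands dump this dict as a
        # JSON line on stdout so the parent command can apply all changes in one place
        # (see AbstractParallelRebaselineCommand._serial_commands).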
        self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}

    def _add_to_scm_later(self, path):
        self._scm_changes['add'].append(path)

    def _delete_from_scm_later(self, path):
        self._scm_changes['delete'].append(path)


class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
    def __init__(self):
        super(BaseInternalRebaselineCommand, self).__init__(options=[
            self.results_directory_option,
            self.suffixes_option,
            optparse.make_option("--builder", help="Builder to pull new baselines from"),
            optparse.make_option("--test", help="Test to rebaseline"),
            ])

    def _baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        override_dir = builders.rebaseline_override_dir(builder_name)
        if override_dir:
            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
        return port.baseline_version_dir()

    def _test_root(self, test_name):
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        return "%s-actual.%s" % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        return "%s-expected.%s" % (self._test_root(test_name), suffix)


class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
    name = "copy-existing-baselines-internal"
    help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."
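
    # For example (platform names are illustrative): if the Mac 10.8 port falls back to
    # the generic "mac" baseline directory, replacing the "mac" baseline would silently
    # change the expected result for 10.8 as well. Copying the current baseline into the
    # 10.8 directory first keeps platforms that were passing unaffected.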

    @memoized
    def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
        port_names = self._tool.port_factory.all_port_names()
        immediate_predecessors_in_fallback = []
        for port_name in port_names:
            port = self._tool.port_factory.get(port_name)
            if not port.buildbot_archives_baselines():
                continue
            baseline_search_path = port.baseline_search_path()
            try:
                index = baseline_search_path.index(path_to_rebaseline)
                if index:
                    immediate_predecessors_in_fallback.append(self._tool.filesystem.basename(baseline_search_path[index - 1]))
            except ValueError:
                # index() throws a ValueError if the item isn't in the list.
                pass
        return immediate_predecessors_in_fallback

    def _port_for_primary_baseline(self, baseline):
        for port in [self._tool.port_factory.get(port_name) for port_name in self._tool.port_factory.all_port_names()]:
            if self._tool.filesystem.basename(port.baseline_version_dir()) == baseline:
                return port
        raise Exception("Failed to find port for primary baseline %s." % baseline)

    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        baseline_directory = self._baseline_directory(builder_name)
        ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm_later(new_baseline)

    def execute(self, options, args, tool):
        for suffix in options.suffixes.split(','):
            self._copy_existing_baseline(options.builder, options.test, suffix)
        print json.dumps(self._scm_changes)


class RebaselineTest(BaseInternalRebaselineCommand):
    name = "rebaseline-test-internal"
    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."

    def _results_url(self, builder_name):
        return self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name).latest_layout_test_results_url()

    def _save_baseline(self, data, target_baseline, baseline_directory, test_name, suffix):
        if not data:
            _log.debug("No baseline data to save.")
            return

        filesystem = self._tool.filesystem
        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
        filesystem.write_binary_file(target_baseline, data)
        if not self._tool.scm().exists(target_baseline):
            self._add_to_scm_later(target_baseline)

    def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
        baseline_directory = self._baseline_directory(builder_name)

        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))

        _log.debug("Retrieving %s." % source_baseline)
        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline, baseline_directory, test_name, suffix)

    def _rebaseline_test_and_update_expectations(self, options):
        port = self._tool.port_factory.get_from_builder_name(options.builder)
        if port.reference_files(options.test):
            _log.warning("Cannot rebaseline reftest: %s", options.test)
            return

        if options.results_directory:
            results_url = 'file://' + options.results_directory
        else:
            results_url = self._results_url(options.builder)
        self._baseline_suffix_list = options.suffixes.split(',')

        for suffix in self._baseline_suffix_list:
            self._rebaseline_test(options.builder, options.test, suffix, results_url)
        self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})

    def execute(self, options, args, tool):
        self._rebaseline_test_and_update_expectations(options)
        print json.dumps(self._scm_changes)


class OptimizeBaselines(AbstractRebaseliningCommand):
    name = "optimize-baselines"
    help_text = "Reshuffles the baselines for the given tests to use as little space on disk as possible."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(OptimizeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--no-modify-scm', action='store_true', default=False, help='Dump SCM commands as JSON instead of running them'),
            ] + self.platform_options)

    def _optimize_baseline(self, optimizer, test_name):
        # Optimize every requested suffix and accumulate the resulting SCM changes.
        files_to_delete = []
        files_to_add = []
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            succeeded, more_files_to_delete, more_files_to_add = optimizer.optimize(baseline_name)
            if not succeeded:
                print "Heuristics failed to optimize %s" % baseline_name
            files_to_delete.extend(more_files_to_delete)
            files_to_add.extend(more_files_to_add)
        return files_to_delete, files_to_add

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return

        optimizer = BaselineOptimizer(tool, port_names, skip_scm_commands=options.no_modify_scm)
        port = tool.port_factory.get(port_names[0])
        for test_name in port.tests(args):
            _log.info("Optimizing %s" % test_name)
            files_to_delete, files_to_add = self._optimize_baseline(optimizer, test_name)
            for path in files_to_delete:
                self._delete_from_scm_later(path)
            for path in files_to_add:
                self._add_to_scm_later(path)

        print json.dumps(self._scm_changes)


class AnalyzeBaselines(AbstractRebaseliningCommand):
    name = "analyze-baselines"
    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(AnalyzeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
            ] + self.platform_options)
        self._optimizer_class = BaselineOptimizer  # overridable for testing
        self._baseline_optimizer = None
        self._port = None

    def _write(self, msg):
        print msg

    def _analyze_baseline(self, options, test_name):
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
            if results_by_directory:
                self._write("%s:" % baseline_name)
                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
            elif options.missing:
                self._write("%s: (no baselines found)" % baseline_name)

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return

        self._baseline_optimizer = self._optimizer_class(tool, port_names, skip_scm_commands=False)
        self._port = tool.port_factory.get(port_names[0])
        for test_name in self._port.tests(args):
            self._analyze_baseline(options, test_name)


class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
    # not overriding execute() - pylint: disable=W0223

    def __init__(self, options=None):
        super(AbstractParallelRebaselineCommand, self).__init__(options=options)
        self._builder_data = {}

    def builder_data(self):
        if not self._builder_data:
            for builder_name in self._release_builders():
                builder = self._tool.buildbot_for_builder_name(builder_name).builder_with_name(builder_name)
                self._builder_data[builder_name] = builder.latest_layout_test_results()
        return self._builder_data

    # The release builders cycle much faster than the debug ones and cover all the platforms.
    def _release_builders(self):
        release_builders = []
        for builder_name in builders.all_builder_names():
            if builder_name.find('ASAN') != -1:
                continue
            port = self._tool.port_factory.get_from_builder_name(builder_name)
            if port.test_configuration().build_type == 'release':
                release_builders.append(builder_name)
        return release_builders

    def _run_webkit_patch(self, args, verbose):
        try:
            verbose_args = ['--verbose'] if verbose else []
            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
            for line in stderr.splitlines():
                _log.warning(line)
        except ScriptError, e:
            _log.error(e)

    def _builders_to_fetch_from(self, builders_to_check):
        # This routine returns the subset of builders that will cover all of the baseline search paths
        # used in the input list. In particular, if the input list contains both Release and Debug
        # versions of a configuration, we *only* return the Release version (since we don't save
        # debug versions of baselines).
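        # For example (builder names are illustrative): given "WebKit Linux" and
        # "WebKit Linux (dbg)", only "WebKit Linux" is returned, since both share the same
        # baseline search path and we never check in Debug-specific baselines.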
        release_builders = set()
        debug_builders = set()
        builders_to_fallback_paths = {}
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return builders_to_fallback_paths.keys()

    def _rebaseline_commands(self, test_prefix_list, options):
        path_to_webkit_patch = self._tool.path()
        cwd = self._tool.scm().checkout_root
        copy_baseline_commands = []
        rebaseline_commands = []
        lines_to_remove = {}
        port = self._tool.port_factory.get()

        for test_prefix in test_prefix_list:
            for test in port.tests([test_prefix]):
                for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
                    actual_failures_suffixes = self._suffixes_for_actual_failures(test, builder, test_prefix_list[test_prefix][builder])
                    if not actual_failures_suffixes:
                        # If we're not going to rebaseline the test because it's passing on this
                        # builder, we still want to remove the line from TestExpectations.
                        if test not in lines_to_remove:
                            lines_to_remove[test] = []
                        lines_to_remove[test].append(builder)
                        continue

                    suffixes = ','.join(actual_failures_suffixes)
                    cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
                    if options.results_directory:
                        cmd_line.extend(['--results-directory', options.results_directory])
                    if options.verbose:
                        cmd_line.append('--verbose')
                    copy_baseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
                    rebaseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))
        return copy_baseline_commands, rebaseline_commands, lines_to_remove

    def _serial_commands(self, command_results):
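        # Each *-internal command prints its _scm_changes dict as a single JSON line on
        # stdout; collect the files to add/delete and the TestExpectations lines to remove
        # across all of the parallel invocations.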
        files_to_add = set()
        files_to_delete = set()
        lines_to_remove = {}
        for output in [result[1].split('\n') for result in command_results]:
            file_added = False
            for line in output:
                try:
                    if line:
                        parsed_line = json.loads(line)
                        if 'add' in parsed_line:
                            files_to_add.update(parsed_line['add'])
                        if 'delete' in parsed_line:
                            files_to_delete.update(parsed_line['delete'])
                        if 'remove-lines' in parsed_line:
                            for line_to_remove in parsed_line['remove-lines']:
                                test = line_to_remove['test']
                                builder = line_to_remove['builder']
                                if test not in lines_to_remove:
                                    lines_to_remove[test] = []
                                lines_to_remove[test].append(builder)
                        file_added = True
                except ValueError:
                    _log.debug('"%s" is not a JSON object, ignoring' % line)

            if not file_added:
                _log.debug('Could not add file based off output "%s"' % output)

        return list(files_to_add), list(files_to_delete), lines_to_remove

    def _optimize_baselines(self, test_prefix_list, verbose=False):
        optimize_commands = []
        for test in test_prefix_list:
            all_suffixes = set()
            for builder in self._builders_to_fetch_from(test_prefix_list[test]):
                all_suffixes.update(self._suffixes_for_actual_failures(test, builder, test_prefix_list[test][builder]))

            # FIXME: We should propagate the platform options as well.
            cmd_line = ['--no-modify-scm', '--suffixes', ','.join(all_suffixes), test]
            if verbose:
                cmd_line.append('--verbose')

            path_to_webkit_patch = self._tool.path()
            cwd = self._tool.scm().checkout_root
            optimize_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'optimize-baselines'] + cmd_line, cwd]))
        return optimize_commands

    def _update_expectations_files(self, lines_to_remove):
        # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
        # This is slow and uses a lot of memory.
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test, e.g. Android skips most
        # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
        # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
        # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
        # pass the exact set of ports to rebaseline.
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
            full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
            for test in tests:
                if self._port_skips_test(port, test, generic_expectations, full_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectationsString = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectationsString)

    def _port_skips_test(self, port, test, generic_expectations, full_expectations):
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
            if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
                return True

        return (SKIP in full_expectations.get_expectations(test) and
                SKIP not in generic_expectations.get_expectations(test))

    def _run_in_parallel_and_update_scm(self, commands):
        command_results = self._tool.executive.run_in_parallel(commands)
        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
        for line in log_output.split('\n'):
            if line:
                print >> sys.stderr, line  # FIXME: Figure out how to log properly.

        files_to_add, files_to_delete, lines_to_remove = self._serial_commands(command_results)
        if files_to_delete:
            self._tool.scm().delete_list(files_to_delete)
        if files_to_add:
            self._tool.scm().add_list(files_to_add)
        return lines_to_remove

    def _rebaseline(self, options, test_prefix_list):
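        # Four phases: (1) copy existing baselines down the fallback chain so passing
        # platforms keep their current results, (2) pull the new baselines from the bots,
        # (3) remove the now-covered lines from TestExpectations, and (4) optionally
        # optimize/de-dup the checked-in baselines.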
        for test, builders_to_check in sorted(test_prefix_list.items()):
            _log.info("Rebaselining %s" % test)
            for builder, suffixes in sorted(builders_to_check.items()):
                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))

        copy_baseline_commands, rebaseline_commands, extra_lines_to_remove = self._rebaseline_commands(test_prefix_list, options)
        lines_to_remove = {}

        if copy_baseline_commands:
            self._run_in_parallel_and_update_scm(copy_baseline_commands)
        if rebaseline_commands:
            lines_to_remove = self._run_in_parallel_and_update_scm(rebaseline_commands)

        for test in extra_lines_to_remove:
            if test in lines_to_remove:
                lines_to_remove[test] = lines_to_remove[test] + extra_lines_to_remove[test]
            else:
                lines_to_remove[test] = extra_lines_to_remove[test]

        if lines_to_remove:
            self._update_expectations_files(lines_to_remove)

        if options.optimize:
            self._run_in_parallel_and_update_scm(self._optimize_baselines(test_prefix_list, options.verbose))

    def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
        actual_results = self.builder_data()[builder_name].actual_results(test)
        if not actual_results:
            return set()
        return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)


class RebaselineJson(AbstractParallelRebaselineCommand):
    name = "rebaseline-json"
    help_text = "Rebaseline based on JSON passed to stdin. Intended to be called only from other scripts."
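
    # The JSON maps test names (or test directory prefixes) to a dict of builder names to
    # the list of baseline suffixes to fetch, e.g. (values are illustrative):
    #   {"fast/dom/prototype-inheritance.html": {"WebKit Linux": ["png", "txt"]}}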

    def __init__(self):
        super(RebaselineJson, self).__init__(options=[
            self.no_optimize_option,
            self.results_directory_option,
            ])

    def execute(self, options, args, tool):
        self._rebaseline(options, json.loads(sys.stdin.read()))


class RebaselineExpectations(AbstractParallelRebaselineCommand):
    name = "rebaseline-expectations"
    help_text = "Rebaselines the tests indicated in TestExpectations."
    show_in_main_help = True

    def __init__(self):
        super(RebaselineExpectations, self).__init__(options=[
            self.no_optimize_option,
            ] + self.platform_options)
        self._test_prefix_list = None

    def _tests_to_rebaseline(self, port):
        tests_to_rebaseline = {}
        for path, value in port.expectations_dict().items():
            expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
            for test in expectations.get_rebaselining_failures():
                suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
                tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
        return tests_to_rebaseline

    def _add_tests_to_rebaseline_for_port(self, port_name):
        builder_name = builders.builder_name_for_port_name(port_name)
        if not builder_name:
            return
        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()

        if tests:
            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))

        for test_name, suffixes in tests:
            _log.info("    %s (%s)" % (test_name, ','.join(suffixes)))
            if test_name not in self._test_prefix_list:
                self._test_prefix_list[test_name] = {}
            self._test_prefix_list[test_name][builder_name] = suffixes

    def execute(self, options, args, tool):
        options.results_directory = None
        self._test_prefix_list = {}
        port_names = tool.port_factory.all_port_names(options.platform)
        for port_name in port_names:
            self._add_tests_to_rebaseline_for_port(port_name)
        if not self._test_prefix_list:
            _log.warning("Did not find any tests marked Rebaseline.")
            return

        self._rebaseline(options, self._test_prefix_list)


class Rebaseline(AbstractParallelRebaselineCommand):
    name = "rebaseline"
    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
    show_in_main_help = True
    argument_names = "[TEST_NAMES]"

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.suffixes_option,
            self.results_directory_option,
            optparse.make_option("--builders", default=None, action="append", help="Comma-separated list of builders to pull new baselines from (can also be provided multiple times)"),
            ])

    def _builders_to_pull_from(self):
        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", self._release_builders(), can_choose_multiple=True)
        return [self._builder_with_name(name) for name in chosen_names]

    def _builder_with_name(self, name):
        return self._tool.buildbot_for_builder_name(name).builder_with_name(name)

    def execute(self, options, args, tool):
        if not args:
            _log.error("Must list tests to rebaseline.")
            return

        if options.builders:
            builders_to_check = []
            for builder_names in options.builders:
                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
        else:
            builders_to_check = self._builders_to_pull_from()

        test_prefix_list = {}
        suffixes_to_update = options.suffixes.split(",")

        for builder in builders_to_check:
            for test in args:
                if test not in test_prefix_list:
                    test_prefix_list[test] = {}
                test_prefix_list[test][builder.name()] = suffixes_to_update

        if options.verbose:
            _log.debug("rebaseline-json: " + str(test_prefix_list))

        self._rebaseline(options, test_prefix_list)


class AutoRebaseline(AbstractParallelRebaselineCommand):
    name = "auto-rebaseline"
    help_text = "Rebaselines any NeedsRebaseline lines in TestExpectations that have cycled through all the bots."
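
    # Overall flow: find NeedsRebaseline lines in TestExpectations whose revision has
    # cycled through every release bot, pull the new baselines on a temporary branch,
    # drop the now-handled NeedsRebaseline lines, then upload and dcommit via git cl.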
    AUTO_REBASELINE_BRANCH_NAME = "auto-rebaseline-temporary-branch"

    # Rietveld uploader stinks. Limit the number of rebaselines in a given patch to keep upload from failing.
    # FIXME: http://crbug.com/263676 Obviously we should fix the uploader here.
    MAX_LINES_TO_REBASELINE = 200

    SECONDS_BEFORE_GIVING_UP = 300

    def __init__(self):
        super(AutoRebaseline, self).__init__(options=[
            # FIXME: Remove this option.
            self.no_optimize_option,
            # FIXME: Remove this option.
            self.results_directory_option,
            ])

    def bot_revision_data(self):
        revisions = []
        for result in self.builder_data().values():
            if result.run_was_interrupted():
                _log.error("Can't rebaseline because the latest run on %s exited early." % result.builder_name())
                return []
            revisions.append({
                "builder": result.builder_name(),
                "revision": result.blink_revision(),
            })
        return revisions

    def tests_to_rebaseline(self, tool, min_revision, print_revisions):
        port = tool.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()

        tests = set()
        revision = None
        author = None
        bugs = set()
        has_any_needs_rebaseline_lines = False

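        # Each blame line looks roughly like this (values are illustrative):
        #   6469e754a1 (<ojan@chromium.org> 2013-06-14 20:18:46 +0000 123) crbug.com/123 fast/dom/foo.html [ NeedsRebaseline ]
        # group(1) is the commit hash, group(2) the author, and group(3) the test name.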
        for line in tool.scm().blame(expectations_file_path).split("\n"):
            comment_index = line.find("#")
            if comment_index == -1:
                comment_index = len(line)
            line_without_comments = re.sub(r"\s+", " ", line[:comment_index].strip())

            if "NeedsRebaseline" not in line_without_comments:
                continue

            has_any_needs_rebaseline_lines = True

            parsed_line = re.match("^(\S*)[^(]*\((\S*).*?([^ ]*)\ \[[^[]*$", line_without_comments)

            commit_hash = parsed_line.group(1)
            svn_revision = tool.scm().svn_revision_from_git_commit(commit_hash)

            test = parsed_line.group(3)
            if print_revisions:
                _log.info("%s is waiting for r%s" % (test, svn_revision))

            if not svn_revision or svn_revision > min_revision:
                continue

            if revision and svn_revision != revision:
                continue

            if not revision:
                revision = svn_revision
                author = parsed_line.group(2)

            bugs.update(re.findall("crbug\.com\/(\d+)", line_without_comments))
            tests.add(test)

            if len(tests) >= self.MAX_LINES_TO_REBASELINE:
                _log.info("Too many tests to rebaseline in one patch. Doing the first %d." % self.MAX_LINES_TO_REBASELINE)
                break

        return tests, revision, author, bugs, has_any_needs_rebaseline_lines

    def link_to_patch(self, revision):
        return "http://src.chromium.org/viewvc/blink?view=revision&revision=" + str(revision)

    def commit_message(self, author, revision, bugs):
        bug_string = ""
        if bugs:
            bug_string = "BUG=%s\n" % ",".join(bugs)

        return """Auto-rebaseline for r%s

%s

%sTBR=%s
""" % (revision, self.link_to_patch(revision), bug_string, author)

    def get_test_prefix_list(self, tests):
        test_prefix_list = {}
        lines_to_remove = {}

        for builder_name in self._release_builders():
            port_name = builders.port_name_for_builder_name(builder_name)
            port = self._tool.port_factory.get(port_name)
            expectations = TestExpectations(port, include_overrides=True)
            for test in expectations.get_needs_rebaseline_failures():
                if test not in tests:
                    continue

                if test not in test_prefix_list:
                    lines_to_remove[test] = []
                    test_prefix_list[test] = {}
                lines_to_remove[test].append(builder_name)
                test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST

        return test_prefix_list, lines_to_remove

    def _run_git_cl_command(self, options, command):
        subprocess_command = ['git', 'cl'] + command
        if options.verbose:
            subprocess_command.append('--verbose')

        process = self._tool.executive.popen(subprocess_command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)
        last_output_time = time.time()

        # git cl sometimes completely hangs. Bail if we haven't gotten any output to stdout/stderr in a while.
        while process.poll() is None and time.time() < last_output_time + self.SECONDS_BEFORE_GIVING_UP:
            # FIXME: This doesn't make any sense. readline blocks, so all this code to
            # try and bail is useless. Instead, we should do the readline calls on a
            # subthread. Then the rest of this code would make sense.
            out = process.stdout.readline().rstrip('\n')
            if out:
                last_output_time = time.time()
                _log.info(out)

        if process.poll() is None:
            _log.error('Command hung: %s' % subprocess_command)
            return False
        return True

    # FIXME: Move this somewhere more general.
    def tree_status(self):
        blink_tree_status_url = "http://blink-status.appspot.com/status"
        status = urllib2.urlopen(blink_tree_status_url).read().lower()
        if status.find('closed') != -1 or status == "0":
            return 'closed'
        elif status.find('open') != -1 or status == "1":
            return 'open'
        return 'unknown'

    def execute(self, options, args, tool):
        if tool.scm().executable_name == "svn":
            _log.error("Auto rebaseline only works with a git checkout.")
            return

        if tool.scm().has_working_directory_changes():
            _log.error("Cannot proceed with working directory changes. Clean working directory first.")
            return

        revision_data = self.bot_revision_data()
        if not revision_data:
            return

        min_revision = int(min([item["revision"] for item in revision_data]))
        tests, revision, author, bugs, has_any_needs_rebaseline_lines = self.tests_to_rebaseline(tool, min_revision, print_revisions=options.verbose)

        if options.verbose:
            _log.info("Min revision across all bots is %s." % min_revision)
            for item in revision_data:
                _log.info("%s: r%s" % (item["builder"], item["revision"]))

        if not tests:
            _log.debug('No tests to rebaseline.')
            return

        if self.tree_status() == 'closed':
            _log.info('Cannot proceed. Tree is closed.')
            return

        _log.info('Rebaselining %s for r%s by %s.' % (list(tests), revision, author))

        test_prefix_list, lines_to_remove = self.get_test_prefix_list(tests)

        did_finish = False
        try:
            old_branch_name = tool.scm().current_branch()
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
            tool.scm().create_clean_branch(self.AUTO_REBASELINE_BRANCH_NAME)

            # If the tests are passing everywhere, then this list will be empty. We don't need
            # to rebaseline, but we'll still need to update TestExpectations.
            if test_prefix_list:
                self._rebaseline(options, test_prefix_list)

            tool.scm().commit_locally_with_message(self.commit_message(author, revision, bugs))

            # FIXME: It would be nice if we could dcommit the patch without uploading, but still
            # go through all the precommit hooks. For rebaselines with lots of files, uploading
            # takes a long time and sometimes fails, but we don't want to commit if, e.g. the
            # tree is closed.
            did_finish = self._run_git_cl_command(options, ['upload', '-f'])

            if did_finish:
                # Uploading can take a very long time. Do another pull to make sure TestExpectations is up to date,
                # so the dcommit can go through.
                # FIXME: Log the pull and dcommit stdout/stderr to the log-server.
                tool.executive.run_command(['git', 'pull'])

                self._run_git_cl_command(options, ['dcommit', '-f'])
        except Exception as e:
            _log.error(e)
        finally:
            if did_finish:
                self._run_git_cl_command(options, ['set_close'])
            tool.scm().ensure_cleanly_tracking_remote_master()
            tool.scm().checkout_branch(old_branch_name)
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)


class RebaselineOMatic(AbstractDeclarativeCommand):
    name = "rebaseline-o-matic"
    help_text = "Calls webkit-patch auto-rebaseline in a loop."
    show_in_main_help = True

    SLEEP_TIME_IN_SECONDS = 30
    LOG_SERVER = 'blinkrebaseline.appspot.com'
    QUIT_LOG = '##QUIT##'
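
    # A background thread drains self._log_queue and posts batched lines to LOG_SERVER,
    # while the main thread runs "git pull" and "webkit-patch auto-rebaseline" and feeds
    # their output into the queue; QUIT_LOG tells the logging thread to exit.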

    # Uploaded log entries append to the existing entry unless the newentry flag is set,
    # in which case the server starts a new entry and subsequent logs append to it.
    def _log_to_server(self, log='', is_new_entry=False):
        query = {
            'log': log,
        }
        if is_new_entry:
            query['newentry'] = 'on'
        try:
            urllib2.urlopen("http://" + self.LOG_SERVER + "/updatelog", data=urllib.urlencode(query))
        except:
            traceback.print_exc(file=sys.stderr)

    def _log_to_server_thread(self):
        is_new_entry = True
        while True:
            messages = [self._log_queue.get()]
            while not self._log_queue.empty():
                messages.append(self._log_queue.get())
            self._log_to_server('\n'.join(messages), is_new_entry=is_new_entry)
            is_new_entry = False
            if self.QUIT_LOG in messages:
                return

    def _post_log_to_server(self, log):
        self._log_queue.put(log)

    def _log_line(self, handle):
        out = handle.readline().rstrip('\n')
        if out:
            if self._verbose:
                print out
            self._post_log_to_server(out)
        return out

    def _run_logged_command(self, command):
        process = self._tool.executive.popen(command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)

        out = self._log_line(process.stdout)
        while out:
            # FIXME: This should probably batch up lines if they're available and log to the server once.
            out = self._log_line(process.stdout)

    def _do_one_rebaseline(self):
        self._log_queue = Queue.Queue(256)
        log_thread = threading.Thread(name='LogToServer', target=self._log_to_server_thread)
        log_thread.start()
        try:
            old_branch_name = self._tool.scm().current_branch()
            self._run_logged_command(['git', 'pull'])
            rebaseline_command = [self._tool.filesystem.join(self._tool.scm().checkout_root, 'Tools', 'Scripts', 'webkit-patch'), 'auto-rebaseline']
            if self._verbose:
                rebaseline_command.append('--verbose')
            self._run_logged_command(rebaseline_command)
        except:
            self._log_queue.put(self.QUIT_LOG)
            traceback.print_exc(file=sys.stderr)
            # Sometimes git crashes and leaves us on a detached head.
            self._tool.scm().checkout_branch(old_branch_name)
        else:
            self._log_queue.put(self.QUIT_LOG)
        log_thread.join()

    def execute(self, options, args, tool):
        self._verbose = options.verbose
        while True:
            self._do_one_rebaseline()
            time.sleep(self.SLEEP_TIME_IN_SECONDS)