# Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import Queue
import json
import logging
import optparse
import re
import sys
import threading
import time
import traceback
import urllib
import urllib2

from webkitpy.common.checkout.baselineoptimizer import BaselineOptimizer
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import ScriptError
from webkitpy.layout_tests.controllers.test_result_writer import TestResultWriter
from webkitpy.layout_tests.models import test_failures
from webkitpy.layout_tests.models.test_expectations import TestExpectations, BASELINE_SUFFIX_LIST, SKIP
from webkitpy.layout_tests.port import builders
from webkitpy.layout_tests.port import factory
from webkitpy.tool.multicommandtool import AbstractDeclarativeCommand


_log = logging.getLogger(__name__)


# FIXME: Should TestResultWriter know how to compute this string?
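# For example (illustrative), _baseline_name(fs, "fast/dom/foo.html", "txt") yields
# "fast/dom/foo-expected.txt".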
def _baseline_name(fs, test_name, suffix):
    return fs.splitext(test_name)[0] + TestResultWriter.FILENAME_SUFFIX_EXPECTED + "." + suffix


class AbstractRebaseliningCommand(AbstractDeclarativeCommand):
    # not overriding execute() - pylint: disable=W0223

    no_optimize_option = optparse.make_option('--no-optimize', dest='optimize', action='store_false', default=True,
        help=('Do not optimize/de-dup the expectations after rebaselining (default is to de-dup automatically). '
              'You can use "webkit-patch optimize-baselines" to optimize separately.'))

    platform_options = factory.platform_options(use_globs=True)

    results_directory_option = optparse.make_option("--results-directory", help="Local results directory to use")

    suffixes_option = optparse.make_option("--suffixes", default=','.join(BASELINE_SUFFIX_LIST), action="store",
        help="Comma-separated list of file types to rebaseline")

    def __init__(self, options=None):
        super(AbstractRebaseliningCommand, self).__init__(options=options)
        self._baseline_suffix_list = BASELINE_SUFFIX_LIST
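        # _scm_changes collects SCM operations to apply later; the internal commands print it
        # as JSON on stdout so the parent webkit-patch command can apply the changes, e.g.
        # {"add": ["platform/linux/foo-expected.txt"], "delete": [], "remove-lines": []}
        # (the path here is illustrative).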
        self._scm_changes = {'add': [], 'delete': [], 'remove-lines': []}

    def _add_to_scm_later(self, path):
        self._scm_changes['add'].append(path)

    def _delete_from_scm_later(self, path):
        self._scm_changes['delete'].append(path)


class BaseInternalRebaselineCommand(AbstractRebaseliningCommand):
    def __init__(self):
        super(BaseInternalRebaselineCommand, self).__init__(options=[
            self.results_directory_option,
            self.suffixes_option,
            optparse.make_option("--builder", help="Builder to pull new baselines from"),
            optparse.make_option("--test", help="Test to rebaseline"),
            ])

    def _baseline_directory(self, builder_name):
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        override_dir = builders.rebaseline_override_dir(builder_name)
        if override_dir:
            return self._tool.filesystem.join(port.layout_tests_dir(), 'platform', override_dir)
        return port.baseline_version_dir()

    def _test_root(self, test_name):
        return self._tool.filesystem.splitext(test_name)[0]

    def _file_name_for_actual_result(self, test_name, suffix):
        return "%s-actual.%s" % (self._test_root(test_name), suffix)

    def _file_name_for_expected_result(self, test_name, suffix):
        return "%s-expected.%s" % (self._test_root(test_name), suffix)


class CopyExistingBaselinesInternal(BaseInternalRebaselineCommand):
    name = "copy-existing-baselines-internal"
    help_text = "Copy existing baselines down one level in the baseline order to ensure new baselines don't break existing passing platforms."

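    # Returns the basenames of the baseline directories that fall back directly to
    # path_to_rebaseline in some port's baseline search path; copying the current baseline
    # into those directories keeps ports that pass today from picking up the new result.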
    @memoized
    def _immediate_predecessors_in_fallback(self, path_to_rebaseline):
        port_names = self._tool.port_factory.all_port_names()
        immediate_predecessors_in_fallback = []
        for port_name in port_names:
            port = self._tool.port_factory.get(port_name)
            if not port.buildbot_archives_baselines():
                continue
            baseline_search_path = port.baseline_search_path()
            try:
                index = baseline_search_path.index(path_to_rebaseline)
                if index:
                    immediate_predecessors_in_fallback.append(self._tool.filesystem.basename(baseline_search_path[index - 1]))
            except ValueError:
                # index() raises a ValueError if the item isn't in the list.
                pass
        return immediate_predecessors_in_fallback

    def _port_for_primary_baseline(self, baseline):
        for port in [self._tool.port_factory.get(port_name) for port_name in self._tool.port_factory.all_port_names()]:
            if self._tool.filesystem.basename(port.baseline_version_dir()) == baseline:
                return port
        raise Exception("Failed to find port for primary baseline %s." % baseline)

    def _copy_existing_baseline(self, builder_name, test_name, suffix):
        baseline_directory = self._baseline_directory(builder_name)
        ports = [self._port_for_primary_baseline(baseline) for baseline in self._immediate_predecessors_in_fallback(baseline_directory)]

        old_baselines = []
        new_baselines = []

        # Need to gather all the baseline paths before modifying the filesystem since
        # the modifications can affect the results of port.expected_filename.
        for port in ports:
            old_baseline = port.expected_filename(test_name, "." + suffix)
            if not self._tool.filesystem.exists(old_baseline):
                _log.debug("No existing baseline for %s." % test_name)
                continue

            new_baseline = self._tool.filesystem.join(port.baseline_path(), self._file_name_for_expected_result(test_name, suffix))
            if self._tool.filesystem.exists(new_baseline):
                _log.debug("Existing baseline at %s, not copying over it." % new_baseline)
                continue

            expectations = TestExpectations(port, [test_name])
            if SKIP in expectations.get_expectations(test_name):
                _log.debug("%s is skipped on %s." % (test_name, port.name()))
                continue

            old_baselines.append(old_baseline)
            new_baselines.append(new_baseline)

        for i in range(len(old_baselines)):
            old_baseline = old_baselines[i]
            new_baseline = new_baselines[i]

            _log.debug("Copying baseline from %s to %s." % (old_baseline, new_baseline))
            self._tool.filesystem.maybe_make_directory(self._tool.filesystem.dirname(new_baseline))
            self._tool.filesystem.copyfile(old_baseline, new_baseline)
            if not self._tool.scm().exists(new_baseline):
                self._add_to_scm_later(new_baseline)

    def execute(self, options, args, tool):
        for suffix in options.suffixes.split(','):
            self._copy_existing_baseline(options.builder, options.test, suffix)
        print json.dumps(self._scm_changes)


class RebaselineTest(BaseInternalRebaselineCommand):
    name = "rebaseline-test-internal"
    help_text = "Rebaseline a single test from a buildbot. Only intended for use by other webkit-patch commands."

    def _results_url(self, builder_name):
        return self._tool.buildbot.builder_with_name(builder_name).latest_layout_test_results_url()

    def _save_baseline(self, data, target_baseline, baseline_directory, test_name, suffix):
        if not data:
            _log.debug("No baseline data to save.")
            return

        filesystem = self._tool.filesystem
        filesystem.maybe_make_directory(filesystem.dirname(target_baseline))
        filesystem.write_binary_file(target_baseline, data)
        if not self._tool.scm().exists(target_baseline):
            self._add_to_scm_later(target_baseline)

    def _rebaseline_test(self, builder_name, test_name, suffix, results_url):
        baseline_directory = self._baseline_directory(builder_name)

        source_baseline = "%s/%s" % (results_url, self._file_name_for_actual_result(test_name, suffix))
        target_baseline = self._tool.filesystem.join(baseline_directory, self._file_name_for_expected_result(test_name, suffix))

        _log.debug("Retrieving %s." % source_baseline)
        self._save_baseline(self._tool.web.get_binary(source_baseline, convert_404_to_None=True), target_baseline, baseline_directory, test_name, suffix)

    def _rebaseline_test_and_update_expectations(self, options):
        port = self._tool.port_factory.get_from_builder_name(options.builder)
        if port.reference_files(options.test):
            _log.warning("Cannot rebaseline reftest: %s", options.test)
            return

        if options.results_directory:
            results_url = 'file://' + options.results_directory
        else:
            results_url = self._results_url(options.builder)
        self._baseline_suffix_list = options.suffixes.split(',')

        for suffix in self._baseline_suffix_list:
            self._rebaseline_test(options.builder, options.test, suffix, results_url)
        self._scm_changes['remove-lines'].append({'builder': options.builder, 'test': options.test})

    def execute(self, options, args, tool):
        self._rebaseline_test_and_update_expectations(options)
        print json.dumps(self._scm_changes)


class OptimizeBaselines(AbstractRebaseliningCommand):
    name = "optimize-baselines"
    help_text = "Reshuffles the baselines for the given tests to use as little space on disk as possible."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(OptimizeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--no-modify-scm', action='store_true', default=False, help='Dump SCM commands as JSON instead of running them'),
            ] + self.platform_options)

    def _optimize_baseline(self, optimizer, test_name):
        files_to_delete = []
        files_to_add = []
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            succeeded, more_files_to_delete, more_files_to_add = optimizer.optimize(baseline_name)
            if not succeeded:
                print "Heuristics failed to optimize %s" % baseline_name
            files_to_delete.extend(more_files_to_delete)
            files_to_add.extend(more_files_to_add)
        return files_to_delete, files_to_add

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return
        port = tool.port_factory.get(port_names[0])
        optimizer = BaselineOptimizer(tool, port, port_names, skip_scm_commands=options.no_modify_scm)
        tests = port.tests(args)
        for test_name in tests:
            files_to_delete, files_to_add = self._optimize_baseline(optimizer, test_name)
            for path in files_to_delete:
                self._delete_from_scm_later(path)
            for path in files_to_add:
                self._add_to_scm_later(path)

        print json.dumps(self._scm_changes)


class AnalyzeBaselines(AbstractRebaseliningCommand):
    name = "analyze-baselines"
    help_text = "Analyzes the baselines for the given tests and prints results that are identical."
    show_in_main_help = True
    argument_names = "TEST_NAMES"

    def __init__(self):
        super(AnalyzeBaselines, self).__init__(options=[
            self.suffixes_option,
            optparse.make_option('--missing', action='store_true', default=False, help='show missing baselines as well'),
            ] + self.platform_options)
        self._optimizer_class = BaselineOptimizer  # overridable for testing
        self._baseline_optimizer = None
        self._port = None

    def _write(self, msg):
        print msg

    def _analyze_baseline(self, options, test_name):
        for suffix in self._baseline_suffix_list:
            baseline_name = _baseline_name(self._tool.filesystem, test_name, suffix)
            results_by_directory = self._baseline_optimizer.read_results_by_directory(baseline_name)
            if results_by_directory:
                self._write("%s:" % baseline_name)
                self._baseline_optimizer.write_by_directory(results_by_directory, self._write, "  ")
            elif options.missing:
                self._write("%s: (no baselines found)" % baseline_name)

    def execute(self, options, args, tool):
        self._baseline_suffix_list = options.suffixes.split(',')
        port_names = tool.port_factory.all_port_names(options.platform)
        if not port_names:
            print "No port names match '%s'" % options.platform
            return
        self._port = tool.port_factory.get(port_names[0])
        self._baseline_optimizer = self._optimizer_class(tool, self._port, port_names, skip_scm_commands=False)
        for test_name in self._port.tests(args):
            self._analyze_baseline(options, test_name)


class AbstractParallelRebaselineCommand(AbstractRebaseliningCommand):
    # not overriding execute() - pylint: disable=W0223

    def __init__(self, options=None):
        super(AbstractParallelRebaselineCommand, self).__init__(options=options)
        self._builder_data = {}

    def builder_data(self):
        if not self._builder_data:
            for builder_name in self._release_builders():
                builder = self._tool.buildbot.builder_with_name(builder_name)
                self._builder_data[builder_name] = builder.latest_layout_test_results()
        return self._builder_data

    # The release builders cycle much faster than the debug ones and cover all the platforms.
    def _release_builders(self):
        release_builders = []
        for builder_name in builders.all_builder_names():
            if builder_name.find('ASAN') != -1:
                continue
            port = self._tool.port_factory.get_from_builder_name(builder_name)
            if port.test_configuration().build_type == 'release':
                release_builders.append(builder_name)
        return release_builders

    def _run_webkit_patch(self, args, verbose):
        try:
            verbose_args = ['--verbose'] if verbose else []
            stderr = self._tool.executive.run_command([self._tool.path()] + verbose_args + args, cwd=self._tool.scm().checkout_root, return_stderr=True)
            for line in stderr.splitlines():
                _log.warning(line)
        except ScriptError as e:
            _log.error(e)

    def _builders_to_fetch_from(self, builders_to_check):
        # This routine returns the subset of builders that will cover all of the baseline search paths
        # used in the input list. In particular, if the input list contains both Release and Debug
        # versions of a configuration, we *only* return the Release version (since we don't save
        # debug versions of baselines).
        release_builders = set()
        debug_builders = set()
        builders_to_fallback_paths = {}
        for builder in builders_to_check:
            port = self._tool.port_factory.get_from_builder_name(builder)
            if port.test_configuration().build_type == 'release':
                release_builders.add(builder)
            else:
                debug_builders.add(builder)
        for builder in list(release_builders) + list(debug_builders):
            port = self._tool.port_factory.get_from_builder_name(builder)
            fallback_path = port.baseline_search_path()
            if fallback_path not in builders_to_fallback_paths.values():
                builders_to_fallback_paths[builder] = fallback_path
        return builders_to_fallback_paths.keys()

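    # test_prefix_list maps a test (or directory) prefix to a dict of builder names to the
    # suffixes that need rebaselining, e.g. {"fast/dom": {"WebKit Linux": ["txt", "png"]}}
    # (the builder name here is purely illustrative).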
    def _rebaseline_commands(self, test_prefix_list, options):
        path_to_webkit_patch = self._tool.path()
        cwd = self._tool.scm().checkout_root
        copy_baseline_commands = []
        rebaseline_commands = []
        lines_to_remove = {}
        port = self._tool.port_factory.get()

        for test_prefix in test_prefix_list:
            for test in port.tests([test_prefix]):
                for builder in self._builders_to_fetch_from(test_prefix_list[test_prefix]):
                    actual_failures_suffixes = self._suffixes_for_actual_failures(test, builder, test_prefix_list[test_prefix][builder])
                    if not actual_failures_suffixes:
                        # If we're not going to rebaseline the test because it's passing on this
                        # builder, we still want to remove the line from TestExpectations.
                        if test not in lines_to_remove:
                            lines_to_remove[test] = []
                        lines_to_remove[test].append(builder)
                        continue

                    suffixes = ','.join(actual_failures_suffixes)
                    cmd_line = ['--suffixes', suffixes, '--builder', builder, '--test', test]
                    if options.results_directory:
                        cmd_line.extend(['--results-directory', options.results_directory])
                    if options.verbose:
                        cmd_line.append('--verbose')
                    copy_baseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'copy-existing-baselines-internal'] + cmd_line, cwd]))
                    rebaseline_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'rebaseline-test-internal'] + cmd_line, cwd]))
        return copy_baseline_commands, rebaseline_commands, lines_to_remove

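    # Each internal command prints a single JSON object on stdout in the _scm_changes format,
    # e.g. {"add": [...], "delete": [...], "remove-lines": [{"builder": ..., "test": ...}]};
    # collect those results here so the SCM operations can be applied serially.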
    def _serial_commands(self, command_results):
        files_to_add = set()
        files_to_delete = set()
        lines_to_remove = {}
        for output in [result[1].split('\n') for result in command_results]:
            file_added = False
            for line in output:
                try:
                    if line:
                        parsed_line = json.loads(line)
                        if 'add' in parsed_line:
                            files_to_add.update(parsed_line['add'])
                        if 'delete' in parsed_line:
                            files_to_delete.update(parsed_line['delete'])
                        if 'remove-lines' in parsed_line:
                            for line_to_remove in parsed_line['remove-lines']:
                                test = line_to_remove['test']
                                builder = line_to_remove['builder']
                                if test not in lines_to_remove:
                                    lines_to_remove[test] = []
                                lines_to_remove[test].append(builder)
                        file_added = True
                except ValueError:
                    _log.debug('"%s" is not a JSON object, ignoring' % line)

            if not file_added:
                _log.debug('Could not add file based off output "%s"' % output)

        return list(files_to_add), list(files_to_delete), lines_to_remove

    def _optimize_baselines(self, test_prefix_list, verbose=False):
        optimize_commands = []
        for test in test_prefix_list:
            all_suffixes = set()
            for builder in self._builders_to_fetch_from(test_prefix_list[test]):
                all_suffixes.update(self._suffixes_for_actual_failures(test, builder, test_prefix_list[test][builder]))

            # FIXME: We should propagate the platform options as well.
            cmd_line = ['--no-modify-scm', '--suffixes', ','.join(all_suffixes), test]
            if verbose:
                cmd_line.append('--verbose')

            path_to_webkit_patch = self._tool.path()
            cwd = self._tool.scm().checkout_root
            optimize_commands.append(tuple([[self._tool.executable, path_to_webkit_patch, 'optimize-baselines'] + cmd_line, cwd]))
        return optimize_commands

    def _update_expectations_files(self, lines_to_remove):
        # FIXME: This routine is way too expensive. We're creating O(n ports) TestExpectations objects.
        # This is slow and uses a lot of memory.
        tests = lines_to_remove.keys()
        to_remove = []

        # This is so we remove lines for builders that skip this test, e.g. Android skips most
        # tests and we don't want to leave stray [ Android ] lines in TestExpectations.
        # This is only necessary for "webkit-patch rebaseline" and for rebaselining expected
        # failures from garden-o-matic. rebaseline-expectations and auto-rebaseline will always
        # pass the exact set of ports to rebaseline.
        for port_name in self._tool.port_factory.all_port_names():
            port = self._tool.port_factory.get(port_name)
            generic_expectations = TestExpectations(port, tests=tests, include_overrides=False)
            full_expectations = TestExpectations(port, tests=tests, include_overrides=True)
            for test in tests:
                if self._port_skips_test(port, test, generic_expectations, full_expectations):
                    for test_configuration in port.all_test_configurations():
                        if test_configuration.version == port.test_configuration().version:
                            to_remove.append((test, test_configuration))

        for test in lines_to_remove:
            for builder in lines_to_remove[test]:
                port = self._tool.port_factory.get_from_builder_name(builder)
                for test_configuration in port.all_test_configurations():
                    if test_configuration.version == port.test_configuration().version:
                        to_remove.append((test, test_configuration))

        port = self._tool.port_factory.get()
        expectations = TestExpectations(port, include_overrides=False)
        expectationsString = expectations.remove_configurations(to_remove)
        path = port.path_to_generic_test_expectations_file()
        self._tool.filesystem.write_text_file(path, expectationsString)

    def _port_skips_test(self, port, test, generic_expectations, full_expectations):
        fs = port.host.filesystem
        if port.default_smoke_test_only():
            smoke_test_filename = fs.join(port.layout_tests_dir(), 'SmokeTests')
            if fs.exists(smoke_test_filename) and test not in fs.read_text_file(smoke_test_filename):
                return True

        return (SKIP in full_expectations.get_expectations(test) and
                SKIP not in generic_expectations.get_expectations(test))

    def _run_in_parallel_and_update_scm(self, commands):
        command_results = self._tool.executive.run_in_parallel(commands)
        log_output = '\n'.join(result[2] for result in command_results).replace('\n\n', '\n')
        for line in log_output.split('\n'):
            if line:
                print >> sys.stderr, line  # FIXME: Figure out how to log properly.

        files_to_add, files_to_delete, lines_to_remove = self._serial_commands(command_results)
        if files_to_delete:
            self._tool.scm().delete_list(files_to_delete)
        if files_to_add:
            self._tool.scm().add_list(files_to_add)
        return lines_to_remove

    def _rebaseline(self, options, test_prefix_list):
        for test, builders_to_check in sorted(test_prefix_list.items()):
            _log.info("Rebaselining %s" % test)
            for builder, suffixes in sorted(builders_to_check.items()):
                _log.debug("  %s: %s" % (builder, ",".join(suffixes)))

        copy_baseline_commands, rebaseline_commands, extra_lines_to_remove = self._rebaseline_commands(test_prefix_list, options)
        lines_to_remove = {}

        if copy_baseline_commands:
            self._run_in_parallel_and_update_scm(copy_baseline_commands)
        if rebaseline_commands:
            lines_to_remove = self._run_in_parallel_and_update_scm(rebaseline_commands)

        for test in extra_lines_to_remove:
            if test in lines_to_remove:
                lines_to_remove[test] = lines_to_remove[test] + extra_lines_to_remove[test]
            else:
                lines_to_remove[test] = extra_lines_to_remove[test]

        if lines_to_remove:
            self._update_expectations_files(lines_to_remove)

        if options.optimize:
            self._run_in_parallel_and_update_scm(self._optimize_baselines(test_prefix_list, options.verbose))

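    # Intersects the suffixes requested for this test/builder with the suffixes of the results
    # the builder actually reported as failing; returns an empty set if the bot has no results
    # for the test.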
    def _suffixes_for_actual_failures(self, test, builder_name, existing_suffixes):
        actual_results = self.builder_data()[builder_name].actual_results(test)
        if not actual_results:
            return set()
        return set(existing_suffixes) & TestExpectations.suffixes_for_actual_expectations_string(actual_results)


class RebaselineJson(AbstractParallelRebaselineCommand):
    name = "rebaseline-json"
    help_text = "Rebaseline based off JSON passed to stdin. Intended to only be called from other scripts."

    def __init__(self):
        super(RebaselineJson, self).__init__(options=[
            self.no_optimize_option,
            self.results_directory_option,
            ])

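    # The JSON on stdin has the test_prefix_list shape described above, e.g.
    # {"fast/dom/foo.html": {"WebKit Linux": ["txt", "png"]}} (test and builder names illustrative).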
    def execute(self, options, args, tool):
        self._rebaseline(options, json.loads(sys.stdin.read()))


class RebaselineExpectations(AbstractParallelRebaselineCommand):
    name = "rebaseline-expectations"
    help_text = "Rebaselines the tests indicated in TestExpectations."
    show_in_main_help = True

    def __init__(self):
        super(RebaselineExpectations, self).__init__(options=[
            self.no_optimize_option,
            ] + self.platform_options)
        self._test_prefix_list = None

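    # Returns {test_name: [suffixes]} for every test marked Rebaseline in this port's
    # expectations, falling back to all suffixes when the line doesn't name specific ones.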
    def _tests_to_rebaseline(self, port):
        tests_to_rebaseline = {}
        for path, value in port.expectations_dict().items():
            expectations = TestExpectations(port, include_overrides=False, expectations_dict={path: value})
            for test in expectations.get_rebaselining_failures():
                suffixes = TestExpectations.suffixes_for_expectations(expectations.get_expectations(test))
                tests_to_rebaseline[test] = suffixes or BASELINE_SUFFIX_LIST
        return tests_to_rebaseline

    def _add_tests_to_rebaseline_for_port(self, port_name):
        builder_name = builders.builder_name_for_port_name(port_name)
        if not builder_name:
            return
        tests = self._tests_to_rebaseline(self._tool.port_factory.get(port_name)).items()

        if tests:
            _log.info("Retrieving results for %s from %s." % (port_name, builder_name))

        for test_name, suffixes in tests:
            _log.info("    %s (%s)" % (test_name, ','.join(suffixes)))
            if test_name not in self._test_prefix_list:
                self._test_prefix_list[test_name] = {}
            self._test_prefix_list[test_name][builder_name] = suffixes

    def execute(self, options, args, tool):
        options.results_directory = None
        self._test_prefix_list = {}
        port_names = tool.port_factory.all_port_names(options.platform)
        for port_name in port_names:
            self._add_tests_to_rebaseline_for_port(port_name)
        if not self._test_prefix_list:
            _log.warning("Did not find any tests marked Rebaseline.")
            return

        self._rebaseline(options, self._test_prefix_list)


class Rebaseline(AbstractParallelRebaselineCommand):
    name = "rebaseline"
    help_text = "Rebaseline tests with results from the build bots. Shows the list of failing tests on the builders if no test names are provided."
    show_in_main_help = True
    argument_names = "[TEST_NAMES]"

    def __init__(self):
        super(Rebaseline, self).__init__(options=[
            self.no_optimize_option,
            # FIXME: should we support the platform options in addition to (or instead of) --builders?
            self.suffixes_option,
            self.results_directory_option,
            optparse.make_option("--builders", default=None, action="append", help="Comma-separated list of builders to pull new baselines from (can also be provided multiple times)"),
            ])

    def _builders_to_pull_from(self):
        chosen_names = self._tool.user.prompt_with_list("Which builder to pull results from:", self._release_builders(), can_choose_multiple=True)
        return [self._builder_with_name(name) for name in chosen_names]

    def _builder_with_name(self, name):
        return self._tool.buildbot.builder_with_name(name)

    def execute(self, options, args, tool):
        if not args:
            _log.error("Must list tests to rebaseline.")
            return

        if options.builders:
            builders_to_check = []
            for builder_names in options.builders:
                builders_to_check += [self._builder_with_name(name) for name in builder_names.split(",")]
        else:
            builders_to_check = self._builders_to_pull_from()

        test_prefix_list = {}
        suffixes_to_update = options.suffixes.split(",")

        for builder in builders_to_check:
            for test in args:
                if test not in test_prefix_list:
                    test_prefix_list[test] = {}
                test_prefix_list[test][builder.name()] = suffixes_to_update

        if options.verbose:
            _log.debug("rebaseline-json: " + str(test_prefix_list))

        self._rebaseline(options, test_prefix_list)


class AutoRebaseline(AbstractParallelRebaselineCommand):
    name = "auto-rebaseline"
    help_text = "Rebaselines any NeedsRebaseline lines in TestExpectations that have cycled through all the bots."
    AUTO_REBASELINE_BRANCH_NAME = "auto-rebaseline-temporary-branch"

    # Rietveld uploader stinks. Limit the number of rebaselines in a given patch to keep upload from failing.
    # FIXME: http://crbug.com/263676 Obviously we should fix the uploader here.
    MAX_LINES_TO_REBASELINE = 200

    SECONDS_BEFORE_GIVING_UP = 300

    def __init__(self):
        super(AutoRebaseline, self).__init__(options=[
            # FIXME: Remove this option.
            self.no_optimize_option,
            # FIXME: Remove this option.
            self.results_directory_option,
            ])

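    # Returns [{"builder": name, "revision": blink_revision}, ...] for the latest run on each
    # release builder, or [] if any run exited early (its results would be incomplete).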
    def bot_revision_data(self):
        revisions = []
        for result in self.builder_data().values():
            if result.run_was_interrupted():
                _log.error("Can't rebaseline because the latest run on %s exited early." % result.builder_name())
                return []
            revisions.append({
                "builder": result.builder_name(),
                "revision": result.blink_revision(),
            })
        return revisions

    def tests_to_rebaseline(self, tool, min_revision, print_revisions):
        port = tool.port_factory.get()
        expectations_file_path = port.path_to_generic_test_expectations_file()

        tests = set()
        revision = None
        author = None
        bugs = set()
        has_any_needs_rebaseline_lines = False

        for line in tool.scm().blame(expectations_file_path).split("\n"):
            comment_index = line.find("#")
            if comment_index == -1:
                comment_index = len(line)
            line_without_comments = re.sub(r"\s+", " ", line[:comment_index].strip())

            if "NeedsRebaseline" not in line_without_comments:
                continue

            has_any_needs_rebaseline_lines = True

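            # Parses a blame line that looks roughly like (illustrative; the exact format
            # depends on the scm wrapper's blame output):
            #   6469e754a1 (<user@chromium.org> 2013-04-28 04:52:41 +0000 12) crbug.com/123 fast/dom/foo.html [ NeedsRebaseline ]
            # group(1) is the commit hash, group(2) the author and group(3) the test name.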
            parsed_line = re.match("^(\S*)[^(]*\((\S*).*?([^ ]*)\ \[[^[]*$", line_without_comments)

            commit_hash = parsed_line.group(1)
            svn_revision = tool.scm().svn_revision_from_git_commit(commit_hash)

            test = parsed_line.group(3)
            if print_revisions:
                _log.info("%s is waiting for r%s" % (test, svn_revision))

            if not svn_revision or svn_revision > min_revision:
                continue

            if revision and svn_revision != revision:
                continue

            if not revision:
                revision = svn_revision
                author = parsed_line.group(2)

            bugs.update(re.findall("crbug\.com\/(\d+)", line_without_comments))
            tests.add(test)

            if len(tests) >= self.MAX_LINES_TO_REBASELINE:
                _log.info("Too many tests to rebaseline in one patch. Doing the first %d." % self.MAX_LINES_TO_REBASELINE)
                break

        return tests, revision, author, bugs, has_any_needs_rebaseline_lines

    def link_to_patch(self, revision):
        return "http://src.chromium.org/viewvc/blink?view=revision&revision=" + str(revision)

    def commit_message(self, author, revision, bugs):
        bug_string = ""
        if bugs:
            bug_string = "BUG=%s\n" % ",".join(bugs)

        return """Auto-rebaseline for r%s

%s

%sTBR=%s
""" % (revision, self.link_to_patch(revision), bug_string, author)

    def get_test_prefix_list(self, tests):
        test_prefix_list = {}
        lines_to_remove = {}

        for builder_name in self._release_builders():
            port_name = builders.port_name_for_builder_name(builder_name)
            port = self._tool.port_factory.get(port_name)
            expectations = TestExpectations(port, include_overrides=True)
            for test in expectations.get_needs_rebaseline_failures():
                if test not in tests:
                    continue

                if test not in test_prefix_list:
                    lines_to_remove[test] = []
                    test_prefix_list[test] = {}
                lines_to_remove[test].append(builder_name)
                test_prefix_list[test][builder_name] = BASELINE_SUFFIX_LIST

        return test_prefix_list, lines_to_remove

    def _run_git_cl_command(self, options, command):
        subprocess_command = ['git', 'cl'] + command
        if options.verbose:
            subprocess_command.append('--verbose')

        process = self._tool.executive.popen(subprocess_command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)
        last_output_time = time.time()

        # git cl sometimes completely hangs. Bail if we haven't gotten any output to stdout/stderr in a while.
        while process.poll() is None and time.time() < last_output_time + self.SECONDS_BEFORE_GIVING_UP:
            # FIXME: This doesn't make any sense. readline blocks, so all this code to
            # try and bail is useless. Instead, we should do the readline calls on a
            # subthread. Then the rest of this code would make sense.
            out = process.stdout.readline().rstrip('\n')
            if out:
                last_output_time = time.time()
                _log.info(out)

        if process.poll() is None:
            _log.error('Command hung: %s' % subprocess_command)
            return False
        return True

    # FIXME: Move this somewhere more general.
    def tree_status(self):
        blink_tree_status_url = "http://blink-status.appspot.com/status"
        status = urllib2.urlopen(blink_tree_status_url).read().lower()
        if status.find('closed') != -1 or status == "0":
            return 'closed'
        elif status.find('open') != -1 or status == "1":
            return 'open'
        return 'unknown'

    def execute(self, options, args, tool):
        if tool.scm().executable_name == "svn":
            _log.error("Auto rebaseline only works with a git checkout.")
            return

        if tool.scm().has_working_directory_changes():
            _log.error("Cannot proceed with working directory changes. Clean working directory first.")
            return

        revision_data = self.bot_revision_data()
        if not revision_data:
            return

        min_revision = int(min([item["revision"] for item in revision_data]))
        tests, revision, author, bugs, has_any_needs_rebaseline_lines = self.tests_to_rebaseline(tool, min_revision, print_revisions=options.verbose)

        if options.verbose:
            _log.info("Min revision across all bots is %s." % min_revision)
            for item in revision_data:
                _log.info("%s: r%s" % (item["builder"], item["revision"]))

        if not tests:
            _log.debug('No tests to rebaseline.')
            return

        if self.tree_status() == 'closed':
            _log.info('Cannot proceed. Tree is closed.')
            return

        _log.info('Rebaselining %s for r%s by %s.' % (list(tests), revision, author))

        test_prefix_list, lines_to_remove = self.get_test_prefix_list(tests)

        did_finish = False
        try:
            old_branch_name = tool.scm().current_branch()
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)
            tool.scm().create_clean_branch(self.AUTO_REBASELINE_BRANCH_NAME)

            # If the tests are passing everywhere, then this list will be empty. We don't need
            # to rebaseline, but we'll still need to update TestExpectations.
            if test_prefix_list:
                self._rebaseline(options, test_prefix_list)

            tool.scm().commit_locally_with_message(self.commit_message(author, revision, bugs))

            # FIXME: It would be nice if we could dcommit the patch without uploading, but still
            # go through all the precommit hooks. For rebaselines with lots of files, uploading
            # takes a long time and sometimes fails, but we don't want to commit if, e.g. the
            # tree is closed.
            did_finish = self._run_git_cl_command(options, ['upload', '-f'])

            if did_finish:
                # Uploading can take a very long time. Do another pull to make sure TestExpectations is up to date,
                # so the dcommit can go through.
                # FIXME: Log the pull and dcommit stdout/stderr to the log-server.
                tool.executive.run_command(['git', 'pull'])

                self._run_git_cl_command(options, ['dcommit', '-f'])
        except Exception as e:
            _log.error(e)
        finally:
            if did_finish:
                self._run_git_cl_command(options, ['set_close'])
            tool.scm().ensure_cleanly_tracking_remote_master()
            tool.scm().checkout_branch(old_branch_name)
            tool.scm().delete_branch(self.AUTO_REBASELINE_BRANCH_NAME)


class RebaselineOMatic(AbstractDeclarativeCommand):
    name = "rebaseline-o-matic"
    help_text = "Calls webkit-patch auto-rebaseline in a loop."
    show_in_main_help = True

    SLEEP_TIME_IN_SECONDS = 30
    LOG_SERVER = 'blinkrebaseline.appspot.com'
    QUIT_LOG = '##QUIT##'

    # Uploaded log entries append to the existing entry unless the
    # newentry flag is set, in which case a new entry is started and
    # subsequent logs append to that one.
    def _log_to_server(self, log='', is_new_entry=False):
        query = {
            'log': log,
        }
        if is_new_entry:
            query['newentry'] = 'on'
        try:
            urllib2.urlopen("http://" + self.LOG_SERVER + "/updatelog", data=urllib.urlencode(query))
        except:
            traceback.print_exc(file=sys.stderr)

    def _log_to_server_thread(self):
        is_new_entry = True
        while True:
            messages = [self._log_queue.get()]
            while not self._log_queue.empty():
                messages.append(self._log_queue.get())
            self._log_to_server('\n'.join(messages), is_new_entry=is_new_entry)
            is_new_entry = False
            if self.QUIT_LOG in messages:
                return

    def _post_log_to_server(self, log):
        self._log_queue.put(log)

    def _log_line(self, handle):
        out = handle.readline().rstrip('\n')
        if out:
            if self._verbose:
                print out
            self._post_log_to_server(out)
        return out

    def _run_logged_command(self, command):
        process = self._tool.executive.popen(command, stdout=self._tool.executive.PIPE, stderr=self._tool.executive.STDOUT)

        out = self._log_line(process.stdout)
        while out:
            # FIXME: This should probably batch up lines if they're available and log to the server once.
            out = self._log_line(process.stdout)

    def _do_one_rebaseline(self):
        self._log_queue = Queue.Queue(256)
        log_thread = threading.Thread(name='LogToServer', target=self._log_to_server_thread)
        log_thread.start()
        try:
            old_branch_name = self._tool.scm().current_branch()
            self._run_logged_command(['git', 'pull'])
            rebaseline_command = [self._tool.filesystem.join(self._tool.scm().checkout_root, 'Tools', 'Scripts', 'webkit-patch'), 'auto-rebaseline']
            if self._verbose:
                rebaseline_command.append('--verbose')
            self._run_logged_command(rebaseline_command)
        except:
            self._log_queue.put(self.QUIT_LOG)
            traceback.print_exc(file=sys.stderr)
            # Sometimes git crashes and leaves us on a detached head.
            self._tool.scm().checkout_branch(old_branch_name)
        else:
            self._log_queue.put(self.QUIT_LOG)
        log_thread.join()

    def execute(self, options, args, tool):
        self._verbose = options.verbose
        while True:
            self._do_one_rebaseline()
            time.sleep(self.SLEEP_TIME_IN_SECONDS)