# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.

An example usage (using svn cl's):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an svn revision,
the script will attempt to find the git SHA1 where svn changes up to that
revision were merged in.

An example usage (using git hashes):

./tools/bisect-perf-regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
57 import post_perf_builder_job
58 from telemetry.page import cloud_storage
60 # The additional repositories that might need to be bisected.
61 # If the repository has any dependant repositories (such as skia/src needs
62 # skia/include and skia/gyp to be updated), specify them in the 'depends'
63 # so that they're synced appropriately.
65 # src: path to the working directory.
66 # recurse: True if this repositry will get bisected.
67 # depends: A list of other repositories that are actually part of the same
69 # svn: Needed for git workflow to resolve hashes to svn revisions.
70 # from: Parent depot that must be bisected before this is bisected.
71 # deps_var: Key name in vars varible in DEPS file that has revision information.
77 "from" : ['cros', 'android-chrome'],
78 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
79 'deps_var': 'chromium_rev'
82 "src" : "src/third_party/WebKit",
85 "from" : ['chromium'],
86 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
87 'deps_var': 'webkit_revision'
90 "src" : "src/third_party/angle",
91 "src_old" : "src/third_party/angle_dx11",
94 "from" : ['chromium'],
96 'deps_var': 'angle_revision'
102 "from" : ['chromium'],
103 "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
104 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
105 'deps_var': 'v8_revision'
107 'v8_bleeding_edge' : {
108 "src" : "src/v8_bleeding_edge",
111 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
113 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
114 'deps_var': 'v8_revision'
117 "src" : "src/third_party/skia/src",
119 "svn" : "http://skia.googlecode.com/svn/trunk/src",
120 "depends" : ['skia/include', 'skia/gyp'],
121 "from" : ['chromium'],
122 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
123 'deps_var': 'skia_revision'
126 "src" : "src/third_party/skia/include",
128 "svn" : "http://skia.googlecode.com/svn/trunk/include",
130 "from" : ['chromium'],
131 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
135 "src" : "src/third_party/skia/gyp",
137 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
139 "from" : ['chromium'],
140 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# Paths to ChromeOS tooling and ssh keys, relative to the 'src' checkout.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# NOTE(review): key file names reconstructed from the standard chromite
# layout — confirm against the original checkout.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Build result states used by the bisect loop.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch send along with DEPS patch to tryserver.
# When a build requested is posted with a patch, bisect builders on tryserver,
# once build is produced, it reads SHA value from this file and appends it
# to build archive filename.
# NOTE(review): patch body reconstructed — confirm the hunk matches the
# original template.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict of depot-name -> depot-config entries to merge into
        DEPOT_DEPS_NAME.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Merge via a copy + update; the original dict(a.items() + b.items())
  # idiom only works on Python 2 where items() returns lists.
  merged = dict(DEPOT_DEPS_NAME)
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
196 def CalculateTruncatedMean(data_set, truncate_percent):
197 """Calculates the truncated mean of a set of values.
199 Note that this isn't just the mean of the set of values with the highest
200 and lowest values discarded; the non-discarded values are also weighted
201 differently depending how many values are discarded.
204 data_set: Non-empty list of values.
205 truncate_percent: The % from the upper and lower portions of the data set
206 to discard, expressed as a value in [0, 1].
209 The truncated mean as a float.
212 TypeError: The data set was empty after discarding values.
214 if len(data_set) > 2:
215 data_set = sorted(data_set)
217 discard_num_float = len(data_set) * truncate_percent
218 discard_num_int = int(math.floor(discard_num_float))
219 kept_weight = len(data_set) - discard_num_float * 2
221 data_set = data_set[discard_num_int:len(data_set)-discard_num_int]
223 weight_left = 1.0 - (discard_num_float - discard_num_int)
226 # If the % to discard leaves a fractional portion, need to weight those
228 unweighted_vals = data_set[1:len(data_set)-1]
229 weighted_vals = [data_set[0], data_set[len(data_set)-1]]
230 weighted_vals = [w * weight_left for w in weighted_vals]
231 data_set = weighted_vals + unweighted_vals
233 kept_weight = len(data_set)
235 truncated_mean = reduce(lambda x, y: float(x) + float(y),
236 data_set) / kept_weight
238 return truncated_mean
def CalculateMean(values):
  """Calculates the arithmetic mean of a list of values."""
  # A truncation percentage of 0.0 discards nothing, so this is the plain mean.
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Get the distance between the two groups. List comprehensions instead of
  # map(): on Python 3 a map object is a one-shot iterator and would be
  # exhausted by min() before max() ran.
  means_good = [CalculateMean(r) for r in good_results_lists]
  means_bad = [CalculateMean(r) for r in bad_results_lists]
  bounds_good = (min(means_good), max(means_good))
  bounds_bad = (min(means_bad), max(means_bad))
  dist_between_groups = min(
      math.fabs(bounds_bad[1] - bounds_good[0]),
      math.fabs(bounds_bad[0] - bounds_good[1]))

  # Get the sum of the standard deviations of the two groups.
  good_results_flattened = sum(good_results_lists, [])
  bad_results_flattened = sum(bad_results_lists, [])
  stddev_good = CalculateStandardDeviation(good_results_flattened)
  stddev_bad = CalculateStandardDeviation(bad_results_flattened)
  stddev_sum = stddev_good + stddev_bad

  # Clamp the ratio to [0, 1] and express it as an integer percentage.
  confidence = dist_between_groups / (max(0.0001, stddev_sum))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  # A single sample has no spread; avoids division by zero in the
  # (n - 1) Bessel-corrected variance below.
  if len(values) == 1:
    return 0.0

  mean = CalculateMean(values)
  differences_from_mean = [float(x) - mean for x in values]
  squared_differences = [float(x * x) for x in differences_from_mean]
  variance = sum(squared_differences) / (len(values) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  if before == after:
    return 0.0
  if before == 0:
    # Undefined relative change when the baseline is zero.
    return float('nan')
  difference = after - before
  # float() forces true division; with two ints on Python 2 the plain '/'
  # would silently floor (e.g. (12 - 10) / 10 == 0).
  return math.fabs(float(difference) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error over several sets of samples.

  Args:
    work_sets: A list of lists of sample values.

  Returns:
    The pooled standard error as a float, or 0.0 when there are not enough
    samples (every set has a single element).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  # Standard error is undefined for fewer than two samples.
  if len(values) <= 1:
    return 0.0

  std_dev = CalculateStandardDeviation(values)

  return std_dev / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # cygwin reports its own platform string, so check for it explicitly.
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  # PROCESSOR_ARCHITEW6432 is only set for 32-bit processes running under
  # WoW64; when absent, PROCESSOR_ARCHITECTURE is correct. Using .get()
  # also avoids a KeyError on platforms where neither variable exists.
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE'))
  return platform in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns the SHA-1 hex digest of the given contents.

  Args:
    contents: The bytes (or text, encoded as UTF-8) to hash.

  Returns:
    A 40-character hexadecimal digest string.
  """
  # hashlib on Python 3 only accepts bytes; encode text transparently so
  # existing str-passing callers keep working.
  if not isinstance(contents, bytes):
    contents = contents.encode('utf-8')
  return hashlib.sha1(contents).hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archive for x64 is still stored with 'win32'suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    # No revision: the plain platform archive name.
    return '%s.zip' % base_name
  if patch_sha:
    # Builds produced from a patched checkout carry the patch SHA suffix.
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      # NOTE(review): 32-bit folder name reconstructed — confirm it is
      # 'Win Builder' on the archive bucket.
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName()
  return '%s/%s' % (builder_folder, base_filename)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      # 'gs://' (was 'gs//') — log-message typo fixed.
      print('Fetching file from gs://%s/%s ...' % (bucket_name, source_path))
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return True
    else:
      print('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    print('Something went wrong while fetching file from cloud: %s' % e)
    # Remove any partially-downloaded file so callers never see a truncated
    # archive.
    if os.path.exists(target_file):
      os.remove(target_file)
  return False
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is fine; re-raise anything else.
    if e.errno != errno.EEXIST:
      raise
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if IsMac() or IsLinux():
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    try:
      command = unzip_cmd + [filepath]
      result = RunProcess(command)
    finally:
      # Always restore the working directory, even if the tool crashes.
      os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert IsWindows()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print('Extracting %s' % name)
      zf.extract(name, output_dir)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  original_cwd = os.getcwd()
  try:
    if cwd:
      os.chdir(cwd)
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindows()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the working directory even when Popen/communicate raises.
    os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported."""
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or not 'ninja' in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        # NOTE(review): trailing define reconstructed — confirm the final
        # fragment of this GYP_DEFINES string against the original.
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Runs a make Release build of |targets|, returning True on success."""
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  # make exits 0 on success.
  return not return_code
def BuildWithNinja(threads, targets):
  """Runs a ninja Release build of |targets|, returning True on success."""
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  # ninja exits 0 on success.
  return not return_code
def BuildWithVisualStudio(targets):
  """Runs a Visual Studio Release build of |targets| via devenv.com.

  Returns:
    True if the build succeeded.
  """
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code
def WriteStringToFile(text, file_name):
  """Writes |text| to |file_name| in binary mode.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Returns the entire contents of |file_name| as a string.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths.

  Only the '--- ' and '+++ ' file-header lines are rewritten; other lines
  may legitimately contain backslashes and are left untouched.

  Args:
    diff_text: The patch text, possibly empty or None.

  Returns:
    The patch with forward slashes in its file headers, or None when
    diff_text is falsy.
  """
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i, line in enumerate(diff_lines):
    if line.startswith(('--- ', '+++ ')):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
        opts: Options parsed from command line.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns the platform-appropriate Builder subclass."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # NOTE(review): goma thread count reconstructed — confirm the value.
    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'
    return build_success

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Returns the ninja targets needed for an android bisect build."""
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    # NOTE(review): goma thread count reconstructed — confirm the value.
    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, self._GetTargets())
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build on android's chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Adds the chrome apk on top of the base android targets.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
        opts: Program options containing cros_board and cros_remote_ip.

    Returns:
        True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      # 0o600 (py2.6+/py3 octal syntax) — the legacy 0600 literal is a
      # syntax error on Python 3.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      # Missing key files or chmod failure: treat as an imaging failure.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
        opts: Program options containing cros_board.
        depot: The depot being bisected.

    Returns:
        True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    # NOTE(review): '--' separator position reconstructed — confirm.
    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
        opts: Program options containing cros_board.
        depot: The depot being bisected.

    Returns:
        True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
        depot: Current depot being bisected.
        opts: The options parsed from the command line.

    Returns:
        True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)

    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # NOTE(review): attribute reconstructed from truncated source — confirm.
    self.opts = opts

  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # The start of the range is excluded by 'a..b'; append it to make the
    # list inclusive.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: Specifies whether or not we should sync using gclient,
          repo, or just use source control directly.

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      # Non-numeric input is assumed to already be a git hash.
      if not IsStringInt(revision_to_check):
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = range(svn_revision, svn_revision + search, 1)
      else:
        search_range = range(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # git-svn embeds the svn revision in the commit message; grep for it.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output
            break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        return int(revision_to_check)

      cwd = os.getcwd()
      os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
          'chromiumos-overlay'))
      pattern = CROS_VERSION_PATTERN % revision_to_check
      cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

      git_revision = None

      log_output = CheckRunGit(cmd, cwd=cwd)
      if log_output:
        git_revision = log_output
        git_revision = int(log_output.strip())
      os.chdir(cwd)

      return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision, cwd=None):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd, cwd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.

    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    # zip() rather than an index loop: same pairing, py2/py3 compatible.
    for fmt, target in zip(formats, targets):
      cmd = ['log', '--format=%s' % fmt, '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[target] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', file_name])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
1222 class BisectPerformanceMetrics(object):
1223 """This class contains functionality to perform a bisection of a range of
1224 revisions to narrow down where performance regressions may have occurred.
1226 The main entry-point is the Run method.
1229 def __init__(self, source_control, opts):
1230 super(BisectPerformanceMetrics, self).__init__()
1233 self.source_control = source_control
1234 self.src_cwd = os.getcwd()
1235 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1237 self.cleanup_commands = []
1239 self.builder = Builder.FromOpts(opts)
1241 # This always starts true since the script grabs latest first.
1242 self.was_blink = True
1244 for d in DEPOT_NAMES:
1245 # The working directory of each depot is just the path to the depot, but
1246 # since we're already in 'src', we can skip that part.
1248 self.depot_cwd[d] = os.path.join(
1249 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1251 def PerformCleanup(self):
1252 """Performs cleanup when script is finished."""
1253 os.chdir(self.src_cwd)
1254 for c in self.cleanup_commands:
1256 shutil.move(c[1], c[2])
1258 assert False, 'Invalid cleanup command.'
1260 def GetRevisionList(self, depot, bad_revision, good_revision):
1261 """Retrieves a list of all the commits between the bad revision and
1262 last known good revision."""
1264 revision_work_list = []
1267 revision_range_start = good_revision
1268 revision_range_end = bad_revision
1271 self.ChangeToDepotWorkingDirectory('cros')
1273 # Print the commit timestamps for every commit in the revision time
1274 # range. We'll sort them and bisect by that. There is a remote chance that
1275 # 2 (or more) commits will share the exact same timestamp, but it's
1276 # probably safe to ignore that case.
1277 cmd = ['repo', 'forall', '-c',
1278 'git log --format=%%ct --before=%d --after=%d' % (
1279 revision_range_end, revision_range_start)]
1280 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1282 assert not return_code, 'An error occurred while running'\
1283 ' "%s"' % ' '.join(cmd)
1287 revision_work_list = list(set(
1288 [int(o) for o in output.split('\n') if IsStringInt(o)]))
1289 revision_work_list = sorted(revision_work_list, reverse=True)
1291 cwd = self._GetDepotDirectory(depot)
1292 revision_work_list = self.source_control.GetRevisionList(bad_revision,
1293 good_revision, cwd=cwd)
1295 return revision_work_list
1297 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
1298 svn_revision = self.source_control.SVNFindRev(revision)
1300 if IsStringInt(svn_revision):
1301 # V8 is tricky to bisect, in that there are only a few instances when
1302 # we can dive into bleeding_edge and get back a meaningful result.
1303 # Try to detect a V8 "business as usual" case, which is when:
1304 # 1. trunk revision N has description "Version X.Y.Z"
1305 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1306 # trunk. Now working on X.Y.(Z+1)."
1308 # As of 01/24/2014, V8 trunk descriptions are formatted:
1309 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1310 # So we can just try parsing that out first and fall back to the old way.
1311 v8_dir = self._GetDepotDirectory('v8')
1312 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
1314 revision_info = self.source_control.QueryRevisionInfo(revision,
1317 version_re = re.compile("Version (?P<values>[0-9,.]+)")
1319 regex_results = version_re.search(revision_info['subject'])
1324 # Look for "based on bleeding_edge" and parse out revision
1325 if 'based on bleeding_edge' in revision_info['subject']:
1327 bleeding_edge_revision = revision_info['subject'].split(
1328 'bleeding_edge revision r')[1]
1329 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1330 git_revision = self.source_control.ResolveToRevision(
1331 bleeding_edge_revision, 'v8_bleeding_edge', 1,
1332 cwd=v8_bleeding_edge_dir)
1334 except (IndexError, ValueError):
1337 if not git_revision:
1338 # Wasn't successful, try the old way of looking for "Prepare push to"
1339 git_revision = self.source_control.ResolveToRevision(
1340 int(svn_revision) - 1, 'v8_bleeding_edge', -1,
1341 cwd=v8_bleeding_edge_dir)
1344 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1345 cwd=v8_bleeding_edge_dir)
1347 if 'Prepare push to trunk' in revision_info['subject']:
1351 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1352 cwd = self._GetDepotDirectory('v8')
1353 cmd = ['log', '--format=%ct', '-1', revision]
1354 output = CheckRunGit(cmd, cwd=cwd)
1355 commit_time = int(output)
1359 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1361 output = CheckRunGit(cmd, cwd=cwd)
1362 output = output.split()
1364 commits = reversed(commits)
1366 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1368 output = CheckRunGit(cmd, cwd=cwd)
1369 output = output.split()
1372 bleeding_edge_revision = None
1375 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1376 if bleeding_edge_revision:
1379 return bleeding_edge_revision
1381 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1382 """Manually parses the vars section of the DEPS file to determine
1383 chromium/blink/etc... revisions.
1386 A dict in the format {depot:revision} if successful, otherwise None.
1388 # We'll parse the "vars" section of the DEPS file.
1389 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1390 re_results = rxp.search(deps_file_contents)
1396 # We should be left with a series of entries in the vars component of
1397 # the DEPS file with the following format:
1398 # 'depot_name': 'revision',
1399 vars_body = re_results.group('vars_body')
1400 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1402 re_results = rxp.findall(vars_body)
1404 return dict(re_results)
1406 def _ParseRevisionsFromDEPSFile(self, depot):
1407 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1408 be needed if the bisect recurses into those depots later.
1411 depot: Depot being bisected.
1414 A dict in the format {depot:revision} if successful, otherwise None.
1417 locals = {'Var': lambda _: locals["vars"][_],
1418 'From': lambda *args: None}
1419 execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)
1420 locals = locals['deps']
1423 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1425 for d in DEPOT_NAMES:
1426 if DEPOT_DEPS_NAME[d].has_key('platform'):
1427 if DEPOT_DEPS_NAME[d]['platform'] != os.name:
1430 if (DEPOT_DEPS_NAME[d]['recurse'] and
1431 depot in DEPOT_DEPS_NAME[d]['from']):
1432 if (locals.has_key(DEPOT_DEPS_NAME[d]['src']) or
1433 locals.has_key(DEPOT_DEPS_NAME[d]['src_old'])):
1434 if locals.has_key(DEPOT_DEPS_NAME[d]['src']):
1435 re_results = rxp.search(locals[DEPOT_DEPS_NAME[d]['src']])
1436 self.depot_cwd[d] = \
1437 os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1438 elif (DEPOT_DEPS_NAME[d].has_key('src_old') and
1439 locals.has_key(DEPOT_DEPS_NAME[d]['src_old'])):
1441 rxp.search(locals[DEPOT_DEPS_NAME[d]['src_old']])
1442 self.depot_cwd[d] = \
1443 os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src_old'][4:])
1446 results[d] = re_results.group('revision')
1448 warning_text = ('Couldn\'t parse revision for %s while bisecting '
1450 if not warningText in self.warnings:
1451 self.warnings.append(warningText)
1453 print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1458 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1459 parse_results = self._ParseRevisionsFromDEPSFileManually(
1462 for depot_name, depot_revision in parse_results.iteritems():
1463 depot_revision = depot_revision.strip('@')
1464 print depot_name, depot_revision
1465 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1466 if (current_data.has_key('deps_var') and
1467 current_data['deps_var'] == depot_name):
1468 src_name = current_name
1469 results[src_name] = depot_revision
1473 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1474 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1477 A dict in the format {depot:revision} if successful, otherwise None.
1480 self.ChangeToDepotWorkingDirectory(depot)
1484 if depot == 'chromium' or depot == 'android-chrome':
1485 results = self._ParseRevisionsFromDEPSFile(depot)
1487 elif depot == 'cros':
1488 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1489 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1490 CROS_CHROMEOS_PATTERN]
1491 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1493 assert not return_code, 'An error occurred while running' \
1494 ' "%s"' % ' '.join(cmd)
1496 if len(output) > CROS_CHROMEOS_PATTERN:
1497 output = output[len(CROS_CHROMEOS_PATTERN):]
1500 output = output.split('_')[0]
1503 contents = output.split('.')
1505 version = contents[2]
1507 if contents[3] != '0':
1508 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1509 (version, contents[3], version)
1510 if not warningText in self.warnings:
1511 self.warnings.append(warningText)
1514 self.ChangeToDepotWorkingDirectory('chromium')
1515 return_code = CheckRunGit(['log', '-1', '--format=%H',
1516 '--author=chrome-release@google.com', '--grep=to %s' % version,
1520 results['chromium'] = output.strip()
1522 # We can't try to map the trunk revision to bleeding edge yet, because
1523 # we don't know which direction to try to search in. Have to wait until
1524 # the bisect has narrowed the results down to 2 v8 rolls.
1525 results['v8_bleeding_edge'] = None
1529 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1530 """Backs up or restores build output directory based on restore argument.
1533 restore: Indicates whether to restore or backup. Default is False(Backup)
1534 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1537 Path to backup or restored location as string. otherwise None if it fails.
1539 build_dir = os.path.abspath(
1540 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1541 source_dir = os.path.join(build_dir, build_type)
1542 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1544 source_dir, destination_dir = destination_dir, source_dir
1545 if os.path.exists(source_dir):
1546 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1547 shutil.move(source_dir, destination_dir)
1548 return destination_dir
1551 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1552 """Downloads the build archive for the given revision.
1555 revision: The SVN revision to build.
1556 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1559 True if download succeeds, otherwise False.
1563 # Get the SHA of the DEPS changes patch.
1564 patch_sha = GetSHA1HexDigest(patch)
1566 # Update the DEPS changes patch with a patch to create a new file named
1567 # 'DEPS.sha' and add patch_sha evaluated above to it.
1568 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1570 # Source archive file path on cloud storage.
1571 source_file = GetRemoteBuildPath(revision, self.opts.target_arch, patch_sha)
1573 # Get Build output directory
1574 abs_build_dir = os.path.abspath(
1575 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1576 # Downloaded archive file path.
1577 downloaded_file = os.path.join(
1579 GetZipFileName(revision, self.opts.target_arch, patch_sha))
1581 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1585 if not fetch_build_func():
1586 if not self.PostBuildRequestAndWait(revision,
1587 condition=fetch_build_func,
1589 raise RuntimeError('Somewthing went wrong while processing build'
1590 'request for: %s' % revision)
1591 # Generic name for the archive, created when archive file is extracted.
1592 output_dir = os.path.join(
1593 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1594 # Unzip build archive directory.
1596 RmTreeAndMkDir(output_dir, skip_makedir=True)
1597 ExtractZip(downloaded_file, abs_build_dir)
1598 if os.path.exists(output_dir):
1599 self.BackupOrRestoreOutputdirectory(restore=False)
1600 # Build output directory based on target(e.g. out/Release, out/Debug).
1601 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1602 print 'Moving build from %s to %s' % (
1603 output_dir, target_build_output_dir)
1604 shutil.move(output_dir, target_build_output_dir)
1606 raise IOError('Missing extracted folder %s ' % output_dir)
1607 except Exception as e:
1608 print 'Somewthing went wrong while extracting archive file: %s' % e
1609 self.BackupOrRestoreOutputdirectory(restore=True)
1610 # Cleanup any leftovers from unzipping.
1611 if os.path.exists(output_dir):
1612 RmTreeAndMkDir(output_dir, skip_makedir=True)
1614 # Delete downloaded archive
1615 if os.path.exists(downloaded_file):
1616 os.remove(downloaded_file)
1619 def PostBuildRequestAndWait(self, revision, condition, patch=None):
1620 """POSTs the build request job to the tryserver instance."""
1622 def GetBuilderNameAndBuildTime(target_arch='ia32'):
1623 """Gets builder bot name and buildtime in seconds based on platform."""
1624 # Bot names should match the one listed in tryserver.chromium's
1625 # master.cfg which produces builds for bisect.
1627 if Is64BitWindows() and target_arch == 'x64':
1628 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1629 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1631 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1633 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1634 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1638 bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
1640 # Creates a try job description.
1641 job_args = {'host': self.opts.builder_host,
1642 'port': self.opts.builder_port,
1643 'revision': 'src@%s' % revision,
1645 'name': 'Bisect Job-%s' % revision
1647 # Update patch information if supplied.
1649 job_args['patch'] = patch
1650 # Posts job to build the revision on the server.
1651 if post_perf_builder_job.PostTryJob(job_args):
1653 start_time = time.time()
1658 elapsed_time = time.time() - start_time
1659 if elapsed_time > build_timeout:
1660 raise RuntimeError('Timed out while waiting %ds for %s build.' %
1661 (build_timeout, revision))
1662 print ('Time elapsed: %ss, still waiting for %s build' %
1663 (elapsed_time, revision))
1664 time.sleep(poll_interval)
1667 def IsDownloadable(self, depot):
1668 """Checks if build is downloadable based on target platform and depot."""
1669 if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1670 return (depot == 'chromium' or
1671 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1672 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1675 def UpdateDeps(self, revision, depot, deps_file):
1676 """Updates DEPS file with new revision of dependency repository.
1678 This method search DEPS for a particular pattern in which depot revision
1679 is specified (e.g "webkit_revision": "123456"). If a match is found then
1680 it resolves the given git hash to SVN revision and replace it in DEPS file.
1683 revision: A git hash revision of the dependency repository.
1684 depot: Current depot being bisected.
1685 deps_file: Path to DEPS file.
1688 True if DEPS file is modified successfully, otherwise False.
1690 if not os.path.exists(deps_file):
1693 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1694 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1696 print 'DEPS update not supported for Depot: %s', depot
1699 # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1700 # contains "angle_revision" key that holds git hash instead of SVN revision.
1701 # And sometime "angle_revision" key is not specified in "vars" variable,
1702 # in such cases check "deps" dictionary variable that matches
1703 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1704 if depot == 'angle':
1705 return self.UpdateDEPSForAngle(revision, depot, deps_file)
1708 deps_contents = ReadStringFromFile(deps_file)
1709 # Check whether the depot and revision pattern in DEPS file vars
1710 # e.g. for webkit the format is "webkit_revision": "12345".
1711 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1713 match = re.search(deps_revision, deps_contents)
1715 svn_revision = self.source_control.SVNFindRev(
1716 revision, self._GetDepotDirectory(depot))
1717 if not svn_revision:
1718 print 'Could not determine SVN revision for %s' % revision
1720 # Update the revision information for the given depot
1721 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1723 # For v8_bleeding_edge revisions change V8 branch in order
1724 # to fetch bleeding edge revision.
1725 if depot == 'v8_bleeding_edge':
1726 new_data = self.UpdateV8Branch(new_data)
1729 # Write changes to DEPS file
1730 WriteStringToFile(new_data, deps_file)
1733 print 'Something went wrong while updating DEPS file. [%s]' % e
1736 def UpdateV8Branch(self, deps_content):
1737 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1739 Check for "v8_branch" in DEPS file if exists update its value
1740 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1741 variable from DEPS revision 254916, therefore check for "src/v8":
1742 <v8 source path> in DEPS in order to support prior DEPS revisions
1746 deps_content: DEPS file contents to be modified.
1749 Modified DEPS file contents as a string.
1751 new_branch = r'branches/bleeding_edge'
1752 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1753 if re.search(v8_branch_pattern, deps_content):
1754 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1756 # Replaces the branch assigned to "src/v8" key in DEPS file.
1757 # Format of "src/v8" in DEPS:
1759 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1760 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1761 v8_src_pattern = re.compile(
1762 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1763 if re.search(v8_src_pattern, deps_content):
1764 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1767 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1768 """Updates DEPS file with new revision for Angle repository.
1770 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1771 variable contains "angle_revision" key that holds git hash instead of
1774 And sometimes "angle_revision" key is not specified in "vars" variable,
1775 in such cases check "deps" dictionary variable that matches
1776 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1778 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1780 deps_contents = ReadStringFromFile(deps_file)
1781 # Check whether the depot and revision pattern in DEPS file vars variable
1782 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1783 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1784 deps_var, re.MULTILINE)
1785 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1787 # Update the revision information for the given depot
1788 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1790 # Check whether the depot and revision pattern in DEPS file deps
1792 # "src/third_party/angle": Var("chromium_git") +
1793 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1794 angle_rev_pattern = re.compile(
1795 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1796 match = re.search(angle_rev_pattern, deps_contents)
1798 print 'Could not find angle revision information in DEPS file.'
1800 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1801 # Write changes to DEPS file
1802 WriteStringToFile(new_data, deps_file)
1805 print 'Something went wrong while updating DEPS file, %s' % e
1808 def CreateDEPSPatch(self, depot, revision):
1809 """Modifies DEPS and returns diff as text.
1812 depot: Current depot being bisected.
1813 revision: A git hash revision of the dependency repository.
1816 A tuple with git hash of chromium revision and DEPS patch text.
1818 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1819 if not os.path.exists(deps_file_path):
1820 raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
1821 # Get current chromium revision (git hash).
1822 chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
1823 if not chromium_sha:
1824 raise RuntimeError('Failed to determine Chromium revision for %s' %
1826 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1827 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1828 # Checkout DEPS file for the current chromium revision.
1829 if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1832 if self.UpdateDeps(revision, depot, deps_file_path):
1833 diff_command = ['diff',
1834 '--src-prefix=src/',
1835 '--dst-prefix=src/',
1837 bisect_utils.FILE_DEPS]
1838 diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
1839 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1841 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1844 raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1848 def BuildCurrentRevision(self, depot, revision=None):
1849 """Builds chrome and performance_ui_tests on the current revision.
1852 True if the build was successful.
1854 if self.opts.debug_ignore_build:
1857 os.chdir(self.src_cwd)
1858 # Fetch build archive for the given revision from the cloud storage when
1859 # the storage bucket is passed.
1860 if self.IsDownloadable(depot) and revision:
1862 if depot != 'chromium':
1863 # Create a DEPS patch with new revision for dependency repository.
1864 (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
1865 # Get SVN revision for the given SHA, since builds are archived using SVN
1867 chromium_revision = self.source_control.SVNFindRev(revision)
1868 if not chromium_revision:
1870 'Failed to determine SVN revision for %s' % revision)
1871 if self.DownloadCurrentBuild(chromium_revision, patch=deps_patch):
1874 # Reverts the changes to DEPS file.
1875 self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1879 raise RuntimeError('Failed to download build archive for revision %s.\n'
1880 'Unfortunately, bisection couldn\'t continue any '
1881 'further. Please try running script without '
1882 '--gs_bucket flag to produce local builds.' % revision)
1885 build_success = self.builder.Build(depot, self.opts)
1887 return build_success
1889 def RunGClientHooks(self):
1890 """Runs gclient with runhooks command.
1893 True if gclient reports no errors.
1896 if self.opts.debug_ignore_build:
1899 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1901 def TryParseHistogramValuesFromOutput(self, metric, text):
1902 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1905 metric: The metric as a list of [<trace>, <value>] strings.
1906 text: The text to parse the metric values from.
1909 A list of floating point numbers found.
1911 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1913 text_lines = text.split('\n')
1916 for current_line in text_lines:
1917 if metric_formatted in current_line:
1918 current_line = current_line[len(metric_formatted):]
1921 histogram_values = eval(current_line)
1923 for b in histogram_values['buckets']:
1924 average_for_bucket = float(b['high'] + b['low']) * 0.5
1925 # Extends the list with N-elements with the average for that bucket.
1926 values_list.extend([average_for_bucket] * b['count'])
1932 def TryParseResultValuesFromOutput(self, metric, text):
1933 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
1936 metric: The metric as a list of [<trace>, <value>] strings.
1937 text: The text to parse the metric values from.
1940 A list of floating point numbers found.
1942 # Format is: RESULT <graph>: <trace>= <value> <units>
1943 metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1945 text_lines = text.split('\n')
1948 for current_line in text_lines:
1949 # Parse the output from the performance test for the metric we're
1951 metric_re = metric_formatted +\
1952 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1953 metric_re = re.compile(metric_re)
1954 regex_results = metric_re.search(current_line)
1956 if not regex_results is None:
1957 values_list += [regex_results.group('values')]
1959 metric_re = metric_formatted +\
1960 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1961 metric_re = re.compile(metric_re)
1962 regex_results = metric_re.search(current_line)
1964 if not regex_results is None:
1965 metric_values = regex_results.group('values')
1967 values_list += metric_values.split(',')
1969 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1971 # If the metric is times/t, we need to sum the timings in order to get
1972 # similar regression results as the try-bots.
1973 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1974 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1976 if metric in metrics_to_sum:
1978 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1982 def ParseMetricValuesFromOutput(self, metric, text):
1983 """Parses output from performance_ui_tests and retrieves the results for
1987 metric: The metric as a list of [<trace>, <value>] strings.
1988 text: The text to parse the metric values from.
1991 A list of floating point numbers found.
1993 metric_values = self.TryParseResultValuesFromOutput(metric, text)
1995 if not metric_values:
1996 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1998 return metric_values
2000 def _GenerateProfileIfNecessary(self, command_args):
2001 """Checks the command line of the performance test for dependencies on
2002 profile generation, and runs tools/perf/generate_profile as necessary.
2005 command_args: Command line being passed to performance test, as a list.
2008 False if profile generation was necessary and failed, otherwise True.
2011 if '--profile-dir' in ' '.join(command_args):
2012 # If we were using python 2.7+, we could just use the argparse
2013 # module's parse_known_args to grab --profile-dir. Since some of the
2014 # bots still run 2.6, have to grab the arguments manually.
2016 args_to_parse = ['--profile-dir', '--browser']
2018 for arg_to_parse in args_to_parse:
2019 for i, current_arg in enumerate(command_args):
2020 if arg_to_parse in current_arg:
2021 current_arg_split = current_arg.split('=')
2023 # Check 2 cases, --arg=<val> and --arg <val>
2024 if len(current_arg_split) == 2:
2025 arg_dict[arg_to_parse] = current_arg_split[1]
2026 elif i + 1 < len(command_args):
2027 arg_dict[arg_to_parse] = command_args[i+1]
2029 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2031 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2032 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2033 return not RunProcess(['python', path_to_generate,
2034 '--profile-type-to-generate', profile_type,
2035 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2039 def _IsBisectModeUsingMetric(self):
2040 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
2042 def _IsBisectModeReturnCode(self):
2043 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
2045 def _IsBisectModeStandardDeviation(self):
2046 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
2048 def RunPerformanceTestAndParseResults(
2049 self, command_to_run, metric, reset_on_first_run=False,
2050 upload_on_last_run=False, results_label=None):
2051 """Runs a performance test on the current revision and parses the results.
2054 command_to_run: The command to be run to execute the performance test.
2055 metric: The metric to parse out from the results of the performance test.
2056 This is the result chart name and trace name, separated by slash.
2057 reset_on_first_run: If True, pass the flag --reset-results on first run.
2058 upload_on_last_run: If True, pass the flag --upload-results on last run.
2059 results_label: A value for the option flag --results-label.
2060 The arguments reset_on_first_run, upload_on_last_run and results_label
2061 are all ignored if the test is not a Telemetry test.
2064 (values dict, 0) if --debug_ignore_perf_test was passed.
2065 (values dict, 0, test output) if the test was run successfully.
2066 (error message, -1) if the test couldn't be run.
2067 (error message, -1, test output) if the test ran but there was an error.
2069 success_code, failure_code = 0, -1
2071 if self.opts.debug_ignore_perf_test:
2078 return (fake_results, success_code)
2081 command_to_run = command_to_run.replace('/', r'\\')
2083 args = shlex.split(command_to_run)
2085 if not self._GenerateProfileIfNecessary(args):
2086 err_text = 'Failed to generate profile for performance test.'
2087 return (err_text, failure_code)
2089 # If running a Telemetry test for Chrome OS, insert the remote IP and
2090 # identity parameters.
2091 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
2092 if self.opts.target_platform == 'cros' and is_telemetry:
2093 args.append('--remote=%s' % self.opts.cros_remote_ip)
2094 args.append('--identity=%s' % CROS_TEST_KEY_PATH)
2096 start_time = time.time()
2099 output_of_all_runs = ''
2100 for i in xrange(self.opts.repeat_test_count):
2101 # Can ignore the return code since if the tests fail, it won't return 0.
2102 current_args = copy.copy(args)
2104 if i == 0 and reset_on_first_run:
2105 current_args.append('--reset-results')
2106 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
2107 current_args.append('--upload-results')
2109 current_args.append('--results-label=%s' % results_label)
2111 (output, return_code) = RunProcessAndRetrieveOutput(current_args,
2114 if e.errno == errno.ENOENT:
2115 err_text = ('Something went wrong running the performance test. '
2116 'Please review the command line:\n\n')
2117 if 'src/' in ' '.join(args):
2118 err_text += ('Check that you haven\'t accidentally specified a '
2119 'path with src/ in the command.\n\n')
2120 err_text += ' '.join(args)
2123 return (err_text, failure_code)
2126 output_of_all_runs += output
2127 if self.opts.output_buildbot_annotations:
2130 if self._IsBisectModeUsingMetric():
2131 metric_values += self.ParseMetricValuesFromOutput(metric, output)
2132 # If we're bisecting on a metric (ie, changes in the mean or
2133 # standard deviation) and no metric values are produced, bail out.
2134 if not metric_values:
2136 elif self._IsBisectModeReturnCode():
2137 metric_values.append(return_code)
2139 elapsed_minutes = (time.time() - start_time) / 60.0
2140 if elapsed_minutes >= self.opts.max_time_minutes:
2143 if len(metric_values) == 0:
2144 err_text = 'Metric %s was not found in the test output.' % metric
2145 # TODO(qyearsley): Consider also getting and displaying a list of metrics
2146 # that were found in the output here.
2147 return (err_text, failure_code, output_of_all_runs)
2149 # If we're bisecting on return codes, we're really just looking for zero vs
2151 if self._IsBisectModeReturnCode():
2152 # If any of the return codes is non-zero, output 1.
2153 overall_return_code = 0 if (
2154 all(current_value == 0 for current_value in metric_values)) else 1
2157 'mean': overall_return_code,
2160 'values': metric_values,
2163 print 'Results of performance test: Command returned with %d' % (
2164 overall_return_code)
2167 # Need to get the average value if there were multiple values.
2168 truncated_mean = CalculateTruncatedMean(metric_values,
2169 self.opts.truncate_percent)
2170 standard_err = CalculateStandardError(metric_values)
2171 standard_dev = CalculateStandardDeviation(metric_values)
2173 if self._IsBisectModeStandardDeviation():
2174 metric_values = [standard_dev]
2177 'mean': truncated_mean,
2178 'std_err': standard_err,
2179 'std_dev': standard_dev,
2180 'values': metric_values,
2183 print 'Results of performance test: %12f %12f' % (
2184 truncated_mean, standard_err)
2186 return (values, success_code, output_of_all_runs)
# NOTE(review): This view of the file is elided (original line numbers jump);
# several statements between the visible lines are missing. Do not assume the
# visible lines are contiguous.
2188 def FindAllRevisionsToSync(self, revision, depot):
2189 """Finds all dependant revisions and depots that need to be synced for a
2190 given revision. This is only useful in the git workflow, as an svn depot
2191 may be split into multiple mirrors.
2193 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
2194 skia/include. To sync skia/src properly, one has to find the proper
2195 revisions in skia/gyp and skia/include.
2198 revision: The revision to sync to.
2199 depot: The depot in use at the moment (probably skia).
2202 A list of [depot, revision] pairs that need to be synced.
# The depot being bisected is always synced, even when it has no dependants.
2204 revisions_to_sync = [[depot, revision]]
# "Base" depots (top-level checkouts) never need dependant resolution.
2206 is_base = ((depot == 'chromium') or (depot == 'cros') or
2207 (depot == 'android-chrome'))
2209 # Some SVN depots were split into multiple git depots, so we need to
2210 # figure out for each mirror which git revision to grab. There's no
2211 # guarantee that the SVN revision will exist for each of the dependant
2212 # depots, so we have to grep the git logs and grab the next earlier one.
# NOTE(review): the head of this condition (presumably "if not is_base and\")
# is elided here — confirm against the full file.
2214 DEPOT_DEPS_NAME[depot]['depends'] and\
2215 self.source_control.IsGit():
2216 svn_rev = self.source_control.SVNFindRev(revision)
2218 for d in DEPOT_DEPS_NAME[depot]['depends']:
2219 self.ChangeToDepotWorkingDirectory(d)
# Resolve the SVN revision to the nearest earlier git revision in the
# dependant depot (searching backwards up to 1000 commits).
2221 dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
2224 revisions_to_sync.append([d, dependant_rev])
2226 num_resolved = len(revisions_to_sync)
2227 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
# Restore the working directory before returning.
2229 self.ChangeToDepotWorkingDirectory(depot)
# Fail (the elided branch presumably returns None) unless every dependant
# depot resolved; the -1 accounts for the initial [depot, revision] entry.
2231 if not ((num_resolved - 1) == num_needed):
2234 return revisions_to_sync
# Deletes every compiled .pyc file under self.src_cwd before a build/run.
# (Python 2 script: `print` is a statement here.)
2236 def PerformPreBuildCleanup(self):
2237 """Performs necessary cleanup between runs."""
2238 print 'Cleaning up between runs.'
2241 # Having these pyc files around between runs can confuse the
2242 # perf tests and cause them to crash.
2243 for (path, _, files) in os.walk(self.src_cwd):
2244 for cur_file in files:
2245 if cur_file.endswith('.pyc'):
2246 path_to_file = os.path.join(path, cur_file)
2247 os.remove(path_to_file)
# Detects a Blink<->WebKit switch across |revision| by inspecting the
# .DEPS.git file at that revision, and removes third_party/WebKit when the
# repository flips. NOTE(review): error-return lines are elided in this view.
2249 def PerformWebkitDirectoryCleanup(self, revision):
2250 """If the script is switching between Blink and WebKit during bisect,
2251 its faster to just delete the directory rather than leave it up to git
# Check out only .DEPS.git at |revision| to inspect which repo it pins.
2257 if not self.source_control.CheckoutFileAtRevision(
2258 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
2262 os.chdir(self.src_cwd)
2264 is_blink = bisect_utils.IsDepsFileBlink()
# Undo the temporary checkout so the working tree is clean again.
2268 if not self.source_control.RevertFileToHead(
2269 bisect_utils.FILE_DEPS_GIT):
# Only pay the cost of deleting the directory when the repo actually flips.
2272 if self.was_blink != is_blink:
2273 self.was_blink = is_blink
2274 return bisect_utils.RemoveThirdPartyWebkitDirectory()
# Deletes the ChromeOS chroot via cros_sdk; returns True on success
# (RunProcess returns a process exit code, so 0 -> True).
2277 def PerformCrosChrootCleanup(self):
2278 """Deletes the chroot.
2284 self.ChangeToDepotWorkingDirectory('cros')
2285 cmd = [CROS_SDK_PATH, '--delete']
2286 return_code = RunProcess(cmd)
2288 return not return_code
# Creates a fresh ChromeOS chroot via cros_sdk; returns True on success.
# Mirrors PerformCrosChrootCleanup but with --create.
2290 def CreateCrosChroot(self):
2291 """Creates a new chroot.
2297 self.ChangeToDepotWorkingDirectory('cros')
2298 cmd = [CROS_SDK_PATH, '--create']
2299 return_code = RunProcess(cmd)
2301 return not return_code
# Dispatches depot-specific cleanup before syncing: chromium removes
# third_party/libjingle then runs the WebKit directory cleanup; cros rebuilds
# the chroot. NOTE(review): the default/other-depot return is elided — it
# presumably returns True; confirm against the full file.
2303 def PerformPreSyncCleanup(self, revision, depot):
2304 """Performs any necessary cleanup before syncing.
2309 if depot == 'chromium':
2310 if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
2312 return self.PerformWebkitDirectoryCleanup(revision)
2313 elif depot == 'cros':
2314 return self.PerformCrosChrootCleanup()
# Post-sync work: Android sets up its build environment first; cros creates
# a chroot (the `elif depot == 'cros'` guard for line 2329 is elided in this
# view); everything else falls through to gclient runhooks.
2317 def RunPostSync(self, depot):
2318 """Performs any work after syncing.
2323 if self.opts.target_platform == 'android':
2324 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
2325 path_to_src=self.src_cwd):
2329 return self.CreateCrosChroot()
2331 return self.RunGClientHooks()
# A chromium commit that touches only the DEPS file cannot affect a git-based
# build, so it is safe to skip. NOTE(review): the `return True` / fall-through
# `return False` lines are elided in this view.
2334 def ShouldSkipRevision(self, depot, revision):
2335 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2336 is git based those changes would have no effect.
2339 depot: The depot being bisected.
2340 revision: Current revision we're synced to.
2343 True if we should skip building/testing this revision.
2345 if depot == 'chromium':
2346 if self.source_control.IsGit():
# List the files touched by |revision| without the commit header.
2347 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
2348 output = CheckRunGit(cmd)
2350 files = output.splitlines()
2352 if len(files) == 1 and files[0] == 'DEPS':
# Full pipeline for one candidate revision: resolve dependant depots, clean,
# sync, run hooks, optionally skip, build, run the perf test, and collect
# external (DEPS) revisions. NOTE(review): this view is elided — the
# signature's continuation line (which carries the `skippable` keyword
# parameter used at line 2410) and several branch bodies are missing.
2357 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
2359 """Performs a full sync/build/run of the specified revision.
2362 revision: The revision to sync to.
2363 depot: The depot that's being used at the moment (src, webkit, etc.)
2364 command_to_run: The command to execute the performance test.
2365 metric: The performance metric being tested.
2368 On success, a tuple containing the results of the performance test.
2369 Otherwise, a tuple with the error message.
# Pick the sync tool by depot; other depots are synced via plain git
# (sync_client presumably stays None for them — confirm in full file).
2372 if depot == 'chromium' or depot == 'android-chrome':
2373 sync_client = 'gclient'
2374 elif depot == 'cros':
2375 sync_client = 'repo'
2377 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
2379 if not revisions_to_sync:
2380 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)
2382 if not self.PerformPreSyncCleanup(revision, depot):
2383 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
# debug_ignore_sync lets local debugging bypass the (slow) sync entirely.
2387 if not self.opts.debug_ignore_sync:
2388 for r in revisions_to_sync:
2389 self.ChangeToDepotWorkingDirectory(r[0])
2392 self.PerformPreBuildCleanup()
2394 # If you're using gclient to sync, you need to specify the depot you
2395 # want so that all the dependencies sync properly as well.
2396 # ie. gclient sync src@<SHA1>
2397 current_revision = r[1]
2398 if sync_client == 'gclient':
2399 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
2401 if not self.source_control.SyncToRevision(current_revision,
2408 success = self.RunPostSync(depot)
# Skip DEPS-only rolls etc. before paying for a build.
2410 if skippable and self.ShouldSkipRevision(depot, revision):
2411 return ('Skipped revision: [%s]' % str(revision),
2412 BUILD_RESULT_SKIPPED)
2414 start_build_time = time.time()
2415 if self.BuildCurrentRevision(depot, revision):
2416 after_build_time = time.time()
2417 results = self.RunPerformanceTestAndParseResults(command_to_run,
2419 # Restore build output directory once the tests are done, to avoid
# For downloadable builds the output dir was swapped in; put it back.
2421 if self.IsDownloadable(depot) and revision:
2422 self.BackupOrRestoreOutputdirectory(restore=True)
2425 external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
2428 if not external_revisions is None:
# Success tuple: (test results, status, external revs, perf time,
# build time) — the build-time term's tail is elided here.
2429 return (results[0], results[1], external_revisions,
2430 time.time() - after_build_time, after_build_time -
2433 return ('Failed to parse DEPS file for external revisions.',
2438 return ('Failed to build revision: [%s]' % (str(revision, )),
2441 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
2443 return ('Failed to sync revision: [%s]' % (str(revision, )),
# Classifies a run as pass/fail by nearest-reference distance: compares the
# std_dev field in std-dev bisect mode, otherwise the mean.
2446 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2447 """Given known good and bad values, decide if the current_value passed
2451 current_value: The value of the metric being checked.
2452 known_bad_value: The reference value for a "failed" run.
2453 known_good_value: The reference value for a "passed" run.
2456 True if the current_value is closer to the known_good_value than the
2459 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2460 dist_to_good_value = abs(current_value['std_dev'] -
2461 known_good_value['std_dev'])
2462 dist_to_bad_value = abs(current_value['std_dev'] -
2463 known_bad_value['std_dev'])
# Mean and return-code modes both compare on 'mean'.
2465 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2466 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2468 return dist_to_good_value < dist_to_bad_value
# Maps a depot name to its working directory; asserts on unknown depots so a
# newly added depot without directory support fails loudly. (The chromium
# return line, presumably self.src_cwd, is elided from this view.)
2470 def _GetDepotDirectory(self, depot_name):
2471 if depot_name == 'chromium':
2473 elif depot_name == 'cros':
2474 return self.cros_cwd
2475 elif depot_name in DEPOT_NAMES:
2476 return self.depot_cwd[depot_name]
2478 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2479 ' was added without proper support?' % depot_name
# Thin wrapper: chdir into the depot's directory (process-wide side effect).
2481 def ChangeToDepotWorkingDirectory(self, depot_name):
2482 """Given a depot, changes to the appropriate working directory.
2485 depot_name: The name of the depot (see DEPOT_NAMES).
2487 os.chdir(self._GetDepotDirectory(depot_name))
# Fills min/max revision data with the nearest v8 bleeding_edge revisions:
# searches forward from the range's min and backward from its max so the
# bleeding_edge range brackets the trunk range. Warns when trunk revisions
# don't map directly to bleeding_edge.
2489 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2490 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2491 search_forward=True)
2492 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2493 search_forward=False)
2494 min_revision_data['external']['v8_bleeding_edge'] = r1
2495 max_revision_data['external']['v8_bleeding_edge'] = r2
2497 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2498 min_revision_data['revision']) or
2499 not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2500 max_revision_data['revision'])):
2501 self.warnings.append('Trunk revisions in V8 did not map directly to '
2502 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2503 'did map directly to bleeding_edge revisions, but results might not '
# Chooses the next (external) depot to recurse into once the top-level range
# has narrowed: skips platform-mismatched depots and depots whose external
# revision did not change across the range. NOTE(review): `continue`
# statements after the guards are elided in this view.
2506 def _FindNextDepotToBisect(self, current_depot, current_revision,
2507 min_revision_data, max_revision_data):
2508 """Given the state of the bisect, decides which depot the script should
2509 dive into next (if any).
2512 current_depot: Current depot being bisected.
2513 current_revision: Current revision synced to.
2514 min_revision_data: Data about the earliest revision in the bisect range.
2515 max_revision_data: Data about the latest revision in the bisect range.
2518 The depot to bisect next, or None.
2520 external_depot = None
2521 for next_depot in DEPOT_NAMES:
# Skip depots pinned to a different platform (e.g. os.name mismatch).
2522 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2523 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
# Only consider depots flagged for recursion that hang off the depot
# currently being bisected.
2526 if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
2527 min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
2530 if current_depot == 'v8':
2531 # We grab the bleeding_edge info here rather than earlier because we
2532 # finally have the revision range. From that we can search forwards and
2533 # backwards to try to match trunk revisions to bleeding_edge.
2534 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
# No change in this depot across the range -> nothing to bisect there.
2536 if (min_revision_data['external'][next_depot] ==
2537 max_revision_data['external'][next_depot]):
2540 if (min_revision_data['external'][next_depot] and
2541 max_revision_data['external'][next_depot]):
2542 external_depot = next_depot
2545 return external_depot
# Sets up an external depot for bisection and returns its revision list.
# NOTE(review): the parameter list spans elided lines 2548-2552; parameters
# are documented in the docstring below.
2547 def PrepareToBisectOnDepot(self,
2553 """Changes to the appropriate directory and gathers a list of revisions
2554 to bisect between |start_revision| and |end_revision|.
2557 current_depot: The depot we want to bisect.
2558 end_revision: End of the revision range.
2559 start_revision: Start of the revision range.
2560 previous_depot: The depot we were previously bisecting.
2561 previous_revision: The last revision we synced to on |previous_depot|.
2564 A list containing the revisions between |start_revision| and
2565 |end_revision| inclusive.
2567 # Change into working directory of external library to run
2568 # subsequent commands.
2569 self.ChangeToDepotWorkingDirectory(current_depot)
2571 # V8 (and possibly others) is merged in periodically. Bisecting
2572 # this directory directly won't give much good info.
2573 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
2574 config_path = os.path.join(self.src_cwd, '..')
2575 if bisect_utils.RunGClientAndCreateConfig(self.opts,
2576 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2578 if bisect_utils.RunGClient(
2579 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
# v8_bleeding_edge is bisected in place of v8: swap the directories and
# register cleanup commands to restore them afterwards.
2582 if current_depot == 'v8_bleeding_edge':
2583 self.ChangeToDepotWorkingDirectory('chromium')
2585 shutil.move('v8', 'v8.bak')
2586 shutil.move('v8_bleeding_edge', 'v8')
2588 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2589 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
# Point the depot paths at the swapped directories.
2591 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2592 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
2594 self.ChangeToDepotWorkingDirectory(current_depot)
2596 depot_revision_list = self.GetRevisionList(current_depot,
2600 self.ChangeToDepotWorkingDirectory('chromium')
2602 return depot_revision_list
# Runs the full sync/build/test pipeline on the bad revision first, and only
# bothers with the good revision when the bad run succeeded (index [1] being
# falsy means no error). NOTE(review): call-argument continuation lines are
# elided in this view.
2604 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2605 """Gathers reference values by running the performance tests on the
2606 known good and bad revisions.
2609 good_rev: The last known good revision where the performance regression
2610 has not occurred yet.
2611 bad_rev: A revision where the performance regression has already occurred.
2612 cmd: The command to execute the performance test.
2613 metric: The metric being tested for regression.
2616 A tuple with the results of building and running each revision.
2618 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
2623 good_run_results = None
2625 if not bad_run_results[1]:
2626 good_run_results = self.SyncBuildAndRunRevision(good_rev,
2631 return (bad_run_results, good_run_results)
# Inserts a freshly gathered depot revision list into revision_data, shifting
# the sort keys of existing later entries to make room so display order stays
# consistent. NOTE(review): the per-revision dict literal spans elided lines
# 2654-2658 (value/passed/depot/external fields, presumably).
2633 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
2634 """Adds new revisions to the revision_data dict and initializes them.
2637 revisions: List of revisions to add.
2638 depot: Depot that's currently in use (src, webkit, etc...)
2639 sort: Sorting key for displaying revisions.
2640 revision_data: A dict to add the new revisions into. Existing revisions
2641 will have their sort keys offset.
2644 num_depot_revisions = len(revisions)
# Shift existing entries that sort after the insertion point.
2646 for _, v in revision_data.iteritems():
2647 if v['sort'] > sort:
2648 v['sort'] += num_depot_revisions
2650 for i in xrange(num_depot_revisions):
2653 revision_data[r] = {'revision' : r,
2659 'sort' : i + sort + 1}
# Prints the revision range being bisected, wrapped in buildbot annotation
# start/close markers when annotations are enabled.
2661 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2662 if self.opts.output_buildbot_annotations:
# revision_list is newest-first, so [last] .. [0] is oldest..newest.
2663 step_name = 'Bisection Range: [%s - %s]' % (
2664 revision_list[len(revision_list)-1], revision_list[0])
2665 bisect_utils.OutputAnnotationStepStart(step_name)
2668 print 'Revisions to bisect on [%s]:' % depot
2669 for revision_id in revision_list:
2670 print ' -> %s' % (revision_id, )
2673 if self.opts.output_buildbot_annotations:
2674 bisect_utils.OutputAnnotationStepClosed()
# If DEPS changed in the range but .DEPS.git changes don't match one-to-one,
# widens bad_revision to a commit that touches .DEPS.git shortly (<=15 min)
# after the newest DEPS change, so the bisect range contains both files'
# changes. Only applies to the git workflow on the chromium platform.
2676 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
2677 """Checks to see if changes to DEPS file occurred, and that the revision
2678 range also includes the change to .DEPS.git. If it doesn't, attempts to
2679 expand the revision range to include it.
2682 bad_rev: First known bad revision.
2683 good_revision: Last known good revision.
2686 A tuple with the new bad and good revisions.
2688 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
2689 changes_to_deps = self.source_control.QueryFileRevisionHistory(
2690 'DEPS', good_revision, bad_revision)
2693 # DEPS file was changed, search from the oldest change to DEPS file to
2694 # bad_revision to see if there are matching .DEPS.git changes.
# History lists are newest-first, so [-1] is the oldest DEPS change.
2695 oldest_deps_change = changes_to_deps[-1]
2696 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
2697 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
2699 if len(changes_to_deps) != len(changes_to_gitdeps):
2700 # Grab the timestamp of the last DEPS change
2701 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
2702 output = CheckRunGit(cmd)
2703 commit_time = int(output)
2705 # Try looking for a commit that touches the .DEPS.git file in the
2706 # next 15 minutes after the DEPS file change.
2707 cmd = ['log', '--format=%H', '-1',
2708 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
2709 'origin/master', bisect_utils.FILE_DEPS_GIT]
2710 output = CheckRunGit(cmd)
2711 output = output.strip()
# NOTE(review): the `if output:` guard before this warning appears to
# be elided from this view.
2713 self.warnings.append('Detected change to DEPS and modified '
2714 'revision range to include change to .DEPS.git')
2715 return (output, good_revision)
2717 self.warnings.append('Detected change to DEPS but couldn\'t find '
2718 'matching change to .DEPS.git')
2719 return (bad_revision, good_revision)
# Validates the user-supplied range: for git depots (except cros) compare
# commit timestamps; for cros/svn compare numeric revisions.
# NOTE(review): the parameter list spans elided lines 2722-2724
# (target_depot, good_revision, bad_revision per the docstring and body).
2721 def CheckIfRevisionsInProperOrder(self,
2725 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2728 good_revision: Number/tag of the known good revision.
2729 bad_revision: Number/tag of the known bad revision.
2732 True if the revisions are in the proper order (good earlier than bad).
2734 if self.source_control.IsGit() and target_depot != 'cros':
# %ct = committer timestamp (epoch seconds) for ordering comparison.
2735 cmd = ['log', '--format=%ct', '-1', good_revision]
2736 cwd = self._GetDepotDirectory(target_depot)
2738 output = CheckRunGit(cmd, cwd=cwd)
2739 good_commit_time = int(output)
2741 cmd = ['log', '--format=%ct', '-1', bad_revision]
2742 output = CheckRunGit(cmd, cwd=cwd)
2743 bad_commit_time = int(output)
2745 return good_commit_time <= bad_commit_time
2747 # Cros/svn use integers
2748 return int(good_revision) <= int(bad_revision)
# Entry point of the bisection: resolves the input revisions, gathers the
# candidate range, establishes good/bad reference values, then binary
# searches; when the range narrows on a depot with external dependencies it
# recurses into the changed external depot. NOTE(review): this view is
# elided — many `return results`, `else:`, and loop-header lines between the
# visible lines are missing; do not treat adjacent lines as contiguous.
2750 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2751 """Given known good and bad revisions, run a binary search on all
2752 intermediate revisions to determine the CL where the performance regression
2756 command_to_run: Specify the command to execute the performance test.
2757 good_revision: Number/tag of the known good revision.
2758 bad_revision: Number/tag of the known bad revision.
2759 metric: The performance metric to monitor.
2762 A dict with 2 members, 'revision_data' and 'error'. On success,
2763 'revision_data' will contain a dict mapping revision ids to
2764 data about that revision. Each piece of revision data consists of a
2765 dict with the following keys:
2767 'passed': Represents whether the performance test was successful at
2768 that revision. Possible values include: 1 (passed), 0 (failed),
2769 '?' (skipped), 'F' (build failed).
2770 'depot': The depot that this revision is from (ie. WebKit)
2771 'external': If the revision is a 'src' revision, 'external' contains
2772 the revisions of each of the external libraries.
2773 'sort': A sort value for sorting the dict in order of commits.
2790 If an error occurred, the 'error' field will contain the message and
2791 'revision_data' will be empty.
2793 results = {'revision_data' : {},
2796 # Choose depot to bisect first
2797 target_depot = 'chromium'
2798 if self.opts.target_platform == 'cros':
2799 target_depot = 'cros'
2800 elif self.opts.target_platform == 'android-chrome':
2801 target_depot = 'android-chrome'
2804 self.ChangeToDepotWorkingDirectory(target_depot)
2806 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2807 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2809 good_revision = self.source_control.ResolveToRevision(good_revision_in,
# TODO(review): "Could\'t" in the two messages below should read
# "Couldn't" — a user-visible typo; fix in a dedicated change, not here.
2815 if bad_revision is None:
2816 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2819 if good_revision is None:
2820 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2823 # Check that they didn't accidentally swap good and bad revisions.
2824 if not self.CheckIfRevisionsInProperOrder(
2825 target_depot, good_revision, bad_revision):
2826 results['error'] = 'bad_revision < good_revision, did you swap these '\
2830 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2831 bad_revision, good_revision)
2833 if self.opts.output_buildbot_annotations:
2834 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2836 print 'Gathering revision range for bisection.'
2837 # Retrieve a list of revisions to do bisection on.
2838 src_revision_list = self.GetRevisionList(target_depot,
2842 if self.opts.output_buildbot_annotations:
2843 bisect_utils.OutputAnnotationStepClosed()
2845 if src_revision_list:
2846 # revision_data will store information about a revision such as the
2847 # depot it came from, the webkit/V8 revision at that time,
2848 # performance timing, build state, etc...
2849 revision_data = results['revision_data']
2851 # revision_list is the list we're binary searching through at the moment.
# Seed revision_data with every chromium-level candidate; the dict
# literal spans elided lines (value/passed/external/perf_time/... keys).
2856 for current_revision_id in src_revision_list:
2859 revision_data[current_revision_id] = {'value' : None,
2861 'depot' : target_depot,
2865 'sort' : sort_key_ids}
2866 revision_list.append(current_revision_id)
2869 max_revision = len(revision_list) - 1
2871 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2873 if self.opts.output_buildbot_annotations:
2874 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2876 print 'Gathering reference values for bisection.'
2878 # Perform the performance tests on the good and bad revisions, to get
2880 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2886 if self.opts.output_buildbot_annotations:
2887 bisect_utils.OutputAnnotationStepClosed()
# A failed reference run is fatal — report and bail (the guard lines
# `if bad_results[1]:` / `if good_results[1]:` are elided here).
2890 results['error'] = ('An error occurred while building and running '
2891 'the \'bad\' reference value. The bisect cannot continue without '
2892 'a working \'bad\' revision to start from.\n\nError: %s' %
2897 results['error'] = ('An error occurred while building and running '
2898 'the \'good\' reference value. The bisect cannot continue without '
2899 'a working \'good\' revision to start from.\n\nError: %s' %
2904 # We need these reference values to determine if later runs should be
2905 # classified as pass or fail.
2906 known_bad_value = bad_results[0]
2907 known_good_value = good_results[0]
2909 # Can just mark the good and bad revisions explicitly here since we
2910 # already know the results.
# revision_list is newest-first: index 0 is the bad end, max is good.
2911 bad_revision_data = revision_data[revision_list[0]]
2912 bad_revision_data['external'] = bad_results[2]
2913 bad_revision_data['perf_time'] = bad_results[3]
2914 bad_revision_data['build_time'] = bad_results[4]
2915 bad_revision_data['passed'] = False
2916 bad_revision_data['value'] = known_bad_value
2918 good_revision_data = revision_data[revision_list[max_revision]]
2919 good_revision_data['external'] = good_results[2]
2920 good_revision_data['perf_time'] = good_results[3]
2921 good_revision_data['build_time'] = good_results[4]
2922 good_revision_data['passed'] = True
2923 good_revision_data['value'] = known_good_value
2925 next_revision_depot = target_depot
# Main binary-search loop (the `while True:` header is elided here).
2928 if not revision_list:
2931 min_revision_data = revision_data[revision_list[min_revision]]
2932 max_revision_data = revision_data[revision_list[max_revision]]
# Range exhausted at this depot: either finish skipped endpoints or
# try to descend into an external depot whose revision changed.
2934 if max_revision - min_revision <= 1:
2935 current_depot = min_revision_data['depot']
2936 if min_revision_data['passed'] == '?':
2937 next_revision_index = min_revision
2938 elif max_revision_data['passed'] == '?':
2939 next_revision_index = max_revision
2940 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2941 previous_revision = revision_list[min_revision]
2942 # If there were changes to any of the external libraries we track,
2943 # should bisect the changes there as well.
2944 external_depot = self._FindNextDepotToBisect(current_depot,
2945 previous_revision, min_revision_data, max_revision_data)
2947 # If there was no change in any of the external depots, the search
2949 if not external_depot:
2950 if current_depot == 'v8':
2951 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2952 'continue any further. The script can only bisect into '
2953 'V8\'s bleeding_edge repository if both the current and '
2954 'previous revisions in trunk map directly to revisions in '
# Note the swap: the max (good) end's external rev is the earliest.
2958 earliest_revision = max_revision_data['external'][external_depot]
2959 latest_revision = min_revision_data['external'][external_depot]
2961 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
2964 next_revision_depot,
2967 if not new_revision_list:
2968 results['error'] = 'An error occurred attempting to retrieve'\
2969 ' revision range: [%s..%s]' % \
2970 (earliest_revision, latest_revision)
2973 self.AddRevisionsIntoRevisionData(new_revision_list,
2975 min_revision_data['sort'],
2978 # Reset the bisection and perform it on the newly inserted
2980 revision_list = new_revision_list
2982 max_revision = len(revision_list) - 1
2983 sort_key_ids += len(revision_list)
2985 print 'Regression in metric:%s appears to be the result of changes'\
2986 ' in [%s].' % (metric, external_depot)
2988 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
# Normal bisection step: probe the midpoint of the current range.
2994 next_revision_index = int((max_revision - min_revision) / 2) +\
2997 next_revision_id = revision_list[next_revision_index]
2998 next_revision_data = revision_data[next_revision_id]
2999 next_revision_depot = next_revision_data['depot']
3001 self.ChangeToDepotWorkingDirectory(next_revision_depot)
3003 if self.opts.output_buildbot_annotations:
3004 step_name = 'Working on [%s]' % next_revision_id
3005 bisect_utils.OutputAnnotationStepStart(step_name)
3007 print 'Working on revision: [%s]' % next_revision_id
3009 run_results = self.SyncBuildAndRunRevision(next_revision_id,
3010 next_revision_depot,
3012 metric, skippable=True)
3014 # If the build is successful, check whether or not the metric
3016 if not run_results[1]:
3017 if len(run_results) > 2:
3018 next_revision_data['external'] = run_results[2]
3019 next_revision_data['perf_time'] = run_results[3]
3020 next_revision_data['build_time'] = run_results[4]
3022 passed_regression = self._CheckIfRunPassed(run_results[0],
3026 next_revision_data['passed'] = passed_regression
3027 next_revision_data['value'] = run_results[0]
# Narrow the range: a passing midpoint moves the bad (max) bound
# down, a failing one moves the good (min) bound up.
3029 if passed_regression:
3030 max_revision = next_revision_index
3032 min_revision = next_revision_index
3034 if run_results[1] == BUILD_RESULT_SKIPPED:
3035 next_revision_data['passed'] = 'Skipped'
3036 elif run_results[1] == BUILD_RESULT_FAIL:
3037 next_revision_data['passed'] = 'Build Failed'
3039 print run_results[0]
3041 # If the build is broken, remove it and redo search.
3042 revision_list.pop(next_revision_index)
3046 if self.opts.output_buildbot_annotations:
3047 self._PrintPartialResults(results)
3048 bisect_utils.OutputAnnotationStepClosed()
3050 # Weren't able to sync and retrieve the revision range.
3051 results['error'] = 'An error occurred attempting to retrieve revision '\
3052 'range: [%s..%s]' % (good_revision, bad_revision)
# Prints an in-progress tested-commits table (final_step=False) sorted by the
# 'sort' key, with confidence pinned to 100 so no rows are suppressed.
3056 def _PrintPartialResults(self, results_dict):
3057 revision_data = results_dict['revision_data']
3058 revision_data_sorted = sorted(revision_data.iteritems(),
3059 key = lambda x: x[1]['sort'])
3060 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3062 self._PrintTestedCommitsTable(revision_data_sorted,
3063 results_dict['first_working_revision'],
3064 results_dict['last_broken_revision'],
3065 100, final_step=False)
# Emits the confidence line consumed verbatim by the perf dashboard.
3067 def _PrintConfidence(self, results_dict):
3068 # The perf dashboard specifically looks for the string
3069 # "Confidence in Bisection Results: 100%" to decide whether or not
3070 # to cc the author(s). If you change this, please update the perf
3071 # dashboard as well.
3072 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
# Prints the success banner: return-code mode gets a fixed message, other
# modes report the regression size/std-err plus the confidence line.
3074 def _PrintBanner(self, results_dict):
3076 print " __o_\___ Aw Snap! We hit a speed bump!"
3077 print "=-O----O-'__.~.___________________________________"
3079 if self._IsBisectModeReturnCode():
3080 print ('Bisect reproduced a change in return codes while running the '
3081 'performance test.')
3083 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3084 '%s metric.' % (results_dict['regression_size'],
3085 results_dict['regression_std_err'], '/'.join(self.opts.metric)))
3086 self._PrintConfidence(results_dict)
# Prints the could-not-reproduce banner, mode-specific wording.
3088 def _PrintFailedBanner(self, results_dict):
3090 if self._IsBisectModeReturnCode():
3091 print 'Bisect could not reproduce a change in the return code.'
3093 print ('Bisect could not reproduce a change in the '
3094 '%s metric.' % '/'.join(self.opts.metric))
# Builds a ViewVC URL for |cl| by parsing the git-svn-id line out of the
# commit body; depots without a 'viewvc' entry get no link (the fallback
# return is elided from this view).
3097 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3098 info = self.source_control.QueryRevisionInfo(cl,
3099 self._GetDepotDirectory(depot))
3100 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3102 # Format is "git-svn-id: svn://....@123456 <other data>"
3103 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
# Take the revision number between '@' and the following space.
3104 svn_revision = svn_line[0].split('@')
3105 svn_revision = svn_revision[1].split(' ')[0]
3106 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
# Prints commit metadata; the "Author : " line format is parsed by the perf
# dashboard and must not change. Email is only shown when it adds info
# beyond the author name.
3111 def _PrintRevisionInfo(self, cl, info, depot=None):
3112 # The perf dashboard specifically looks for the string
3113 # "Author : " to parse out who to cc on a bug. If you change the
3114 # formatting here, please update the perf dashboard as well.
3116 print 'Subject : %s' % info['subject']
3117 print 'Author : %s' % info['author']
3118 if not info['email'].startswith(info['author']):
3119 print 'Email : %s' % info['email']
3120 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
3122 print 'Link : %s' % commit_link
# Fallback when no ViewVC link could be derived (guard lines elided).
3125 print 'Failed to parse svn revision from body:'
3129 print 'Commit : %s' % cl
3130 print 'Date : %s' % info['date']
# Prints one table row: each cell centered to its column width, then padded
# again via %-formatting. Column count must match the data.
3132 def _PrintTableRow(self, column_widths, row_data):
3133 assert len(column_widths) == len(row_data)
3136 for i in xrange(len(column_widths)):
3137 current_row_data = row_data[i].center(column_widths[i], ' ')
3138 text += ('%%%ds' % column_widths[i]) % current_row_data
# Prints the tested-commits table header for the active bisect mode.
# NOTE(review): in std-dev mode the header order is
# Std. Error / Mean while _PrintTestedCommitsEntry prints std_error, mean
# with widths [20, 70, 12, 14, 13] vs [20, 70, 14, 12, 13] here — the
# 14/12 widths look swapped between header and rows; verify alignment.
3141 def _PrintTestedCommitsHeader(self):
3142 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3143 self._PrintTableRow(
3144 [20, 70, 14, 12, 13],
3145 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3146 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3147 self._PrintTableRow(
3148 [20, 70, 14, 12, 13],
3149 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3150 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3151 self._PrintTableRow(
3153 ['Depot', 'Commit SHA', 'Return Code', 'State'])
# Unreachable fallback print after the assert (assert line 3155).
3155 assert False, "Invalid bisect_mode specified."
3156 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3157 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3158 'State'.center(13, ' '))
# Prints one tested-commit row; column content depends on bisect mode.
# (Return-code mode's width list spans elided line 3176.)
3160 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3161 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3162 std_error = '+-%.02f' % current_data['value']['std_err']
3163 mean = '%.02f' % current_data['value']['mean']
3164 self._PrintTableRow(
3165 [20, 70, 12, 14, 13],
3166 [current_data['depot'], cl_link, mean, std_error, state_str])
3167 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3168 std_error = '+-%.02f' % current_data['value']['std_err']
3169 mean = '%.02f' % current_data['value']['mean']
3170 self._PrintTableRow(
3171 [20, 70, 12, 14, 13],
3172 [current_data['depot'], cl_link, std_error, mean, state_str])
3173 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3174 mean = '%d' % current_data['value']['mean']
3175 self._PrintTableRow(
3177 [current_data['depot'], cl_link, mean, state_str])
# Prints the tested-commits table, marking the suspected CL range on the
# final step. NOTE(review): this view is elided — the `state` bookkeeping
# that lines 3198/3203 test is assigned on missing lines; treat the flow as
# indicative only.
3179 def _PrintTestedCommitsTable(self, revision_data_sorted,
3180 first_working_revision, last_broken_revision, confidence,
3184 print 'Tested commits:'
3186 print 'Partial results:'
3187 self._PrintTestedCommitsHeader()
3189 for current_id, current_data in revision_data_sorted:
# Only rows with an actual measured value are printed.
3190 if current_data['value']:
3191 if (current_id == last_broken_revision or
3192 current_id == first_working_revision):
3193 # If confidence is too low, don't add this empty line since it's
3194 # used to put focus on a suspected CL.
3195 if confidence and final_step:
3198 if state == 2 and not final_step:
3199 # Just want a separation between "bad" and "good" cl's.
3203 if state == 1 and final_step:
3204 state_str = 'Suspected CL'
3208 # If confidence is too low, don't bother outputting good/bad.
3211 state_str = state_str.center(13, ' ')
3213 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3214 current_data['depot'])
# Fall back to the raw hash when no ViewVC link exists (guard elided).
3216 cl_link = current_id
3217 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
# Prints the local repro command; adds a profiler hint for telemetry tests.
3219 def _PrintReproSteps(self):
3221 print 'To reproduce locally:'
3222 print '$ ' + self.opts.command
3223 if bisect_utils.IsTelemetryCommand(self.opts.command):
3225 print 'Also consider passing --profiler=list to see available profilers.'
# Prints any additional suspected regressions found along the way, as
# (depot, range, confidence) rows; falls back to raw hashes when no ViewVC
# link is available.
3227 def _PrintOtherRegressions(self, other_regressions, revision_data):
3229 print 'Other regressions may have occurred:'
3230 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3231 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3232 for regression in other_regressions:
3233 current_id, previous_id, confidence = regression
3234 current_data = revision_data[current_id]
3235 previous_data = revision_data[previous_id]
3237 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3238 current_data['depot'])
3239 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3240 previous_data['depot'])
3242 # If we can't map it to a viewable URL, at least show the original hash.
3243 if not current_link:
3244 current_link = current_id
3245 if not previous_link:
3246 previous_link = previous_id
3248 print ' %8s %70s %s' % (
3249 current_data['depot'], current_link,
3250 ('%d%%' % confidence).center(10, ' '))
3251 print ' %8s %70s' % (
3252 previous_data['depot'], previous_link)
  def _PrintStepTime(self, revision_data_sorted):
    """Prints average build time and test time over all tested revisions."""
    step_perf_time_avg = 0.0
    step_build_time_avg = 0.0
    # Accumulate times only for revisions that actually produced a value.
    for _, current_data in revision_data_sorted:
      if current_data['value']:
        step_perf_time_avg += current_data['perf_time']
        step_build_time_avg += current_data['build_time']
    # NOTE(review): `step_count` has no visible assignment in this excerpt --
    # presumably it is incremented in the loop above; confirm against the
    # full source.
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
    print 'Average build time : %s' % datetime.timedelta(
        seconds=int(step_build_time_avg))
    print 'Average test time : %s' % datetime.timedelta(
        seconds=int(step_perf_time_avg))
  def _PrintWarnings(self):
    """Prints any warnings accumulated during the bisect run."""
    # Nothing to do when no warnings were recorded.
    # NOTE(review): the bodies of the `if` and the `for` below are missing
    # from this excerpt (presumably an early return and a print).
    if not self.warnings:
    # De-duplicate warnings before printing.
    for w in set(self.warnings):
  def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
    """Scans tested revisions for additional possible regression ranges.

    Args:
      revision_data_sorted: List of (revision, data) pairs in sorted order.
      bad_greater_than_good: True if the overall regression increased the
          metric (bad mean > good mean); used to keep only local changes in
          the same direction.

    Returns:
      A list of [current_id, previous_id, confidence] candidates.
    """
    other_regressions = []
    previous_values = []
    # NOTE(review): `previous_id` is read before any visible assignment and
    # some guard lines appear to be missing from this excerpt; confirm
    # against the full source.
    for current_id, current_data in revision_data_sorted:
      current_values = current_data['value']
      current_values = current_values['values']
      confidence = CalculateConfidence(previous_values, [current_values])
      mean_of_prev_runs = CalculateMean(sum(previous_values, []))
      mean_of_current_runs = CalculateMean(current_values)
      # Check that the potential regression is in the same direction as
      # the overall regression. If the mean of the previous runs < the
      # mean of the current runs, this local regression is in same
      prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
      is_same_direction = (prev_less_than_current if
          bad_greater_than_good else not prev_less_than_current)
      # Only report potential regressions with high confidence.
      if is_same_direction and confidence > 50:
        other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
    return other_regressions
  def _GetResultsDict(self, revision_data, revision_data_sorted):
    """Aggregates bisect output into a summary dict.

    Computes the first-working / last-broken range, regression size and
    standard error, a confidence score, the culprit revisions, other
    candidate regression ranges, and appends any warnings to self.warnings.

    NOTE(review): several lines are missing from this excerpt (loop guards,
    list initializations such as `broken_means`/`working_means`/`changes`,
    and the dict literal's opening) -- confirm against the full source.
    """
    # Find range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1
    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i
      # NOTE(review): a failing-revision guard appears to be missing here.
      last_broken_revision = k
      last_broken_revision_index = i
    if last_broken_revision != None and first_working_revision != None:
      # Collect per-run value lists on the broken side of the range...
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])
      # ...and on the working side.
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])
      # Flatten the lists to calculate mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])
      # Calculate the approximate size of the regression
      mean_of_bad_runs = CalculateMean(broken_mean)
      mean_of_good_runs = CalculateMean(working_mean)
      regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
      # NaN relative change means a zero baseline; report it specially.
      if math.isnan(regression_size):
        regression_size = 'zero-to-nonzero'
      regression_std_err = math.fabs(CalculatePooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
      # Give a "confidence" in the bisect. At the moment we use how distinct the
      # values are before and after the last broken revision, and how noisy the
      confidence = CalculateConfidence(working_means, broken_means)
      culprit_revisions = []
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])
      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        (output, return_code) = RunProcessAndRetrieveOutput(cmd)
        assert not return_code, 'An error occurred while running'\
            ' "%s"' % ' '.join(cmd)
        for l in output.split('\n'):
          # Output will be in form:
          # /path_to_other_depot
          contents = l.split(' ')
          if len(contents) > 1:
            changes.append([last_depot, contents[0]])
        # NOTE(review): the `for c in changes:` header is not visible here.
        info = self.source_control.QueryRevisionInfo(c[1])
        culprit_revisions.append((c[1], info, None))
      for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
        k, v = revision_data_sorted[i]
        if k == first_working_revision:
        self.ChangeToDepotWorkingDirectory(v['depot'])
        info = self.source_control.QueryRevisionInfo(k)
        culprit_revisions.append((k, info, v['depot']))
    # Check for any other possible regression ranges
    other_regressions = self._FindOtherRegressions(revision_data_sorted,
        mean_of_bad_runs > mean_of_good_runs)
    # Check for warnings:
    if len(culprit_revisions) > 1:
      self.warnings.append('Due to build errors, regression range could '
          'not be narrowed down to a single commit.')
    if self.opts.repeat_test_count == 1:
      self.warnings.append('Tests were only set to run once. This may '
          'be insufficient to get meaningful results.')
    if confidence < 100:
      self.warnings.append(
          'Confidence is less than 100%. There could be other candidates for '
          'this regression. Try bisecting again with increased repeat_count '
          'or on a sub-metric that shows the regression more clearly.')
      self.warnings.append(
          'Confidence is 0%. Try bisecting again on another platform, with '
          'increased repeat_count or on a sub-metric that shows the regression '
    # The returned summary dict (opening brace missing from this excerpt).
    'first_working_revision': first_working_revision,
    'last_broken_revision': last_broken_revision,
    'culprit_revisions': culprit_revisions,
    'other_regressions': other_regressions,
    'regression_size': regression_size,
    'regression_std_err': regression_std_err,
    'confidence': confidence,
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    # Order revisions for display by their bisect sort key.
    revision_data_sorted = sorted(revision_data.iteritems(),
        key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']
      # Map boolean pass/fail onto human-readable labels.
      # NOTE(review): the branch lines between the two assignments are
      # missing from this excerpt.
      if type(build_status) is bool:
        build_status = 'Good'
        build_status = 'Bad'
      print '  %20s  %40s  %s' % (current_data['depot'],
          current_id, build_status)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    # Only print culprit details when the bisect converged with confidence.
    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
      # Failure path: no confident culprit was found.
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()
    self._PrintTestedCommitsTable(revision_data_sorted,
        results_dict['first_working_revision'],
        results_dict['last_broken_revision'],
        results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  # A git work tree is the only supported workflow in this excerpt.
  (output, _) = RunGit(['rev-parse', '--is-inside-work-tree'])
  if output.strip() == 'true':
    return GitSourceControl(opts)
  # NOTE(review): the fall-through `return None` is not visible here.
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  # NOTE(review): the try/except frame around the rmtree is missing from
  # this excerpt (`e` is read without a visible binding); confirm against
  # the full source.
  if os.path.exists(path_to_dir):
    shutil.rmtree(path_to_dir)
  # Missing-directory errors are tolerated; anything else is presumably
  # re-raised (body not visible here).
  if e.errno != errno.ENOENT:
  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)
def RemoveBuildFiles():
  """Removes build files from previous runs."""
  # Wipe both Release output directories; success requires both to succeed.
  # NOTE(review): the return statements are missing from this excerpt.
  if RmTreeAndMkDir(os.path.join('out', 'Release')):
    if RmTreeAndMkDir(os.path.join('build', 'Release')):
class BisectOptions(object):
  """Options to be used when running bisection."""
  # NOTE(review): the `def __init__(self):` line is missing from this
  # excerpt; the statements below are the constructor body, setting the
  # default for every supported option. ParseCommandLine / FromDict
  # overwrite these from user input.
    super(BisectOptions, self).__init__()
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.builder_host = None
    self.builder_port = None
    self.bisect_mode = BISECT_MODE_MEAN
  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    # NOTE(review): several `type=`/`dest=`/`default=` keyword lines and the
    # trailing `return parser` are missing from this excerpt.
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')
    parser = optparse.OptionParser(usage=usage)
    # Options controlling the bisection itself: range, metric, sampling.
    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
    group.add_option('--bisect_mode',
                     choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
                     BISECT_MODE_RETURN_CODE],
                     default=BISECT_MODE_MEAN,
                     help='The bisect mode. Choices are to bisect on the '
                     'difference in mean, std_dev, or return_code.')
    parser.add_option_group(group)
    # Options controlling how/where builds are produced.
    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
    group.add_option('--target_platform',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     choices=['ia32', 'x64', 'arm'],
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    group.add_option('--builder_host',
                     dest='builder_host',
                     help=('Host address of server to produce build by posting'
                     ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     help=('HTTP port of the server to produce build by posting'
                     ' try job request.'))
    parser.add_option_group(group)
    # Debug-only switches that skip expensive stages.
    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
  def ParseCommandLine(self):
    """Parses the command line for bisect options."""
    parser = self._CreateCommandLineParser()
    (opts, _) = parser.parse_args()
    # NOTE(review): the enclosing `try:`, the `if opts.gs_bucket:` guard and
    # the `setattr` line are missing from this excerpt; the validation below
    # raises RuntimeError, which is caught at the bottom and re-raised with
    # the parser's help text attached.
    if not opts.command:
      raise RuntimeError('missing required parameter: --command')
    if not opts.good_revision:
      raise RuntimeError('missing required parameter: --good_revision')
    if not opts.bad_revision:
      raise RuntimeError('missing required parameter: --bad_revision')
    # A metric is mandatory except in return-code mode.
    if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
      raise RuntimeError('missing required parameter: --metric')
    if not cloud_storage.List(opts.gs_bucket):
      raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
    if not opts.builder_host:
      raise RuntimeError('Must specify try server hostname, when '
          'gs_bucket is used: --builder_host')
    if not opts.builder_port:
      raise RuntimeError('Must specify try server port number, when '
          'gs_bucket is used: --builder_port')
    if opts.target_platform == 'cros':
      # Run sudo up front to make sure credentials are cached for later.
      print 'Sudo is required to build cros:'
      RunProcess(['sudo', 'true'])
      if not opts.cros_board:
        raise RuntimeError('missing required parameter: --cros_board')
      if not opts.cros_remote_ip:
        raise RuntimeError('missing required parameter: --cros_remote_ip')
    if not opts.working_directory:
      raise RuntimeError('missing required parameter: --working_directory')
    metric_values = opts.metric.split('/')
    if (len(metric_values) != 2 and
        opts.bisect_mode != BISECT_MODE_RETURN_CODE):
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
    opts.metric = metric_values
    # Clamp numeric options into their documented ranges.
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0
    # Copy every parsed option onto self (attribute names must match).
    for k, v in opts.__dict__.iteritems():
      assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
    except RuntimeError, e:
      # Re-raise with the full usage text for better diagnostics.
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    dict of options.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    # NOTE(review): the @staticmethod decorator, the setattr in the loop and
    # the trailing `return opts` are not visible in this excerpt.
    opts = BisectOptions()
    for k, v in values.iteritems():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
    opts.metric = metric_values
    # Clamp numeric options into their documented ranges (same rules as
    # ParseCommandLine).
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0
  # NOTE(review): these statements are the body of main(); the `def main():`
  # line, the try/except frames and several conditional headers are missing
  # from this excerpt -- confirm structure against the full source.
  opts = BisectOptions()
  opts.ParseCommandLine()
  # Optionally load a script that customizes bisect behavior/depot info.
  extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
  raise RuntimeError("Invalid or missing --extra_src.")
  _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
  if opts.working_directory:
    custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
    if opts.no_custom_deps:
    bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
    os.chdir(os.path.join(os.getcwd(), 'src'))
  # Start from a clean slate: stale build output can skew results.
  if not RemoveBuildFiles():
    raise RuntimeError('Something went wrong removing the build files.')
  if not IsPlatformSupported(opts):
    raise RuntimeError("Sorry, this platform isn't supported yet.")
  # Check what source control method they're using. Only support git workflow
  source_control = DetermineAndCreateSourceControl(opts)
  if not source_control:
    raise RuntimeError("Sorry, only the git workflow is supported at the "
  # gClient sync seems to fail if you're not in master branch.
  if (not source_control.IsInProperBranch() and
      not opts.debug_ignore_sync and
      not opts.working_directory):
    raise RuntimeError("You must switch to master branch to run bisection.")
  bisect_test = BisectPerformanceMetrics(source_control, opts)
  bisect_results = bisect_test.Run(opts.command,
  if bisect_results['error']:
    raise RuntimeError(bisect_results['error'])
  bisect_test.FormatAndPrintResults(bisect_results)
  bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
3892 if __name__ == '__main__':