src/third_party/chromite/buildbot/cbuildbot_stages.py
1 # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5 """Module containing the various stages that a builder runs."""
6
7 import contextlib
8 import datetime
9 import functools
10 import glob
11 import itertools
12 import json
13 import logging
14 import multiprocessing
15 import os
16 import platform
17 import re
18 import shutil
19 import sys
20 from xml.etree import ElementTree
21
22 from chromite.buildbot import builderstage as bs
23 from chromite.buildbot import cbuildbot_commands as commands
24 from chromite.buildbot import cbuildbot_config
25 from chromite.buildbot import cbuildbot_metadata
26 from chromite.buildbot import cbuildbot_results as results_lib
27 from chromite.buildbot import constants
28 from chromite.buildbot import lab_status
29 from chromite.buildbot import lkgm_manager
30 from chromite.buildbot import manifest_version
31 from chromite.buildbot import portage_utilities
32 from chromite.buildbot import repository
33 from chromite.buildbot import trybot_patch_pool
34 from chromite.buildbot import validation_pool
35 from chromite.lib import alerts
36 from chromite.lib import commandline
37 from chromite.lib import cros_build_lib
38 from chromite.lib import git
39 from chromite.lib import gs
40 from chromite.lib import osutils
41 from chromite.lib import parallel
42 from chromite.lib import patch as cros_patch
43 from chromite.lib import retry_util
44 from chromite.lib import timeout_util
45
46 _FULL_BINHOST = 'FULL_BINHOST'
47 _PORTAGE_BINHOST = 'PORTAGE_BINHOST'
48 _CROS_ARCHIVE_URL = 'CROS_ARCHIVE_URL'
49 _PRINT_INTERVAL = 1
50 _VM_TEST_ERROR_MSG = """
51 !!!VMTests failed!!!
52
53 Logs are uploaded to the corresponding %(vm_test_results)s directory. This
54 can be found by clicking on the artifacts link in the "Report" stage.
55 Specifically, look under test_harness/failed for the failing tests. For
56 more details, refer to the individual test that failed above -- or, if
57 an update failed, check the corresponding
58 update directory.
59 """
60 PRE_CQ = validation_pool.PRE_CQ
61
62 CQ_HWTEST_WAS_ABORTED = ('HWTest was aborted because another commit '
63                          'queue builder failed outside of HWTest.')
64
65 class NonHaltingBuilderStage(bs.BuilderStage):
66   """Build stage that fails a build but finishes the other steps."""
67
68   def Run(self):
69     try:
70       super(NonHaltingBuilderStage, self).Run()
71     except results_lib.StepFailure:
72       name = self.__class__.__name__
73       cros_build_lib.Error('Ignoring StepFailure in %s', name)
74
75
76 class ForgivingBuilderStage(bs.BuilderStage):
77   """Build stage that turns a build step red but not a build."""
78
79   def _HandleStageException(self, exc_info):
80     """Override and don't set status to FAIL but FORGIVEN instead."""
81     return self._HandleExceptionAsWarning(exc_info)
82
83
84 class RetryStage(object):
85   """Retry a given stage multiple times to see if it passes."""
86
87   def __init__(self, builder_run, max_retry, stage, *args, **kwargs):
88     """Create a RetryStage object.
89
90     Args:
91       builder_run: See arguments to bs.BuilderStage.__init__()
92       max_retry: The number of times to try the given stage.
93       stage: The stage class to create.
94       *args: A list of arguments to pass to the stage constructor.
95       **kwargs: A dict of keyword arguments to pass to the stage constructor.
96     """
97     self._run = builder_run
98     self.max_retry = max_retry
99     self.stage = stage
100     self.args = (builder_run,) + args
101     self.kwargs = kwargs
102     self.names = []
103     self.attempt = None
104
105   def GetStageNames(self):
106     """Get a list of the places where this stage has recorded results."""
107     return self.names[:]
108
109   def _PerformStage(self):
110     """Run the stage once, incrementing the attempt number as needed."""
111     suffix = ' (attempt %d)' % (self.attempt,)
112     stage_obj = self.stage(
113         *self.args, attempt=self.attempt, max_retry=self.max_retry,
114         suffix=suffix, **self.kwargs)
115     self.names.extend(stage_obj.GetStageNames())
116     self.attempt += 1
117     stage_obj.Run()
118
119   def Run(self):
120     """Retry the given stage multiple times to see if it passes."""
121     self.attempt = 1
122     retry_util.RetryException(
123         results_lib.RetriableStepFailure, self.max_retry, self._PerformStage)
124
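# Usage sketch -- the wrapped stage and arguments here are hypothetical; any
# stage class that raises results_lib.RetriableStepFailure on a retryable
# error (and accepts the attempt/max_retry/suffix kwargs) can be wrapped:
#
#   stage = RetryStage(builder_run, 3, SomeFlakyStage, board)
#   stage.Run()  # Re-runs SomeFlakyStage on retriable failures, 3 tries max.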
125
126 class BoardSpecificBuilderStage(bs.BuilderStage):
127   """Builder stage that is specific to a board.
128
129   The following attributes are provided on self:
130     _current_board: The active board for this stage.
131     board_runattrs: BoardRunAttributes object for this stage.
132   """
133
134   def __init__(self, builder_run, board, **kwargs):
135     super(BoardSpecificBuilderStage, self).__init__(builder_run, **kwargs)
136     # Validate |board| before it is used to look up run attributes.
137     if not isinstance(board, basestring):
138       raise TypeError('Expected string, got %r' % (board,))
139 
140     self._current_board = board
141     self.board_runattrs = builder_run.GetBoardRunAttrs(board)
142 
143     # Add a board name suffix to differentiate between various boards (in case
144     # more than one board is built on a single builder).
145     if len(self._boards) > 1 or self._run.config.grouped:
146       self.name = '%s [%s]' % (self.name, board)
147
148   def _RecordResult(self, *args, **kwargs):
149     """Record a successful or failed result."""
150     kwargs.setdefault('board', self._current_board)
151     super(BoardSpecificBuilderStage, self)._RecordResult(*args, **kwargs)
152
153   def GetParallel(self, board_attr, timeout=None, pretty_name=None):
154     """Wait for given |board_attr| to show up.
155
156     Args:
157       board_attr: A valid board runattribute name.
158       timeout: Timeout in seconds.  None value means wait forever.
159       pretty_name: Optional name to use instead of raw board_attr in
160         log messages.
161
162     Returns:
163       Value of board_attr found.
164
165     Raises:
166       AttrTimeoutError if timeout occurs.
167     """
168     timeout_str = 'forever'
169     if timeout is not None:
170       timeout_str = '%d minutes' % int((timeout / 60) + 0.5)
171
172     if pretty_name is None:
173       pretty_name = board_attr
174
175     cros_build_lib.Info('Waiting up to %s for %s ...', timeout_str, pretty_name)
176     return self.board_runattrs.GetParallel(board_attr, timeout=timeout)
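
  # Usage sketch -- the attribute name below is hypothetical; any registered
  # board run attribute may be awaited this way:
  #
  #   path = self.GetParallel('some_board_attr', timeout=600,
  #                           pretty_name='artifact generation')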
177
178   def GetImageDirSymlink(self, pointer='latest-cbuildbot'):
179     """Get the location of the current image."""
180     return os.path.join(self._run.buildroot, 'src', 'build', 'images',
181                         self._current_board, pointer)
182
183
184 class ArchivingStageMixin(object):
185   """Stage with utilities for uploading artifacts.
186
187   This provides functionality for doing archiving.  All it needs is access
188   to the BuilderRun object at self._run.  No __init__ needed.
189
190   Attributes:
191     acl: GS ACL to use for uploads.
192     archive: Archive object.
193     archive_path: Local path where archives are kept for this run.  Also copy
194       of self.archive.archive_path.
195     download_url: The URL where artifacts for this run can be downloaded.
196       Also copy of self.archive.download_url.
197     upload_url: The Google Storage location where artifacts for this run should
198       be uploaded.  Also copy of self.archive.upload_url.
199     version: Copy of self.archive.version.
200   """
201
202   PROCESSES = 10
203
204   @property
205   def archive(self):
206     """Retrieve the Archive object to use."""
207     # pylint: disable=W0201
208     if not hasattr(self, '_archive'):
209       self._archive = self._run.GetArchive()
210
211     return self._archive
212
213   @property
214   def acl(self):
215     """Retrieve GS ACL to use for uploads."""
216     return self.archive.upload_acl
217
218   # TODO(mtennant): Get rid of this property.
219   @property
220   def version(self):
221     """Retrieve the ChromeOS version for the archiving."""
222     return self.archive.version
223
224   @property
225   def archive_path(self):
226     """Local path where archives are kept for this run."""
227     return self.archive.archive_path
228
229   # TODO(mtennant): Rename base_archive_path.
230   @property
231   def bot_archive_root(self):
232     """Path of directory one level up from self.archive_path."""
233     return os.path.dirname(self.archive_path)
234
235   @property
236   def upload_url(self):
237     """The GS location where artifacts should be uploaded for this run."""
238     return self.archive.upload_url
239
240   @property
241   def base_upload_url(self):
242     """The GS path one level up from self.upload_url."""
243     return os.path.dirname(self.upload_url)
244
245   @property
246   def download_url(self):
247     """The URL where artifacts for this run can be downloaded."""
248     return self.archive.download_url
249
250   @contextlib.contextmanager
251   def ArtifactUploader(self, queue=None, archive=True, strict=True):
252     """Upload each queued input in the background.
253
254     This context manager starts a set of workers in the background, who each
255     wait for input on the specified queue. These workers run
256     self.UploadArtifact(*args, archive=archive) for each input in the queue.
257
258     Args:
259       queue: Queue to use. Add artifacts to this queue, and they will be
260         uploaded in the background.  If None, one will be created on the fly.
261       archive: Whether to automatically copy files to the archive dir.
262       strict: Whether to treat upload errors as fatal.
263
264     Returns:
265       The queue to use. This is only useful if you did not supply a queue.
266     """
267     upload = lambda path: self.UploadArtifact(path, archive, strict)
268     with parallel.BackgroundTaskRunner(upload, queue=queue,
269                                        processes=self.PROCESSES) as bg_queue:
270       yield bg_queue
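
  # Usage sketch -- the file name is hypothetical.  Each item put on the
  # queue is an argument list for self.UploadArtifact, the same pattern
  # UploadMetadata uses below:
  #
  #   with self.ArtifactUploader(archive=False) as queue:
  #     queue.put(['image.tar.xz'])  # Uploaded in the background.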
271
272   def PrintDownloadLink(self, filename, prefix='', text_to_display=None):
273     """Print a link to an artifact in Google Storage.
274
275     Args:
276       filename: The filename of the uploaded file.
277       prefix: The prefix to put in front of the filename.
278       text_to_display: Text to display. If None, use |prefix| + |filename|.
279     """
280     url = '%s/%s' % (self.download_url.rstrip('/'), filename)
281     if not text_to_display:
282       text_to_display = '%s%s' % (prefix, filename)
283     cros_build_lib.PrintBuildbotLink(text_to_display, url)
284
285   def _GetUploadUrls(self):
286     """Returns a list of all urls to upload artifacts to."""
287     urls = [self.upload_url]
288     if hasattr(self, '_current_board'):
289       custom_artifacts_file = portage_utilities.ReadOverlayFile(
290           'scripts/artifacts.json', board=self._current_board)
291       if custom_artifacts_file is not None:
292         json_file = json.loads(custom_artifacts_file)
293         for url in json_file.get('extra_upload_urls', []):
294           urls.append('/'.join([url, self._bot_id, self.version]))
295     return urls
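
  # A minimal sketch of an overlay's scripts/artifacts.json (the bucket URL
  # is hypothetical); each extra URL gets '<bot_id>/<version>' appended, so
  # the layout mirrors self.upload_url:
  #
  #   {
  #     "extra_upload_urls": ["gs://example-artifacts-mirror"]
  #   }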
296
297   def UploadArtifact(self, path, archive=True, strict=True):
298     """Upload generated artifact to Google Storage.
299
300     Args:
301       path: Path of local file to upload to Google Storage.
302       archive: Whether to automatically copy files to the archive dir.
303       strict: Whether to treat upload errors as fatal.
304     """
305     filename = path
306     if archive:
307       filename = commands.ArchiveFile(path, self.archive_path)
308     try:
309       commands.UploadArchivedFile(
310           self.archive_path, self._GetUploadUrls(), filename, self._run.debug,
311           update_list=True, acl=self.acl)
312     except (gs.GSContextException, timeout_util.TimeoutError):
313       cros_build_lib.PrintBuildbotStepText('Upload failed')
314       if strict:
315         raise
316
317       # Treat gsutil flake as a warning if it's the only problem.
318       self._HandleExceptionAsWarning(sys.exc_info())
319
320   def GetMetadata(self, config=None, stage=None, final_status=None,
321                   sync_instance=None, completion_instance=None):
322     """Constructs the metadata json object.
323
324     Args:
325       config: The build config for this run.  Defaults to self._run.config.
326       stage: The stage name that this metadata file is being uploaded for.
327       final_status: Whether the build passed or failed. If None, the build
328         will be treated as still running.
329       sync_instance: The stage instance that was used for syncing the source
330         code. This should be a derivative of SyncStage. If None, the list of
331         commit queue patches will not be included in the metadata.
332       completion_instance: The stage instance that was used to wait for slave
333         completion. Used to add slave build information to master builder's
334         metadata. If None, no such status information will be included. If not
335         None, this should be a derivative of MasterSlaveSyncCompletionStage.
336     """
337     builder_run = self._run
338     build_root = self._build_root
339     config = config or builder_run.config
340
341     commit_queue_stages = (CommitQueueSyncStage, PreCQSyncStage)
342     get_changes_from_pool = (sync_instance and
343                              isinstance(sync_instance, commit_queue_stages) and
344                              sync_instance.pool)
345
346     get_statuses_from_slaves = (config['master'] and
347                                 completion_instance and
348                                 isinstance(completion_instance,
349                                            MasterSlaveSyncCompletionStage))
350
351     return cbuildbot_metadata.CBuildbotMetadata.GetMetadataDict(
352         builder_run, build_root, get_changes_from_pool,
353         get_statuses_from_slaves, config, stage, final_status, sync_instance,
354         completion_instance)
355
356   def UploadMetadata(self, config=None, stage=None, upload_queue=None,
357                      **kwargs):
358     """Create and upload JSON of various metadata describing this run.
359
360     Args:
361       config: Build config to use.  Passed to GetMetadata.
362       stage: Stage to upload metadata for.  If None the metadata is for the
363         entire run.
364       upload_queue: If specified then put the artifact file to upload on
365         this queue.  If None then upload it directly now.
366       kwargs: Passed to self.GetMetadata.
367     """
368     metadata_for = '[no config]'
369     if config is not None:
370       metadata_for = config.name
371       if stage is not None:
372         metadata_for = '%s:%s' % (metadata_for, stage)
373
374     cros_build_lib.Info('Creating metadata for %s run now.', metadata_for)
375     metadata_dict = self.GetMetadata(config=config, stage=stage, **kwargs)
376     self._run.attrs.metadata.UpdateWithDict(metadata_dict)
377
378     filename = constants.METADATA_JSON
379     if stage is not None:
380       filename = constants.METADATA_STAGE_JSON % {'stage': stage}
381     metadata_json = os.path.join(self.archive_path, filename)
382
383     # Stages may run in parallel, so we have to do atomic updates on this.
384     cros_build_lib.Info('Writing metadata for %s to %s.', metadata_for,
385                         metadata_json)
386     osutils.WriteFile(metadata_json, self._run.attrs.metadata.GetJSON(),
387                       atomic=True)
388
389     if upload_queue is not None:
390       cros_build_lib.Info('Adding metadata for %s to upload queue.',
391                           metadata_for)
392       upload_queue.put([filename])
393     else:
394       cros_build_lib.Info('Uploading metadata for %s now.', metadata_for)
395       self.UploadArtifact(filename, archive=False)
396
397
398 # TODO(mtennant): This class continues to exist only for subclasses that still
399 # need self.archive_stage.  Hopefully, we can get rid of that need, eventually.
400 class ArchivingStage(BoardSpecificBuilderStage, ArchivingStageMixin):
401   """Helper for stages that archive files.
402
403   See ArchivingStageMixin for functionality.
404
405   Attributes:
406     archive_stage: The ArchiveStage instance for this board.
407   """
408
409   def __init__(self, builder_run, board, archive_stage, **kwargs):
410     super(ArchivingStage, self).__init__(builder_run, board, **kwargs)
411     self.archive_stage = archive_stage
412
413
414 class CleanUpStage(bs.BuilderStage):
415   """Stages that cleans up build artifacts from previous runs.
416
417   This stage cleans up previous KVM state, temporary git commits,
418   clobbers, and wipes tmp inside the chroot.
419   """
420
421   option_name = 'clean'
422
423   def _CleanChroot(self):
424     commands.CleanupChromeKeywordsFile(self._boards,
425                                        self._build_root)
426     chroot_tmpdir = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR,
427                                  'tmp')
428     if os.path.exists(chroot_tmpdir):
429       cros_build_lib.SudoRunCommand(['rm', '-rf', chroot_tmpdir],
430                                     print_cmd=False)
431       cros_build_lib.SudoRunCommand(['mkdir', '--mode', '1777', chroot_tmpdir],
432                                     print_cmd=False)
433
434   def _DeleteChroot(self):
435     chroot = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
436     if os.path.exists(chroot):
437       # At this stage, it's not safe to run the cros_sdk inside the buildroot
438       # itself because we haven't sync'd yet, and the version of the chromite
439       # in there might be broken. Since we've already unmounted everything in
440       # there, we can just remove it using rm -rf.
441       osutils.RmDir(chroot, ignore_missing=True, sudo=True)
442
443   def _DeleteArchivedTrybotImages(self):
444     """Clear all previous archive images to save space."""
445     for trybot in (False, True):
446       archive_root = self._run.GetArchive().GetLocalArchiveRoot(trybot=trybot)
447       osutils.RmDir(archive_root, ignore_missing=True)
448
449   def _DeleteArchivedPerfResults(self):
450     """Clear any previously stashed perf results from hw testing."""
451     for result in glob.glob(os.path.join(
452         self._run.options.log_dir,
453         '*.%s' % HWTestStage.PERF_RESULTS_EXTENSION)):
454       os.remove(result)
455
456   def _DeleteChromeBuildOutput(self):
457     chrome_src = os.path.join(self._run.options.chrome_root, 'src')
458     for out_dir in glob.glob(os.path.join(chrome_src, 'out_*')):
459       osutils.RmDir(out_dir)
460
461   def PerformStage(self):
462     if (not (self._run.options.buildbot or self._run.options.remote_trybot)
463         and self._run.options.clobber):
464       if not commands.ValidateClobber(self._build_root):
465         cros_build_lib.Die("--clobber in local mode must be approved.")
466
467     # If we can't get a manifest out of it, then it's not usable and must be
468     # clobbered.
469     manifest = None
470     if not self._run.options.clobber:
471       try:
472         manifest = git.ManifestCheckout.Cached(self._build_root, search=False)
473       except (KeyboardInterrupt, MemoryError, SystemExit):
474         raise
475       except Exception as e:
476         # Either there is no repo there, or the manifest isn't usable.  If the
477         # directory exists, log the exception for debugging reasons.  Either
478         # way, the checkout needs to be wiped since it's in an unknown
479         # state.
480         if os.path.exists(self._build_root):
481           cros_build_lib.Warning("ManifestCheckout at %s is unusable: %s",
482                                  self._build_root, e)
483
484     # Clean mount points first to be safe about deleting.
485     commands.CleanUpMountPoints(self._build_root)
486
487     if manifest is None:
488       self._DeleteChroot()
489       repository.ClearBuildRoot(self._build_root,
490                                 self._run.options.preserve_paths)
491     else:
492       tasks = [functools.partial(commands.BuildRootGitCleanup,
493                                  self._build_root),
494                functools.partial(commands.WipeOldOutput, self._build_root),
495                self._DeleteArchivedTrybotImages,
496                self._DeleteArchivedPerfResults]
497       if self._run.options.chrome_root:
498         tasks.append(self._DeleteChromeBuildOutput)
499       if self._run.config.chroot_replace and self._run.options.build:
500         tasks.append(self._DeleteChroot)
501       else:
502         tasks.append(self._CleanChroot)
503       parallel.RunParallelSteps(tasks)
504
505
506 class PatchChangesStage(bs.BuilderStage):
507   """Stage that patches a set of Gerrit changes to the buildroot source tree."""
508
509   def __init__(self, builder_run, patch_pool, **kwargs):
510     """Construct a PatchChangesStage.
511
512     Args:
513       builder_run: BuilderRun object.
514       patch_pool: A TrybotPatchPool object containing the different types of
515                   patches to apply.
516     """
517     super(PatchChangesStage, self).__init__(builder_run, **kwargs)
518     self.patch_pool = patch_pool
519
520   @staticmethod
521   def _CheckForDuplicatePatches(_series, changes):
522     conflicts = {}
523     duplicates = []
524     for change in changes:
525       if change.id is None:
526         cros_build_lib.Warning(
527             "Change %s lacks a usable ChangeId; duplicate checking cannot "
528             "be done for this change.  If cherry-picking fails, this is a "
529             "potential cause.", change)
530         continue
531       conflicts.setdefault(change.id, []).append(change)
532
533     duplicates = [x for x in conflicts.itervalues() if len(x) > 1]
534     if not duplicates:
535       return changes
536
537     for conflict in duplicates:
538       cros_build_lib.Error(
539           "Changes %s conflict with each other- they have same id %s.",
540           ', '.join(map(str, conflict)), conflict[0].id)
541
542     cros_build_lib.Die("Duplicate patches were encountered: %s", duplicates)
543
544   def _PatchSeriesFilter(self, series, changes):
545     return self._CheckForDuplicatePatches(series, changes)
546
547   def _ApplyPatchSeries(self, series, patch_pool, **kwargs):
548     """Applies a patch pool using a patch series."""
549     kwargs.setdefault('frozen', False)
550     # Honor the given ordering, so that if a gerrit/remote patch
551     # conflicts with a local patch, the gerrit/remote patch is
552     # blamed rather than the local one (patch ordering is typically
553     # local, gerrit, then remote).
554     kwargs.setdefault('honor_ordering', True)
555     kwargs['changes_filter'] = self._PatchSeriesFilter
556
557     _applied, failed_tot, failed_inflight = series.Apply(
558         list(patch_pool), **kwargs)
559
560     failures = failed_tot + failed_inflight
561     if failures:
562       self.HandleApplyFailures(failures)
563
564   def HandleApplyFailures(self, failures):
565     cros_build_lib.Die("Failed applying patches: %s",
566                        "\n".join(map(str, failures)))
567
568   def PerformStage(self):
569     class NoisyPatchSeries(validation_pool.PatchSeries):
570       """Custom PatchSeries that adds links to buildbot logs for remote trys."""
571
572       def ApplyChange(self, change, dryrun=False):
573         if isinstance(change, cros_patch.GerritPatch):
574           cros_build_lib.PrintBuildbotLink(str(change), change.url)
575         elif isinstance(change, cros_patch.UploadedLocalPatch):
576           cros_build_lib.PrintBuildbotStepText(str(change))
577
578         return validation_pool.PatchSeries.ApplyChange(self, change,
579                                                        dryrun=dryrun)
580
581     # If we're an external builder, ignore internal patches.
582     helper_pool = validation_pool.HelperPool.SimpleCreate(
583         cros_internal=self._run.config.internal, cros=True)
584
585     # Limit our resolution to non-manifest patches.
586     patch_series = NoisyPatchSeries(
587         self._build_root,
588         force_content_merging=True,
589         helper_pool=helper_pool,
590         deps_filter_fn=lambda p: not trybot_patch_pool.ManifestFilter(p))
591
592     self._ApplyPatchSeries(patch_series, self.patch_pool)
593
594
595 class BootstrapStage(PatchChangesStage):
596   """Stage that patches a chromite repo and re-executes inside it.
597
598   Attributes:
599     returncode - the returncode of the cbuildbot re-execution.  Valid after
600                  calling stage.Run().
601   """
602   option_name = 'bootstrap'
603
604   def __init__(self, builder_run, chromite_patch_pool,
605                manifest_patch_pool=None, **kwargs):
606     super(BootstrapStage, self).__init__(
607         builder_run, trybot_patch_pool.TrybotPatchPool(), **kwargs)
608     self.chromite_patch_pool = chromite_patch_pool
609     self.manifest_patch_pool = manifest_patch_pool
610     self.returncode = None
611
612   def _ApplyManifestPatches(self, patch_pool):
613     """Apply a pool of manifest patches to a temp manifest checkout.
614
615     Args:
616       patch_pool: The pool to apply.
617
618     Returns:
619       The path to the patched manifest checkout.
620
621     Raises:
622       Exception, if the new patched manifest cannot be parsed.
623     """
624     checkout_dir = os.path.join(self.tempdir, 'manifest-checkout')
625     repository.CloneGitRepo(checkout_dir,
626                             self._run.config.manifest_repo_url)
627
628     # TODO(davidjames): This code actually takes external manifest patches and
629     # tries to apply them to the internal manifest. We shouldn't do this.
630     patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
631         checkout_dir, deps_filter_fn=trybot_patch_pool.ManifestFilter,
632         tracking_branch=self._run.manifest_branch)
633
634     self._ApplyPatchSeries(patch_series, patch_pool)
635     # Create the branch that 'repo init -b <target_branch> -u <patched_repo>'
636     # will look for.
637     cmd = ['branch', '-f', self._run.manifest_branch,
638            constants.PATCH_BRANCH]
639     git.RunGit(checkout_dir, cmd)
640
641     # Verify that the patched manifest loads properly. Propagate any errors as
642     # exceptions.
643     manifest = os.path.join(checkout_dir, self._run.config.manifest)
644     git.Manifest.Cached(manifest, manifest_include_dir=checkout_dir)
645     return checkout_dir
646
647   @staticmethod
648   def _FilterArgsForApi(parsed_args, api_minor):
649     """Remove arguments that are introduced after an api version."""
650     def filter_fn(passed_arg):
651       return passed_arg.opt_inst.api_version <= api_minor
652
653     accepted, removed = commandline.FilteringParser.FilterArgs(
654         parsed_args, filter_fn)
655
656     if removed:
657       cros_build_lib.Warning('The following arguments were removed due to the '
658                              "target API version: '%s'" % ' '.join(removed))
659     return accepted
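
  # Worked example: when bootstrapping into a checkout whose chromite API
  # minor version is 1, an option introduced at api_version 2 (say,
  # --cache-dir, per FilterArgsForTargetCbuildbot below) fails filter_fn and
  # is dropped; options with api_version <= 1 pass through unchanged.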
660
661   @classmethod
662   def FilterArgsForTargetCbuildbot(cls, buildroot, cbuildbot_path, options):
663     _, minor = cros_build_lib.GetTargetChromiteApiVersion(buildroot)
664     args = [cbuildbot_path]
665     args.extend(options.build_targets)
666     args.extend(cls._FilterArgsForApi(options.parsed_args, minor))
667
668     # Only pass down --cache-dir if it was specified. By default, we want
669     # the cache dir to live in the root of each checkout, so this means that
670     # each instance of cbuildbot needs to calculate the default separately.
671     if minor >= 2 and options.cache_dir_specified:
672       args += ['--cache-dir', options.cache_dir]
673
674     return args
675
676   def HandleApplyFailures(self, failures):
677     """Handle the case where patches fail to apply."""
678     if self._run.options.pre_cq or self._run.config.pre_cq:
679       # Let the PreCQSync stage handle this failure. The PreCQSync stage will
680       # comment on CLs with the appropriate message when they fail to apply.
681       #
682       # WARNING: For manifest patches, the Pre-CQ attempts to apply external
683       # patches to the internal manifest, and this means we may flag a conflict
684       # here even if the patch applies cleanly. TODO(davidjames): Fix this.
685       cros_build_lib.PrintBuildbotStepWarnings()
686       cros_build_lib.Error('Failed applying patches: %s',
687                            '\n'.join(map(str, failures)))
688     else:
689       PatchChangesStage.HandleApplyFailures(self, failures)
690
691   # pylint: disable=E1101
692   @osutils.TempDirDecorator
693   def PerformStage(self):
694     # The plan for the builders is to use master branch to bootstrap other
695     # branches. Now, if we wanted to test patches for both the bootstrap code
696     # (on master) and the branched chromite (say, R20), we need to filter the
697     # patches by branch.
698     filter_branch = self._run.manifest_branch
699     if self._run.options.test_bootstrap:
700       filter_branch = 'master'
701
702     chromite_dir = os.path.join(self.tempdir, 'chromite')
703     reference_repo = os.path.join(constants.SOURCE_ROOT, 'chromite', '.git')
704     repository.CloneGitRepo(chromite_dir, constants.CHROMITE_URL,
705                             reference=reference_repo)
706     git.RunGit(chromite_dir, ['checkout', filter_branch])
707
708     def BranchAndChromiteFilter(patch):
709       return (trybot_patch_pool.BranchFilter(filter_branch, patch) and
710               trybot_patch_pool.ChromiteFilter(patch))
711
712     patch_series = validation_pool.PatchSeries.WorkOnSingleRepo(
713         chromite_dir, filter_branch,
714         deps_filter_fn=BranchAndChromiteFilter)
715
716     filtered_pool = self.chromite_patch_pool.FilterBranch(filter_branch)
717     if filtered_pool:
718       self._ApplyPatchSeries(patch_series, filtered_pool)
719
720     cbuildbot_path = constants.PATH_TO_CBUILDBOT
721     if not os.path.exists(os.path.join(self.tempdir, cbuildbot_path)):
722       cbuildbot_path = 'chromite/buildbot/cbuildbot'
723     # pylint: disable=W0212
724     cmd = self.FilterArgsForTargetCbuildbot(self.tempdir, cbuildbot_path,
725                                             self._run.options)
726
727     extra_params = ['--sourceroot=%s' % self._run.options.sourceroot]
728     extra_params.extend(self._run.options.bootstrap_args)
729     if self._run.options.test_bootstrap:
730       # We don't want re-executed instance to see this.
731       cmd = [a for a in cmd if a != '--test-bootstrap']
732     else:
733       # If we've already done the desired number of bootstraps, disable
734       # bootstrapping for the next execution.  Also pass in the patched manifest
735       # repository.
736       extra_params.append('--nobootstrap')
737       if self.manifest_patch_pool:
738         manifest_dir = self._ApplyManifestPatches(self.manifest_patch_pool)
739         extra_params.extend(['--manifest-repo-url', manifest_dir])
740
741     cmd += extra_params
742     result_obj = cros_build_lib.RunCommand(
743         cmd, cwd=self.tempdir, kill_timeout=30, error_code_ok=True)
744     self.returncode = result_obj.returncode
745
746
747 class SyncStage(bs.BuilderStage):
748   """Stage that performs syncing for the builder."""
749
750   option_name = 'sync'
751   output_manifest_sha1 = True
752
753   def __init__(self, builder_run, **kwargs):
754     super(SyncStage, self).__init__(builder_run, **kwargs)
755     self.repo = None
756     self.skip_sync = False
757
758     # TODO(mtennant): Why keep a duplicate copy of this config value
759     # at self.internal when it can always be retrieved from config?
760     self.internal = self._run.config.internal
761
762   def _GetManifestVersionsRepoUrl(self, read_only=False):
763     return cbuildbot_config.GetManifestVersionsRepoUrl(
764         self.internal,
765         read_only=read_only)
766
767   def Initialize(self):
768     self._InitializeRepo()
769
770   def _InitializeRepo(self):
771     """Set up the RepoRepository object."""
772     self.repo = self.GetRepoRepository()
773
774   def GetNextManifest(self):
775     """Returns the manifest to use."""
776     return self._run.config.manifest
777
778   def ManifestCheckout(self, next_manifest):
779     """Checks out the repository to the given manifest."""
780     self._Print('\n'.join(['BUILDROOT: %s' % self.repo.directory,
781                            'TRACKING BRANCH: %s' % self.repo.branch,
782                            'NEXT MANIFEST: %s' % next_manifest]))
783
784     if not self.skip_sync:
785       self.repo.Sync(next_manifest)
786
787     print >> sys.stderr, self.repo.ExportManifest(
788         mark_revision=self.output_manifest_sha1)
789
790   def PerformStage(self):
791     self.Initialize()
792     with osutils.TempDir() as tempdir:
793       # Save off the last manifest.
794       fresh_sync = True
795       if os.path.exists(self.repo.directory) and not self._run.options.clobber:
796         old_filename = os.path.join(tempdir, 'old.xml')
797         try:
798           old_contents = self.repo.ExportManifest()
799         except cros_build_lib.RunCommandError as e:
800           cros_build_lib.Warning(str(e))
801         else:
802           osutils.WriteFile(old_filename, old_contents)
803           fresh_sync = False
804
805       # Sync.
806       self.ManifestCheckout(self.GetNextManifest())
807
808       # Print the blamelist.
809       if fresh_sync:
810         cros_build_lib.PrintBuildbotStepText('(From scratch)')
811       elif self._run.options.buildbot:
812         lkgm_manager.GenerateBlameList(self.repo, old_filename)
813
814
815 class LKGMSyncStage(SyncStage):
816   """Stage that syncs to the last known good manifest blessed by builders."""
817
818   output_manifest_sha1 = False
819
820   def GetNextManifest(self):
821     """Override: Gets the LKGM."""
822     # TODO(sosa):  Should really use an initialized manager here.
823     if self.internal:
824       mv_dir = 'manifest-versions-internal'
825     else:
826       mv_dir = 'manifest-versions'
827
828     manifest_path = os.path.join(self._build_root, mv_dir)
829     manifest_repo = self._GetManifestVersionsRepoUrl(read_only=True)
830     manifest_version.RefreshManifestCheckout(manifest_path, manifest_repo)
831     return os.path.join(manifest_path, lkgm_manager.LKGMManager.LKGM_PATH)
832
833
834 class ChromeLKGMSyncStage(SyncStage):
835   """Stage that syncs to the last known good manifest for Chrome."""
836
837   output_manifest_sha1 = False
838
839   def GetNextManifest(self):
840     """Override: Gets the LKGM from the Chrome tree."""
841     chrome_lkgm = commands.GetChromeLKGM(self._run.options.chrome_version)
842
843     # We need a full buildspecs manager here as we need an initialized manifest
844     # manager with paths to the spec.
845     # TODO(mtennant): Consider registering as manifest_manager run param, for
846     # consistency, but be careful that consumers do not get confused.
847     # Currently only the "manifest_manager" from ManifestVersionedSync (and
848     # subclasses) is used later in the flow.
849     manifest_manager = manifest_version.BuildSpecsManager(
850       source_repo=self.repo,
851       manifest_repo=self._GetManifestVersionsRepoUrl(read_only=False),
852       build_names=self._run.GetBuilderIds(),
853       incr_type='build',
854       force=False,
855       branch=self._run.manifest_branch)
856
857     manifest_manager.BootstrapFromVersion(chrome_lkgm)
858     return manifest_manager.GetLocalManifest(chrome_lkgm)
859
860
861 class ManifestVersionedSyncStage(SyncStage):
862   """Stage that generates a unique manifest file, and sync's to it."""
863
864   # TODO(mtennant): Make this into a builder run value.
865   output_manifest_sha1 = False
866
867   def __init__(self, builder_run, **kwargs):
868     # Perform the sync at the end of the stage to the given manifest.
869     super(ManifestVersionedSyncStage, self).__init__(builder_run, **kwargs)
870     self.repo = None
871     self.manifest_manager = None
872
873     # If a builder pushes changes (even with dryrun mode), we need a writable
874     # repository. Otherwise, the push will be rejected by the server.
875     self.manifest_repo = self._GetManifestVersionsRepoUrl(read_only=False)
876
877     # 1. If we're uprevving Chrome, Chrome might have changed even if the
878     #    manifest has not, so we should force a build to double check. This
879     #    means that we'll create a new manifest, even if there are no changes.
880     # 2. If we're running with --debug, we should always run through to
881     #    completion, so as to ensure a complete test.
882     self._force = self._chrome_rev or self._run.options.debug
883
884   def HandleSkip(self):
885     """Initializes a manifest manager to the specified version if skipped."""
886     super(ManifestVersionedSyncStage, self).HandleSkip()
887     if self._run.options.force_version:
888       self.Initialize()
889       self.ForceVersion(self._run.options.force_version)
890
891   def ForceVersion(self, version):
892     """Creates a manifest manager from given version and returns manifest."""
893     cros_build_lib.PrintBuildbotStepText(version)
894     return self.manifest_manager.BootstrapFromVersion(version)
895
896   def VersionIncrementType(self):
897     """Return which part of the version number should be incremented."""
898     if self._run.manifest_branch == 'master':
899       return 'build'
900
901     return 'branch'
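
  # Worked example (version numbers hypothetical): under the ChromeOS
  # MAJOR.BRANCH.PATCH scheme, a 'build' increment on master moves 5000.0.0
  # toward 5001.0.0, whereas a 'branch' increment on a release branch moves
  # 5000.1.0 toward 5000.2.0.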
902
903   def RegisterManifestManager(self, manifest_manager):
904     """Save the given manifest manager for later use in this run.
905
906     Args:
907       manifest_manager: Expected to be a BuildSpecsManager.
908     """
909     self._run.attrs.manifest_manager = self.manifest_manager = manifest_manager
910
911   def Initialize(self):
912     """Initializes a manager that manages manifests for associated stages."""
913
914     dry_run = self._run.options.debug
915
916     self._InitializeRepo()
917
918     # If chrome_rev is somehow set, fail.
919     assert not self._chrome_rev, \
920         'chrome_rev is unsupported on release builders.'
921
922     self.RegisterManifestManager(manifest_version.BuildSpecsManager(
923         source_repo=self.repo,
924         manifest_repo=self.manifest_repo,
925         manifest=self._run.config.manifest,
926         build_names=self._run.GetBuilderIds(),
927         incr_type=self.VersionIncrementType(),
928         force=self._force,
929         branch=self._run.manifest_branch,
930         dry_run=dry_run,
931         master=self._run.config.master))
932
933   def GetNextManifest(self):
934     """Uses the initialized manifest manager to get the next manifest."""
935     assert self.manifest_manager, \
936         'Must run Initialize before checking out the build.'
937
938     to_return = self.manifest_manager.GetNextBuildSpec(
939         dashboard_url=self.ConstructDashboardURL())
940     previous_version = self.manifest_manager.GetLatestPassingSpec()
941     target_version = self.manifest_manager.current_version
942
943     # Print the Blamelist here.
944     url_prefix = 'http://chromeos-images.corp.google.com/diff/report?'
945     url = url_prefix + 'from=%s&to=%s' % (previous_version, target_version)
946     cros_build_lib.PrintBuildbotLink('Blamelist', url)
947     # The testManifestVersionedSyncOnePartBranch interacts badly with this
948     # function.  It doesn't fully initialize self.manifest_manager which
949     # causes target_version to be None.  Since there isn't a clean fix in
950     # either direction, just pass this through str().  In the normal case,
951     # it's already a string anyway.
952     cros_build_lib.PrintBuildbotStepText(str(target_version))
953
954     return to_return
955
956   @contextlib.contextmanager
957   def LocalizeManifest(self, manifest, filter_cros=False):
958     """Remove restricted checkouts from the manifest if needed.
959
960     Args:
961       manifest: The manifest to localize.
962       filter_cros: If set, then only checkouts with a remote of 'cros' or
963         'cros-internal' are kept, and the rest are filtered out.
964     """
965     if filter_cros:
966       with osutils.TempDir() as tempdir:
967         filtered_manifest = os.path.join(tempdir, 'filtered.xml')
968         doc = ElementTree.parse(manifest)
969         root = doc.getroot()
970         for node in root.findall('project'):
971           remote = node.attrib.get('remote')
972           if remote and remote not in constants.GIT_REMOTES:
973             root.remove(node)
974         doc.write(filtered_manifest)
975         yield filtered_manifest
976     else:
977       yield manifest
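
  # Sketch of the filtering -- the project entry is hypothetical.  With
  # filter_cros=True, a checkout such as
  #
  #   <project name="partner/private" remote="special-remote" ... />
  #
  # is removed because 'special-remote' is not in constants.GIT_REMOTES,
  # while projects whose remote is 'cros' or 'cros-internal' (or unset)
  # survive.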
978
979   def PerformStage(self):
980     self.Initialize()
981     if self._run.options.force_version:
982       next_manifest = self.ForceVersion(self._run.options.force_version)
983     else:
984       next_manifest = self.GetNextManifest()
985
986     if not next_manifest:
987       cros_build_lib.Info('Found no work to do.')
988       if self._run.attrs.manifest_manager.DidLastBuildFail():
989         raise results_lib.StepFailure('The previous build failed.')
990       else:
991         sys.exit(0)
992
993     # Log this early on for the release team to grep out before we finish.
994     if self.manifest_manager:
995       self._Print('\nRELEASETAG: %s\n' % (
996           self.manifest_manager.current_version))
997
998     # To keep local trybots working, remove restricted checkouts from the
999     # official manifest we get from manifest-versions.
1000     with self.LocalizeManifest(
1001         next_manifest, filter_cros=self._run.options.local) as new_manifest:
1002       self.ManifestCheckout(new_manifest)
1003
1004
1005 class MasterSlaveSyncStage(ManifestVersionedSyncStage):
1006   """Stage that generates a unique manifest file candidate, and sync's to it."""
1007
1008   # TODO(mtennant): Turn this into self._run.attrs.sub_manager or similar.
1009   # An instance of lkgm_manager.LKGMManager for slave builds.
1010   sub_manager = None
1011
1012   def __init__(self, builder_run, **kwargs):
1013     super(MasterSlaveSyncStage, self).__init__(builder_run, **kwargs)
1014     # lkgm_manager deals with making sure we're synced to whatever manifest
1015     # we get back in GetNextManifest so syncing again is redundant.
1016     self.skip_sync = True
1017
1018   def _GetInitializedManager(self, internal):
1019     """Returns an initialized lkgm manager.
1020
1021     Args:
1022       internal: Boolean.  True if this is using an internal manifest.
1023
1024     Returns:
1025       lkgm_manager.LKGMManager.
1026     """
1027     increment = self.VersionIncrementType()
1028     return lkgm_manager.LKGMManager(
1029         source_repo=self.repo,
1030         manifest_repo=cbuildbot_config.GetManifestVersionsRepoUrl(
1031             internal, read_only=False),
1032         manifest=self._run.config.manifest,
1033         build_names=self._run.GetBuilderIds(),
1034         build_type=self._run.config.build_type,
1035         incr_type=increment,
1036         force=self._force,
1037         branch=self._run.manifest_branch,
1038         dry_run=self._run.options.debug,
1039         master=self._run.config.master)
1040
1041   def Initialize(self):
1042     """Override: Creates an LKGMManager rather than a ManifestManager."""
1043     self._InitializeRepo()
1044     self.RegisterManifestManager(self._GetInitializedManager(self.internal))
1045     if (self._run.config.master and self._GetSlaveConfigs()):
1046       assert self.internal, 'Unified masters must use an internal checkout.'
1047       MasterSlaveSyncStage.sub_manager = self._GetInitializedManager(False)
1048
1049   def ForceVersion(self, version):
1050     manifest = super(MasterSlaveSyncStage, self).ForceVersion(version)
1051     if MasterSlaveSyncStage.sub_manager:
1052       MasterSlaveSyncStage.sub_manager.BootstrapFromVersion(version)
1053
1054     return manifest
1055
1056   def GetNextManifest(self):
1057     """Gets the next manifest using LKGM logic."""
1058     assert self.manifest_manager, \
1059         'Must run Initialize before we can get a manifest.'
1060     assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
1061         'Manifest manager instantiated with wrong class.'
1062
1063     if self._run.config.master:
1064       manifest = self.manifest_manager.CreateNewCandidate()
1065       if MasterSlaveSyncStage.sub_manager:
1066         MasterSlaveSyncStage.sub_manager.CreateFromManifest(
1067             manifest, dashboard_url=self.ConstructDashboardURL())
1068       return manifest
1069     else:
1070       return self.manifest_manager.GetLatestCandidate(
1071           dashboard_url=self.ConstructDashboardURL())
1072
1073
1074 class CommitQueueSyncStage(MasterSlaveSyncStage):
1075   """Commit Queue Sync stage that handles syncing and applying patches.
1076
1077   This stage handles syncing to a manifest, passing around that manifest to
1078   other builders and finding the Gerrit Reviews ready to be committed and
1079   applying them into its own checkout.
1080   """
1081
1082   def __init__(self, builder_run, **kwargs):
1083     super(CommitQueueSyncStage, self).__init__(builder_run, **kwargs)
1084     # Figure out the builder's name from the buildbot waterfall.
1085     builder_name = self._run.config.paladin_builder_name
1086     self.builder_name = builder_name if builder_name else self._run.config.name
1087
1088     # The pool of patches to be picked up by the commit queue.
1089     # - For the master commit queue, it's initialized in GetNextManifest.
1090     # - For slave commit queues, it's initialized in _SetPoolFromManifest.
1091     #
1092     # In all cases, the pool is saved to disk, and refreshed after bootstrapping
1093     # by HandleSkip.
1094     self.pool = None
1095
1096   def HandleSkip(self):
1097     """Handles skip and initializes validation pool from manifest."""
1098     super(CommitQueueSyncStage, self).HandleSkip()
1099     filename = self._run.options.validation_pool
1100     if filename:
1101       self.pool = validation_pool.ValidationPool.Load(filename,
1102           metadata=self._run.attrs.metadata)
1103     else:
1104       self._SetPoolFromManifest(self.manifest_manager.GetLocalManifest())
1105
1106   def _ChangeFilter(self, pool, changes, non_manifest_changes):
1107     # First, look for changes that were tested by the Pre-CQ.
1108     changes_to_test = []
1109     for change in changes:
1110       status = pool.GetCLStatus(PRE_CQ, change)
1111       if status == manifest_version.BuilderStatus.STATUS_PASSED:
1112         changes_to_test.append(change)
1113
1114     # If we only see changes that weren't verified by Pre-CQ, try all of the
1115     # changes. This ensures that the CQ continues to work even if the Pre-CQ is
1116     # down.
1117     if not changes_to_test:
1118       changes_to_test = changes
1119
1120     return changes_to_test, non_manifest_changes
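
  # Worked example (change names hypothetical): with changes = [A, B, C]
  # where only B has a Pre-CQ status of STATUS_PASSED, the CQ tests just
  # [B]; if none of the three passed (e.g. the Pre-CQ is down), it falls
  # back to testing all of [A, B, C].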
1121
1122   def _SetPoolFromManifest(self, manifest):
1123     """Sets validation pool based on manifest path passed in."""
1124     # Note that GetNextManifest() calls GetLatestCandidate() in this case,
1125     # so the repo will already be sync'd appropriately. This means that
1126     # AcquirePoolFromManifest does not need to sync.
1127     self.pool = validation_pool.ValidationPool.AcquirePoolFromManifest(
1128         manifest, self._run.config.overlays, self.repo,
1129         self._run.buildnumber, self.builder_name,
1130         self._run.config.master, self._run.options.debug,
1131         metadata=self._run.attrs.metadata)
1132
1133   def GetNextManifest(self):
1134     """Gets the next manifest using LKGM logic."""
1135     assert self.manifest_manager, \
1136         'Must run Initialize before we can get a manifest.'
1137     assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
1138         'Manifest manager instantiated with wrong class.'
1139
1140     if self._run.config.master:
1141       try:
1142         # In order to acquire a pool, we need an initialized buildroot.
1143         if not git.FindRepoDir(self.repo.directory):
1144           self.repo.Initialize()
1145         self.pool = pool = validation_pool.ValidationPool.AcquirePool(
1146             self._run.config.overlays, self.repo,
1147             self._run.buildnumber, self.builder_name,
1148             self._run.options.debug,
1149             check_tree_open=not self._run.options.debug or
1150                             self._run.options.mock_tree_status,
1151             changes_query=self._run.options.cq_gerrit_override,
1152             change_filter=self._ChangeFilter, throttled_ok=True,
1153             metadata=self._run.attrs.metadata)
1154
1155       except validation_pool.TreeIsClosedException as e:
1156         cros_build_lib.Warning(str(e))
1157         return None
1158
1159       manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool)
1160       if MasterSlaveSyncStage.sub_manager:
1161         MasterSlaveSyncStage.sub_manager.CreateFromManifest(
1162             manifest, dashboard_url=self.ConstructDashboardURL())
1163
1164       return manifest
1165     else:
1166       manifest = self.manifest_manager.GetLatestCandidate(
1167           dashboard_url=self.ConstructDashboardURL())
1168       if manifest:
1169         if self._run.config.build_before_patching:
1170           pre_build_passed = self._RunPrePatchBuild()
1171           cros_build_lib.PrintBuildbotStepName(
1172               'CommitQueueSync : Apply Patches')
1173           if not pre_build_passed:
1174             cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
1175
1176         self._SetPoolFromManifest(manifest)
1177         self.pool.ApplyPoolIntoRepo()
1178
1179       return manifest
1180
1181   def _RunPrePatchBuild(self):
1182     """Run through a pre-patch build to prepare for incremental build.
1183
1184     This function runs though the InitSDKStage, SetupBoardStage, and
1185     BuildPackagesStage. It is intended to be called before applying
1186     any patches under test, to prepare the chroot and sysroot in a state
1187     corresponding to ToT prior to an incremental build.
1188
1189     Returns:
1190       True if all stages were successful, False if any of them failed.
1191     """
1192     suffix = ' (pre-Patch)'
1193     try:
1194       InitSDKStage(self._run, chroot_replace=True, suffix=suffix).Run()
1195       for builder_run in self._run.GetUngroupedBuilderRuns():
1196         for board in builder_run.config.boards:
1197           SetupBoardStage(builder_run, board=board, suffix=suffix).Run()
1198           BuildPackagesStage(builder_run, board=board, suffix=suffix).Run()
1199     except results_lib.StepFailure:
1200       return False
1201
1202     return True
1203
1204   def PerformStage(self):
1205     """Performs normal stage and prints blamelist at end."""
1206     if self._run.options.force_version:
1207       self.HandleSkip()
1208     else:
1209       ManifestVersionedSyncStage.PerformStage(self)
1210
1211
1212 class ManifestVersionedSyncCompletionStage(ForgivingBuilderStage):
1213   """Stage that records board specific results for a unique manifest file."""
1214
1215   option_name = 'sync'
1216
1217   def __init__(self, builder_run, sync_stage, success, **kwargs):
1218     super(ManifestVersionedSyncCompletionStage, self).__init__(
1219         builder_run, **kwargs)
1220     self.sync_stage = sync_stage
1221     self.success = success
1222     # Message that, if set, will be sent along with the status in
1223     # UpdateStatus.
1224     self.message = None
1225
1226   def PerformStage(self):
1227     self._run.attrs.manifest_manager.UpdateStatus(
1228         success=self.success, message=self.message,
1229         dashboard_url=self.ConstructDashboardURL())
1230
1231
1232 class ImportantBuilderFailedException(results_lib.StepFailure):
1233   """Exception thrown when an important build fails to build."""
1234
1235
1236 class MasterSlaveSyncCompletionStage(ManifestVersionedSyncCompletionStage):
1237   """Stage that records whether we passed or failed to build/test manifest."""
1238
1239   def __init__(self, *args, **kwargs):
1240     super(MasterSlaveSyncCompletionStage, self).__init__(*args, **kwargs)
1241     self._slave_statuses = {}
1242
1243   def _FetchSlaveStatuses(self):
1244     """Fetch and return build status for slaves of this build.
1245
1246     If this build is not a master then return just the status of this build.
1247
1248     Returns:
1249       A dict with "bot id" keys and BuilderStatus objects for values.  All keys
1250       will have valid BuilderStatus values, but builders that never started
1251       will have a BuilderStatus with status MISSING.
1252     """
1253     if not self._run.config.master:
1254       # This is a slave build, so return the status for this build.
1255       if self._run.options.debug:
1256         # In debug mode, nothing is uploaded to Google Storage, so we bypass
1257         # the extra hop and just look at what we have locally.
1258         status = manifest_version.BuilderStatus.GetCompletedStatus(self.success)
1259         status_obj = manifest_version.BuilderStatus(status, self.message)
1260         return {self._bot_id: status_obj}
1261       else:
1262         # Slaves only need to look at their own status.
1263         return self._run.attrs.manifest_manager.GetBuildersStatus(
1264             [self._bot_id])
1265     else:
1266       # This is a master build, so return the statuses for all its slaves.
1267
1268       # Wait for slaves to finish, unless this is a debug run.
1269       wait_for_results = not self._run.options.debug
1270
1271       builders = self._GetSlaveConfigs()
1272       builder_names = [b['name'] for b in builders]
1273
1274       manager = self._run.attrs.manifest_manager
1275       if MasterSlaveSyncStage.sub_manager:
1276         manager = MasterSlaveSyncStage.sub_manager
1277
1278       return manager.GetBuildersStatus(builder_names, wait_for_results)
1279
1280   def _AbortCQHWTests(self):
1281     """Abort any HWTests started by the CQ."""
1282     if (cbuildbot_config.IsCQType(self._run.config.build_type) and
1283         self._run.manifest_branch == 'master'):
1284       version = self._run.GetVersion()
1285       if not commands.HaveCQHWTestsBeenAborted(version):
1286         commands.AbortCQHWTests(version, self._run.options.debug)
1287
1288   def _HandleStageException(self, exc_info):
1289     """Decide whether an exception should be treated as fatal."""
1290     # Besides the master, the completion stages also run on slaves, to report
1291     # their status back to the master. If the build failed, they throw an
1292     # exception here. For slave builders, marking this stage 'red' would be
1293     # redundant, since the build itself would already be red. In this case,
1294     # report a warning instead.
1295     # pylint: disable=W0212
1296     exc_type = exc_info[0]
1297     if (issubclass(exc_type, ImportantBuilderFailedException) and
1298         not self._run.config.master):
1299       return self._HandleExceptionAsWarning(exc_info)
1300     else:
1301       # In all other cases, exceptions should be treated as fatal. To
1302       # implement this, we bypass ForgivingStage and call
1303       # bs.BuilderStage._HandleStageException explicitly.
1304       return bs.BuilderStage._HandleStageException(self, exc_info)
1305
1306   def HandleSuccess(self):
1307     """Handle a successful build.
1308
1309     This function is called whenever the cbuildbot run is successful.
1310     For the master, this will only be called when all slave builders
1311     are also successful. This function may be overridden by subclasses.
1312     """
1313     # We only promote for the pfq, not chrome pfq.
1314     # TODO(build): Run this logic in debug mode too.
1315     if (not self._run.options.debug and
1316         cbuildbot_config.IsPFQType(self._run.config.build_type) and
1317         self._run.config.master and
1318         self._run.manifest_branch == 'master' and
1319         self._run.config.build_type != constants.CHROME_PFQ_TYPE):
1320       self._run.attrs.manifest_manager.PromoteCandidate()
1321       if MasterSlaveSyncStage.sub_manager:
1322         MasterSlaveSyncStage.sub_manager.PromoteCandidate()
1323
1324   def HandleFailure(self, failing, inflight):
1325     """Handle a build failure.
1326
1327     This function is called whenever the cbuildbot run fails.
1328     For the master, this will be called when any slave fails or times
1329     out. This function may be overridden by subclasses.
1330
1331     Args:
1332       failing: The names of the failing builders.
1333       inflight: The names of the builders that are still running.
1334     """
1335     if failing:
1336       self.HandleValidationFailure(failing)
1337     elif inflight:
1338       self.HandleValidationTimeout(inflight)
1339
1340   def HandleValidationFailure(self, failing):
         """Print a warning listing the slave builders that failed."""
1341     cros_build_lib.PrintBuildbotStepWarnings()
1342     cros_build_lib.Warning('\n'.join([
1343         'The following builders failed with this manifest:',
1344         ', '.join(sorted(failing)),
1345         'Please check the logs of the failing builders for details.']))
1346
1347   def HandleValidationTimeout(self, inflight_statuses):
         """Print a warning listing the slave builders that timed out."""
1348     cros_build_lib.PrintBuildbotStepWarnings()
1349     cros_build_lib.Warning('\n'.join([
1350         'The following builders took too long to finish:',
1351         ', '.join(sorted(inflight_statuses)),
1352         'Please check the logs of these builders for details.']))
1353
1354   def PerformStage(self):
1355     # Upload our pass/fail status to Google Storage.
1356     self._run.attrs.manifest_manager.UploadStatus(
1357         success=self.success, message=self.message,
1358         dashboard_url=self.ConstructDashboardURL())
1359
1360     statuses = self._FetchSlaveStatuses()
1361     self._slave_statuses = statuses
1362     no_stat = set(builder for builder, status in statuses.iteritems()
1363                   if status.Missing())
1364     failing = set(builder for builder, status in statuses.iteritems()
1365                   if status.Failed())
1366     inflight = set(builder for builder, status in statuses.iteritems()
1367                    if status.Inflight())
1368
1369     # If all the failing or inflight builders were sanity checkers,
1370     # ignore the failure.
1371     fatal = self._IsFailureFatal(failing, inflight, no_stat)
1372
1373     if fatal:
1374       self._AnnotateFailingBuilders(failing, inflight, no_stat, statuses)
1375       self.HandleFailure(failing, inflight)
1376       raise ImportantBuilderFailedException()
1377     else:
1378       self.HandleSuccess()
1379
1380   def _IsFailureFatal(self, failing, inflight, no_stat):
1381     """Returns a boolean indicating whether the build should fail.
1382
1383     Args:
1384       failing: Set of builder names of slave builders that failed.
1385       inflight: Set of builder names of slave builders that are inflight
1386       no_stat: Set of builder names of slave builders that had status None.
1387
1388     Returns:
1389       True if any of the failing or inflight builders are not sanity check
1390       builders for this master, or if there were any non-sanity-check builders
1391       with status None.
1392     """
1393     sanity_builders = self._run.config.sanity_check_slaves or []
1394     sanity_builders = set(sanity_builders)
1395     return not sanity_builders.issuperset(failing | inflight | no_stat)
1396
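       # Illustrative sketch of the issuperset() check above, using
       # hypothetical builder names (not from the original code):
       #
       #   sanity = set(['sanity-paladin'])
       #   sanity.issuperset(set(['sanity-paladin']))       # True  -> not fatal
       #   sanity.issuperset(set(['x86-generic-paladin']))  # False -> fatal
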
1397   def _AnnotateFailingBuilders(self, failing, inflight, no_stat, statuses):
1398     """Add annotations that link to either failing or inflight builders.
1399
1400     Adds buildbot links to failing builder dashboards. If no builders are
1401     failing, adds links to inflight builders. Adds step text for builders
1402     with status None.
1403
1404     Args:
1405       failing: Set of builder names of slave builders that failed.
1406       inflight: Set of builder names of slave builders that are inflight.
1407       no_stat: Set of builder names of slave builders that had status None.
1408       statuses: A builder-name->status dictionary, which will provide
1409                 the dashboard_url values for any links.
1410     """
1411     builders_to_link = failing or inflight or []
1412     for builder in builders_to_link:
1413       if statuses[builder].dashboard_url:
1414         cros_build_lib.PrintBuildbotLink(builder,
1415                                          statuses[builder].dashboard_url)
1416     for builder in no_stat:
1417       cros_build_lib.PrintBuildbotStepText('%s did not start.' % builder)
1418
1419   def GetSlaveStatuses(self):
1420     """Returns cached slave status results.
1421
1422     Cached results are populated during PerformStage, so this function
1423     should only be called after PerformStage has returned.
1424
1425     Returns:
1426       A dictionary from build names to manifest_version.BuilderStatus
1427       builder status objects.
1428     """
1429     return self._slave_statuses
1430
1431
1432 class CommitQueueCompletionStage(MasterSlaveSyncCompletionStage):
1433   """Commits or reports errors to CL's that failed to be validated."""
1434
1435   def _HandleStageException(self, exc_info):
1436     """Decide whether an exception should be treated as fatal."""
1437     exc_type = exc_info[0]
         # |exc_type| is an exception class (exc_info[0]), not an instance, so
         # issubclass is the right check here; isinstance would always be False.
1438     if issubclass(
1439         exc_type, validation_pool.FailedToSubmitAllChangesNonFatalException):
1440       return self._HandleExceptionAsWarning(exc_info)
1441     else:
1442       return super(CommitQueueCompletionStage, self)._HandleStageException(
1443           exc_info)
1444
1445   def HandleSuccess(self):
1446     if self._run.config.master:
1447       self.sync_stage.pool.SubmitPool()
1448       # After submitting the pool, update the commit hashes for uprevved
1449       # ebuilds.
1450       manifest = git.ManifestCheckout.Cached(self._build_root)
1451       portage_utilities.EBuild.UpdateCommitHashesForChanges(
1452           self.sync_stage.pool.changes, self._build_root, manifest)
1453       if cbuildbot_config.IsPFQType(self._run.config.build_type):
1454         super(CommitQueueCompletionStage, self).HandleSuccess()
1455
1456   def HandleFailure(self, failing, inflight):
1457     """Handle a build failure or timeout in the Commit Queue.
1458
1459     This function performs any tasks that need to happen when the Commit Queue
1460     fails:
1461       - Abort the HWTests if necessary.
1462       - Push any CLs that indicate that they don't care about this failure.
1463       - Reject the rest of the changes, but only if the sanity check builders
1464         did NOT fail.
1465
1466     See MasterSlaveSyncCompletionStage.HandleFailure.
1467
1468     Args:
1469       failing: Names of the builders that failed.
1470       inflight: Names of the builders that timed out.
1471     """
1472     # Print out the status of the builds that failed or timed out.
1473     MasterSlaveSyncCompletionStage.HandleFailure(self, failing, inflight)
1474
1475     # Abort hardware tests to save time if we have already seen a failure,
1476     # except in the case where the only failure is a hardware test failure.
1477     #
1478     # When we're debugging hardware test failures, it's useful to see the
1479     # results on all platforms, to see if the failure is platform-specific.
1480     tracebacks = results_lib.Results.GetTracebacks()
1481     if not self.success and self._run.config['important']:
1482       if len(tracebacks) != 1 or tracebacks[0].failed_prefix != 'HWTest':
1483         self._AbortCQHWTests()
1484
1485     if self._run.config.master:
1486       # Even if there was a failure, we can submit the changes that indicate
1487       # that they don't care about this failure.
1488       messages = [self._slave_statuses[x].message for x in failing]
1489
1490       if failing and not inflight:
1491         tracebacks = set()
1492         for message in messages:
1493           # If there are no tracebacks, that means that the builder did not
1494           # report its status properly. Don't submit anything.
1495           if not message or not message.tracebacks:
1496             break
1497           tracebacks.update(message.tracebacks)
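             # NB: the 'else' below belongs to the 'for' loop, not an 'if'.
             # It runs only when the loop finishes without 'break', i.e. when
             # every failing builder reported usable tracebacks.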
1498         else:
1499           rejected = self.sync_stage.pool.SubmitPartialPool(tracebacks)
1500           self.sync_stage.pool.changes = rejected
1501
1502       sanity = self._WasBuildSane(
1503           self._run.config.sanity_check_slaves, self._slave_statuses)
1504       if not sanity:
1505         logging.info('Detected that a sanity-check builder failed. Will not '
1506                      'reject patches.')
1507       if failing:
1508         self.sync_stage.pool.HandleValidationFailure(messages, sanity=sanity)
1509       elif inflight:
1510         self.sync_stage.pool.HandleValidationTimeout(sanity=sanity)
1511
1512   @staticmethod
1513   def _WasBuildSane(sanity_check_slaves, slave_statuses):
1514     """Determines whether any of the sanity check slaves failed.
1515
1516     Args:
1517       sanity_check_slaves: Names of slave builders that are "sanity check"
1518         builders for the current master.
1519       slave_statuses: Dict of BuilderStatus objects by builder name keys.
1520
1521     Returns:
1522       True if no sanity builders ran and failed.
1523     """
1524     sanity_check_slaves = sanity_check_slaves or []
1525     return not any(x in slave_statuses and slave_statuses[x].Failed()
1526                    for x in sanity_check_slaves)
1527
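       # A sketch of the contract above (hypothetical names, statuses shown
       # informally): a sanity slave with no recorded status counts as sane.
       #
       #   _WasBuildSane(['sanity-bot'], {'sanity-bot': <failed status>})  # False
       #   _WasBuildSane(['sanity-bot'], {})                               # True
       #   _WasBuildSane(None, {'other-bot': <failed status>})             # True
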
1528   def PerformStage(self):
1529     # - If the build failed, and the builder was important, fetch a message
1530     # listing the patches which failed to be validated. This message is sent
1531     # along with the failed status to the master to indicate a failure.
1532     # - This is skipped when sync_stage did not apply a validation pool. For
1533     # instance on builders with do_not_apply_cq_patches=True, sync_stage will
1534     # be a MasterSlaveSyncStage and not have a |pool| attribute.
1535     if (not self.success and self._run.config.important and
1536         hasattr(self.sync_stage, 'pool')):
1537       self.message = self.sync_stage.pool.GetValidationFailedMessage()
1538
1539     super(CommitQueueCompletionStage, self).PerformStage()
1540
1541     self._run.attrs.manifest_manager.UpdateStatus(
1542         success=self.success, message=self.message,
1543         dashboard_url=self.ConstructDashboardURL())
1544
1545
1546 class PreCQSyncStage(SyncStage):
1547   """Sync and apply patches to test if they compile."""
1548
1549   def __init__(self, builder_run, patches, **kwargs):
1550     super(PreCQSyncStage, self).__init__(builder_run, **kwargs)
1551
1552     # The list of patches to test.
1553     self.patches = patches
1554
1555     # The ValidationPool of patches to test. Initialized in PerformStage, and
1556     # refreshed after bootstrapping by HandleSkip.
1557     self.pool = None
1558
1559   def HandleSkip(self):
1560     """Handles skip and loads validation pool from disk."""
1561     super(PreCQSyncStage, self).HandleSkip()
1562     filename = self._run.options.validation_pool
1563     if filename:
1564       self.pool = validation_pool.ValidationPool.Load(filename,
1565           metadata=self._run.attrs.metadata)
1566
1567   def PerformStage(self):
1568     super(PreCQSyncStage, self).PerformStage()
1569     self.pool = validation_pool.ValidationPool.AcquirePreCQPool(
1570         self._run.config.overlays, self._build_root,
1571         self._run.buildnumber, self._run.config.name,
1572         dryrun=self._run.options.debug_forced, changes=self.patches,
1573         metadata=self._run.attrs.metadata)
1574     self.pool.ApplyPoolIntoRepo()
1575
1576
1577 class PreCQCompletionStage(bs.BuilderStage):
1578   """Reports the status of a trybot run to Google Storage and Gerrit."""
1579
1580   def __init__(self, builder_run, sync_stage, success, **kwargs):
1581     super(PreCQCompletionStage, self).__init__(builder_run, **kwargs)
1582     self.sync_stage = sync_stage
1583     self.success = success
1584
1585   def PerformStage(self):
1586     # Update Gerrit and Google Storage with the Pre-CQ status.
1587     if self.success:
1588       self.sync_stage.pool.HandlePreCQSuccess()
1589     else:
1590       message = self.sync_stage.pool.GetValidationFailedMessage()
1591       self.sync_stage.pool.HandleValidationFailure([message])
1592
1593
1594 class PreCQLauncherStage(SyncStage):
1595   """Scans for CLs and automatically launches Pre-CQ jobs to test them."""
1596
1597   STATUS_INFLIGHT = validation_pool.ValidationPool.STATUS_INFLIGHT
1598   STATUS_PASSED = validation_pool.ValidationPool.STATUS_PASSED
1599   STATUS_FAILED = validation_pool.ValidationPool.STATUS_FAILED
1600   STATUS_LAUNCHING = validation_pool.ValidationPool.STATUS_LAUNCHING
1601   STATUS_WAITING = validation_pool.ValidationPool.STATUS_WAITING
1602
1603   # The number of minutes we allow before considering a launch attempt failed.
1604   # Launch times are only tracked within a single launcher run, so the
1605   # countdown starts over from scratch in the next run.
1606   LAUNCH_DELAY = 30
1607
1608   # The number of minutes we allow before considering an in-flight
1609   # job failed. Inflight start times are only tracked within a single
1610   # launcher run, so the countdown starts over from scratch in the next run.
1611   INFLIGHT_DELAY = 90
1612
1613   # The maximum number of patches we will allow in a given trybot run. This is
1614   # needed because our trybot infrastructure can only handle so many patches at
1615   # once.
1616   MAX_PATCHES_PER_TRYBOT_RUN = 50
1617
1618   def __init__(self, builder_run, **kwargs):
1619     super(PreCQLauncherStage, self).__init__(builder_run, **kwargs)
1620     self.skip_sync = True
1621     # Mapping from launching changes to the first known time when they
1622     # were launching.
1623     self.launching = {}
1624     # Mapping from inflight changes to the first known time when they
1625     # were inflight.
1626     self.inflight = {}
1627     self.retried = set()
1628
1629   def _HasLaunchTimedOut(self, change):
1630     """Check whether a given |change| has timed out on its trybot launch.
1631
1632     Assumes that the change is in the middle of being launched.
1633
1634     Returns:
1635       True if the change has timed out. False otherwise.
1636     """
1637     diff = datetime.timedelta(minutes=self.LAUNCH_DELAY)
1638     return datetime.datetime.now() - self.launching[change] > diff
1639
1640   def _HasInflightTimedOut(self, change):
1641     """Check whether a given |change| has timed out while trybot inflight.
1642
1643     Assumes that the change's trybot is inflight.
1644
1645     Returns:
1646       True if the change has timed out. False otherwise.
1647     """
1648     diff = datetime.timedelta(minutes=self.INFLIGHT_DELAY)
1649     return datetime.datetime.now() - self.inflight[change] > diff
1650
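       # Both timeout checks reduce to a wall-clock comparison; a minimal
       # sketch with hypothetical values:
       #
       #   started = self.inflight[change]   # recorded when first seen
       #   datetime.datetime.now() - started > datetime.timedelta(minutes=90)
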
1651   def GetPreCQStatus(self, pool, changes):
1652     """Get the Pre-CQ status of a list of changes.
1653
1654     Side effect: reject or retry changes that have timed out.
1655
1656     Args:
1657       pool: The validation pool.
1658       changes: Changes to examine.
1659
1660     Returns:
1661       busy: The set of CLs that are currently being tested.
1662       passed: The set of CLs that have been verified.
1663     """
1664     busy, passed = set(), set()
1665
1666     for change in changes:
1667       status = pool.GetCLStatus(PRE_CQ, change)
1668
1669       if status != self.STATUS_LAUNCHING:
1670         # The trybot is not launching, so we should remove it from our
1671         # launching timeout map.
1672         self.launching.pop(change, None)
1673
1674       if status != self.STATUS_INFLIGHT:
1675         # The trybot is not inflight, so we should remove it from our
1676         # inflight timeout map.
1677         self.inflight.pop(change, None)
1678
1679       if status == self.STATUS_LAUNCHING:
1680         # The trybot is in the process of launching.
1681         busy.add(change)
1682         if change not in self.launching:
1683           # Record the launch time of changes.
1684           self.launching[change] = datetime.datetime.now()
1685         elif self._HasLaunchTimedOut(change):
1686           if change in self.retried:
1687             msg = ('We were not able to launch a pre-cq trybot for your change.'
1688                    '\n\n'
1689                    'This problem can happen if the trybot waterfall is very '
1690                    'busy, or if there is an infrastructure issue. Please '
1691                    'notify the sheriff and mark your change as ready again. If '
1692                    'this problem occurs multiple times in a row, please file a '
1693                    'bug.')
1694
1695             pool.SendNotification(change, '%(details)s', details=msg)
1696             pool.RemoveCommitReady(change)
1697             pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
1698                                 self._run.options.debug)
1699             self.retried.discard(change)
1700           else:
1701             # Try the change again.
1702             self.retried.add(change)
1703             pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
1704                                 self._run.options.debug)
1705       elif status == self.STATUS_INFLIGHT:
1706         # Once a Pre-CQ run actually starts, it'll set the status to
1707         # STATUS_INFLIGHT.
1708         busy.add(change)
1709         if change not in self.inflight:
1710           # Record the inflight start time.
1711           self.inflight[change] = datetime.datetime.now()
1712         elif self._HasInflightTimedOut(change):
1713           msg = ('The pre-cq trybot for your change timed out after %s minutes.'
1714                  '\n\n'
1715                  'This problem can happen if your change causes the builder '
1716                  'to hang, or if there is some infrastructure issue. If your '
1717                  'change is not at fault you may mark your change as ready '
1718                  'again. If this problem occurs multiple times please notify '
1719                  'the sheriff and file a bug.' % self.INFLIGHT_DELAY)
1720
1721           pool.SendNotification(change, '%(details)s', details=msg)
1722           pool.RemoveCommitReady(change)
1723           pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_FAILED,
1724                               self._run.options.debug)
1725       elif status == self.STATUS_FAILED:
1726         # The Pre-CQ run failed for this change. It's possible that we got
1727         # unlucky and this change was just marked as 'Not Ready' by a bot. To
1728         # test this, mark the CL as 'waiting' for now. If the CL is still marked
1729         # as 'Ready' next time we check, we'll know the CL is truly still ready.
1730         busy.add(change)
1731         pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
1732                             self._run.options.debug)
1733       elif status == self.STATUS_PASSED:
1734         passed.add(change)
1735
1736     return busy, passed
1737
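       # Informal summary of the per-change handling above (a reading aid,
       # not an authoritative spec):
       #
       #   LAUNCHING --timeout, not yet retried--> WAITING (retried once)
       #   LAUNCHING --timeout, already retried--> FAILED  (notify, un-mark)
       #   INFLIGHT  --timeout-------------------> FAILED  (notify, un-mark)
       #   FAILED    ----------------------------> WAITING (re-check readiness)
       #   PASSED    ----------------------------> added to the passed set
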
1738   def LaunchTrybot(self, pool, plan):
1739     """Launch a Pre-CQ run with the provided list of CLs.
1740
1741     Args:
1742       pool: ValidationPool corresponding to |plan|.
1743       plan: The list of patches to test in the Pre-CQ run.
1744     """
1745     cmd = ['cbuildbot', '--remote', constants.PRE_CQ_BUILDER_NAME]
1746     if self._run.options.debug:
1747       cmd.append('--debug')
1748     for patch in plan:
1749       cmd += ['-g', cros_patch.AddPrefix(patch, patch.gerrit_number)]
1750     cros_build_lib.RunCommand(cmd, cwd=self._build_root)
1751     for patch in plan:
1752       if pool.GetCLStatus(PRE_CQ, patch) != self.STATUS_PASSED:
1753         pool.UpdateCLStatus(PRE_CQ, patch, self.STATUS_LAUNCHING,
1754                             self._run.options.debug)
1755
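       # For illustration, a plan with hypothetical changes 12345 (external)
       # and 23456 (internal) would run something like:
       #
       #   cbuildbot --remote <pre-cq builder> -g 12345 -g *23456
       #
       # where any internal-change prefix (e.g. '*') is added by
       # cros_patch.AddPrefix.
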
1756   def GetDisjointTransactionsToTest(self, pool, changes):
1757     """Get the list of disjoint transactions to test.
1758
1759     Side effect: reject or retry changes that have timed out.
1760
1761     Returns:
1762       A list of disjoint transactions to test. Each transaction should be sent
1763       to a different Pre-CQ trybot.
1764     """
1765     busy, passed = self.GetPreCQStatus(pool, changes)
1766
1767     # Create a list of disjoint transactions to test.
1768     manifest = git.ManifestCheckout.Cached(self._build_root)
1769     plans = pool.CreateDisjointTransactions(
1770         manifest, max_txn_length=self.MAX_PATCHES_PER_TRYBOT_RUN)
1771     for plan in plans:
1772       # If any of the CLs in the plan are currently "busy" being tested,
1773       # wait until they're done before launching our trybot run. This helps
1774       # avoid race conditions.
1775       #
1776       # Similarly, if all of the CLs in the plan have already been validated,
1777       # there's no need to launch a trybot run.
1778       plan = set(plan)
1779       if plan.issubset(passed):
1780         logging.info('CLs already verified: %r', ' '.join(map(str, plan)))
1781       elif plan.intersection(busy):
1782         logging.info('CLs currently being verified: %r',
1783                      ' '.join(map(str, plan.intersection(busy))))
1784         if plan.difference(busy):
1785           logging.info('CLs waiting on verification of dependencies: %r',
1786                        ' '.join(map(str, plan.difference(busy))))
1787       else:
1788         yield plan
1789
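       # Sketch of the plan filtering above with hypothetical CLs a, b and c:
       #
       #   plan = {a, b}, passed = {a, b}  -> logged only (already verified)
       #   plan = {a, b}, busy = {a}       -> logged only (wait for a)
       #   plan = {c}, busy = passed = {}  -> yielded; gets its own trybot run
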
1790   def ProcessChanges(self, pool, changes, _non_manifest_changes):
1791     """Process a list of changes that were marked as Ready.
1792
1793     From our list of changes that were marked as Ready, we create a
1794     list of disjoint transactions and send each one to a separate Pre-CQ
1795     trybot.
1796
1797     Non-manifest changes are just submitted here because they don't need to be
1798     verified by either the Pre-CQ or CQ.
1799     """
1800     # Submit non-manifest changes if we can.
1801     if timeout_util.IsTreeOpen(
1802         validation_pool.ValidationPool.STATUS_URL):
1803       pool.SubmitNonManifestChanges(check_tree_open=False)
1804
1805     # Launch trybots for manifest changes.
1806     for plan in self.GetDisjointTransactionsToTest(pool, changes):
1807       self.LaunchTrybot(pool, plan)
1808
1809     # Tell ValidationPool to keep waiting for more changes until we hit
1810     # its internal timeout.
1811     return [], []
1812
1813   def PerformStage(self):
1814     # Set up and initialize the repo.
1815     super(PreCQLauncherStage, self).PerformStage()
1816
1817     # Loop through all of the changes until we hit a timeout.
1818     validation_pool.ValidationPool.AcquirePool(
1819         self._run.config.overlays, self.repo,
1820         self._run.buildnumber,
1821         constants.PRE_CQ_LAUNCHER_NAME,
1822         dryrun=self._run.options.debug,
1823         changes_query=self._run.options.cq_gerrit_override,
1824         check_tree_open=False, change_filter=self.ProcessChanges,
1825         metadata=self._run.attrs.metadata)
1826
1827
1828 class BranchError(Exception):
1829   """Raised by branch creation code on error."""
1830
1831
1832 class BranchUtilStage(bs.BuilderStage):
1833   """Creates, deletes and renames branches, depending on cbuildbot options.
1834
1835   The two main types of branches are release branches and non-release
1836   branches.  Release branches have the form 'release-*' - e.g.,
1837   'release-R29-4319.B'.
1838
1839   On a very basic level, a branch is created by parsing the manifest of a
1840   specific version of Chrome OS (e.g., 4319.0.0), and creating the branch
1841   remotely for each checkout in the manifest at the specified hash.
1842
1843   Once a branch is created however, the branch component of the version on the
1844   newly created branch needs to be incremented.  Additionally, in some cases
1845   the Chrome major version (i.e., R29) and/or the Chrome OS version (i.e.,
1846   4319.0.0) of the source branch must be incremented
1847   (see _IncrementVersionOnDiskForSourceBranch docstring).  Finally, the external
1848   and internal manifests of the new branch need to be fixed up (see
1849   FixUpManifests docstring).
1850   """
1851
1852   COMMIT_MESSAGE = 'Bump %(target)s after branching %(branch)s'
1853
1854   def __init__(self, builder_run, **kwargs):
1855     super(BranchUtilStage, self).__init__(builder_run, **kwargs)
1856     self.dryrun = self._run.options.debug_forced
1857     self.branch_name = self._run.options.branch_name
1858     self.rename_to = self._run.options.rename_to
1859
1860   def _RunPush(self, checkout, src_ref, dest_ref, force=False):
1861     """Perform a git push for a checkout.
1862
1863     Args:
1864       checkout: A dictionary of checkout manifest attributes.
1865       src_ref: The source local ref to push to the remote.
1866       dest_ref: The local remote ref that maps to the destination ref name.
1867       force: Whether to override non-fast-forward checks.
1868     """
1869     # Convert local tracking ref to refs/heads/* on a remote:
1870     # refs/remotes/<remote name>/<branch> to refs/heads/<branch>.
1871     # If dest_ref is already refs/heads/<branch> it's a noop.
1872     dest_ref = git.NormalizeRef(git.StripRefs(dest_ref))
1873     push_to = git.RemoteRef(checkout['push_remote'], dest_ref)
1874     git.GitPush(checkout['local_path'], src_ref, push_to, dryrun=self.dryrun,
1875                 force=force)
1876
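       # Example of the normalization above (hypothetical remote 'cros'):
       # dest_ref 'refs/remotes/cros/release-R29' is rewritten to
       # 'refs/heads/release-R29' before pushing.  Passing src_ref='' (as
       # _DeleteBranch does) makes the push delete the remote branch.
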
1877   def _FetchAndCheckoutTo(self, checkout_dir, remote_ref):
1878     """Fetch a remote ref and check out to it.
1879
1880     Args:
1881       checkout_dir: Path to git repo to operate on.
1882       remote_ref: A git.RemoteRef object.
1883     """
1884     git.RunGit(checkout_dir, ['fetch', remote_ref.remote, remote_ref.ref],
1885                print_cmd=True)
1886     git.RunGit(checkout_dir, ['checkout', 'FETCH_HEAD'], print_cmd=True)
1887
1888   def _GetBranchSuffix(self, manifest, checkout):
1889     """Return the branch suffix for the given checkout.
1890
1891     If a given project is checked out to multiple locations, it is necessary
1892     to append a branch suffix. To be safe, we append branch suffixes for all
1893     repositories that use a non-standard branch name (e.g., if our default
1894     revision is "master", then any repository which does not use "master"
1895     has a non-standard branch name.)
1896
1897     Args:
1898       manifest: The associated ManifestCheckout.
1899       checkout: The associated ProjectCheckout.
1900     """
1901     # Get the default and tracking branch.
1902     suffix = ''
1903     if len(manifest.FindCheckouts(checkout['name'])) > 1:
1904       default_branch = git.StripRefs(manifest.default['revision'])
1905       tracking_branch = git.StripRefs(checkout['tracking_branch'])
1906       suffix = '-%s' % (tracking_branch,)
1907       if default_branch != 'master':
1908         suffix = re.sub('^-%s-' % re.escape(default_branch), '-', suffix)
1909     return suffix
1910
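       # Suffix sketch (hypothetical checkout): with default revision
       # 'master', a project checked out twice, one checkout tracking
       # 'special', gets suffix '-special', so branch 'release-R29-4319.B'
       # becomes 'release-R29-4319.B-special' for that checkout.
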
1911   def _GetSHA1(self, checkout, branch):
1912     """Get the SHA1 for the specified |branch| in the specified |checkout|.
1913
1914     Args:
1915       checkout: The ProjectCheckout to look in.
1916       branch: Remote branch to look for.
1917
1918     Returns:
1919       If the branch exists, returns the SHA1 of the branch. Otherwise, returns
1920       the empty string.  If branch is None, returns None.
1921     """
1922     if branch:
1923       cmd = ['show-ref', branch]
1924       result = git.RunGit(checkout['local_path'], cmd, error_code_ok=True)
1925       if result.returncode == 0:
1926         # Output looks like:
1927         # a00733b...30ee40e0c2c1 refs/remotes/cros/test-4980.B
1928         return result.output.strip().split()[0]
1929
1930       return ''
1931
1932   def _CopyBranch(self, src_checkout, src_branch, dst_branch, force=False):
1933     """Copy the given |src_branch| to |dst_branch|.
1934
1935     Args:
1936       src_checkout: The ProjectCheckout to work in.
1937       src_branch: The remote branch ref to copy from.
1938       dst_branch: The remote branch ref to copy to.
1939       force: If True then execute the copy even if dst_branch exists.
1940     """
1941     cros_build_lib.Info('Creating new branch "%s" for %s.',
1942                         dst_branch, src_checkout['name'])
1943     self._RunPush(src_checkout, src_ref=src_branch, dest_ref=dst_branch,
1944                   force=force)
1945
1946   def _DeleteBranch(self, src_checkout, branch):
1947     """Delete the given |branch| in the given |src_checkout|.
1948
1949     Args:
1950       src_checkout: The ProjectCheckout to work in.
1951       branch: The branch ref to delete.  Must be a remote branch.
1952     """
1953     cros_build_lib.Info('Deleting branch "%s" for %s.',
1954                         branch, src_checkout['name'])
1955     self._RunPush(src_checkout, src_ref='', dest_ref=branch)
1956
1957   def _ProcessCheckout(self, src_manifest, src_checkout):
1958     """Performs per-checkout push operations.
1959
1960     Args:
1961       src_manifest: The ManifestCheckout object for the current manifest.
1962       src_checkout: The ProjectCheckout object to process.
1963     """
1964     if not src_checkout.IsBranchableProject():
1965       # We don't have the ability to push branches to this repository. Just
1966       # use TOT instead.
1967       return
1968
1969     checkout_name = src_checkout['name']
1970     remote = src_checkout['push_remote']
1971     src_ref = src_checkout['revision']
1972     suffix = self._GetBranchSuffix(src_manifest, src_checkout)
1973
1974     # The source/destination branches depend on options.
1975     if self.rename_to:
1976       # Rename flow.  Both src and dst branches exist.
1977       src_branch = '%s%s' % (self.branch_name, suffix)
1978       dst_branch = '%s%s' % (self.rename_to, suffix)
1979     elif self._run.options.delete_branch:
1980       # Delete flow.  Only dst branch exists.
1981       src_branch = None
1982       dst_branch = '%s%s' % (self.branch_name, suffix)
1983     else:
1984       # Create flow (default).  Only dst branch exists.  Source
1985       # for the branch will just be src_ref.
1986       src_branch = None
1987       dst_branch = '%s%s' % (self.branch_name, suffix)
1988
1989     # Normalize branch refs to remote.  We only process remote branches.
1990     src_branch = git.NormalizeRemoteRef(remote, src_branch)
1991     dst_branch = git.NormalizeRemoteRef(remote, dst_branch)
1992
1993     # Determine whether src/dst branches exist now, by getting their sha1s.
         # Initialize src_sha1 so the comparison below is well-defined even
         # when no src_branch is given and src_ref is not a sha1.
         src_sha1 = None
1994     if src_branch:
1995       src_sha1 = self._GetSHA1(src_checkout, src_branch)
1996     elif git.IsSHA1(src_ref):
1997       src_sha1 = src_ref
1998     dst_sha1 = self._GetSHA1(src_checkout, dst_branch)
1999
2000     # Complain if the branch already exists, unless that is expected.
2001     force = self._run.options.force_create or self._run.options.delete_branch
2002     if dst_sha1 and not force:
2003       # We are either creating a branch or renaming a branch, and the
2004       # destination branch unexpectedly exists.  Accept this only if the
2005       # destination branch is already at the revision we want.
2006       if src_sha1 != dst_sha1:
2007         raise BranchError('Checkout %s already contains branch %s.  Run with '
2008                           '--force-create to overwrite.'
2009                           % (checkout_name, dst_branch))
2010
2011       cros_build_lib.Info('Checkout %s already contains branch %s and it '
2012                           'already points to revision %s', checkout_name,
2013                           dst_branch, dst_sha1)
2014
2015     elif self._run.options.delete_branch:
2016       # Delete the dst_branch, if it exists.
2017       if dst_sha1:
2018         self._DeleteBranch(src_checkout, dst_branch)
2019       else:
2020         raise BranchError('Checkout %s does not contain branch %s to delete.'
2021                           % (checkout_name, dst_branch))
2022
2023     elif self.rename_to:
2024       # Copy src_branch to dst_branch, if it exists, then delete src_branch.
2025       if src_sha1:
2026         self._CopyBranch(src_checkout, src_branch, dst_branch)
2027         self._DeleteBranch(src_checkout, src_branch)
2028       else:
2029         raise BranchError('Checkout %s does not contain branch %s to rename.'
2030                           % (checkout_name, src_branch))
2031
2032     else:
2033       # Copy src_ref to dst_branch.
2034       self._CopyBranch(src_checkout, src_ref, dst_branch,
2035                        force=self._run.options.force_create)
2036
2037   def _UpdateManifest(self, manifest_path):
2038     """Rewrite |manifest_path| to point at the right branch.
2039
2040     Args:
2041       manifest_path: The path to the manifest file.
2042     """
2043     src_manifest = git.ManifestCheckout.Cached(self._build_root,
2044                                                manifest_path=manifest_path)
2045     doc = ElementTree.parse(manifest_path)
2046     root = doc.getroot()
2047
2048     # Use the local branch ref.
2049     new_branch_name = self.rename_to if self.rename_to else self.branch_name
2050     new_branch_name = git.NormalizeRef(new_branch_name)
2051
2052     cros_build_lib.Info('Updating manifest for %s', new_branch_name)
2053
2054     for node in root.findall('project'):
2055       path = node.attrib['path']
2056       checkout = src_manifest.FindCheckoutFromPath(path)
2057       if checkout.IsBranchableProject():
2058         # Point at the new branch.
2059         node.attrib.pop('revision', None)
2060         node.attrib.pop('upstream', None)
2061         suffix = self._GetBranchSuffix(src_manifest, checkout)
2062         if suffix:
2063           node.attrib['revision'] = '%s%s' % (new_branch_name, suffix)
2064           cros_build_lib.Info('Pointing project %s at: %s',
2065                               node.attrib['name'], node.attrib['revision'])
2066       else:
2067         # Set this tag to any string to avoid pinning to a SHA1 on branch.
2068         if cros_build_lib.BooleanShellValue(node.get('pin'), True):
2069           git_repo = checkout.GetPath(absolute=True)
2070           repo_head = git.GetGitRepoRevision(git_repo)
2071           node.attrib['revision'] = repo_head
2072           cros_build_lib.Info('Pinning project %s at: %s',
2073                               node.attrib['name'], node.attrib['revision'])
2074         else:
2075           cros_build_lib.Info('Updating project %s', node.attrib['name'])
2076           # We can't branch this repository. Leave it alone.
2077           node.attrib['revision'] = checkout['revision']
2078           cros_build_lib.Info('Project %s UNPINNED using: %s',
2079                               node.attrib['name'], node.attrib['revision'])
2080
2081         # Cannot rely on a default value for get() here since 'upstream'
2082         # can be a valid key with a None value.
2083         upstream = checkout.get('upstream')
2084         if upstream is not None:
2085           node.attrib['upstream'] = upstream
2086
2087     for node in root.findall('default'):
2088       node.attrib['revision'] = new_branch_name
2089
2090     doc.write(manifest_path)
2091     return [node.attrib['name'] for node in root.findall('include')]
2092
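       # Rough sketch of the rewrite above for a hypothetical entry
       # <project name="foo" revision="refs/heads/master"/>:
       #
       #   branchable, no suffix  -> 'revision' dropped; inherits new <default>
       #   branchable, suffixed   -> revision="refs/heads/<new-branch><suffix>"
       #   unbranchable, pinned   -> revision="<current sha1 of the checkout>"
       #   unbranchable, pin=off  -> keeps the revision from the source manifest
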
2093   def _FixUpManifests(self, repo_manifest):
2094     """Points the checkouts at the new branch in the manifests.
2095
2096     Within the branch, make sure all manifests with projects that are
2097     "branchable" are checked out to "refs/heads/<new_branch>".  Do this
2098     by updating all manifests in the known manifest projects.
2099     """
2100     assert not self._run.options.delete_branch, 'Cannot fix a deleted branch.'
2101
2102     # Use local branch ref.
2103     branch_ref = git.NormalizeRef(self.branch_name)
2104
2105     cros_build_lib.Debug('Fixing manifest projects for new branch.')
2106     for project in constants.MANIFEST_PROJECTS:
2107       manifest_checkout = repo_manifest.FindCheckout(project)
2108       manifest_dir = manifest_checkout['local_path']
2109       push_remote = manifest_checkout['push_remote']
2110
2111       # Checkout revision can be either a sha1 or a branch ref.
2112       src_ref = manifest_checkout['revision']
2113       if not git.IsSHA1(src_ref):
2114         src_ref = git.NormalizeRemoteRef(push_remote, src_ref)
2115
2116       git.CreateBranch(
2117           manifest_dir, manifest_version.PUSH_BRANCH, src_ref)
2118
2119       # We want to process default.xml and official.xml + their imports.
2120       pending_manifests = [constants.DEFAULT_MANIFEST,
2121                            constants.OFFICIAL_MANIFEST]
2122       processed_manifests = []
2123
2124       while pending_manifests:
2125         # Canonicalize the manifest name (resolve dir and symlinks).
2126         manifest_path = os.path.join(manifest_dir, pending_manifests.pop())
2127         manifest_path = os.path.realpath(manifest_path)
2128
2129         # Don't process a manifest more than once.
2130         if manifest_path in processed_manifests:
2131           continue
2132
2133         processed_manifests.append(manifest_path)
2134
2135         if not os.path.exists(manifest_path):
2136           cros_build_lib.Info('Manifest not found: %s', manifest_path)
2137           continue
2138
2139         cros_build_lib.Debug('Fixing manifest at %s.', manifest_path)
2140         included_manifests = self._UpdateManifest(manifest_path)
2141         pending_manifests += included_manifests
2142
2143       git.RunGit(manifest_dir, ['add', '-A'], print_cmd=True)
2144       message = 'Fix up manifest after branching %s.' % branch_ref
2145       git.RunGit(manifest_dir, ['commit', '-m', message], print_cmd=True)
2146       push_to = git.RemoteRef(push_remote, branch_ref)
2147       git.GitPush(manifest_dir, manifest_version.PUSH_BRANCH, push_to,
2148                   dryrun=self.dryrun, force=self.dryrun)
2149
2150   def _IncrementVersionOnDisk(self, incr_type, push_to, message):
2151     """Bumps the version found in chromeos_version.sh on a branch.
2152
2153     Args:
2154       incr_type: See docstring for manifest_version.VersionInfo.
2155       push_to: A git.RemoteRef object.
2156       message: The message to give the git commit that bumps the version.
2157     """
2158     version_info = manifest_version.VersionInfo.from_repo(
2159         self._build_root, incr_type=incr_type)
2160     version_info.IncrementVersion()
2161     version_info.UpdateVersionFile(message, dry_run=self.dryrun,
2162                                    push_to=push_to)
2163
2164   @staticmethod
2165   def DetermineBranchIncrParams(version_info):
2166     """Determines the version component to bump for the new branch."""
2167     # We increment the left-most component that is zero.
2168     if version_info.branch_build_number != '0':
2169       if version_info.patch_number != '0':
2170         raise BranchError('Version %s cannot be branched.' %
2171                           version_info.VersionString())
2172       return 'patch', 'patch number'
2173     else:
2174       return 'branch', 'branch number'
2175
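       # For example: branching from 4230.0.0 bumps the branch number (the new
       # branch starts at 4230.1.0); branching from 4230.1.0 bumps the patch
       # number (4230.1.1); 4230.1.1 itself cannot be branched.
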
2176   @staticmethod
2177   def DetermineSourceIncrParams(source_name, dest_name):
2178     """Determines the version component to bump for the original branch."""
2179     if dest_name.startswith('refs/heads/release-'):
2180       return 'chrome_branch', 'Chrome version'
2181     elif source_name == 'refs/heads/master':
2182       return 'build', 'build number'
2183     else:
2184       return 'branch', 'branch build number'
2185
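       # For example: any 'refs/heads/release-*' destination bumps the Chrome
       # version on the source branch; otherwise a branch cut from master
       # bumps the build number, and a branch cut from any other branch bumps
       # the branch build number.
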
2186   def _IncrementVersionOnDiskForNewBranch(self, push_remote):
2187     """Bumps the version found in chromeos_version.sh on the new branch
2188
2189     When a new branch is created, the branch component of the new branch's
2190     version needs to bumped.
2191
2192     For example, say 'stabilize-link' is created from a the 4230.0.0 manifest.
2193     The new branch's version needs to be bumped to 4230.1.0.
2194
2195     Args:
2196       push_remote: a git remote name where the new branch lives.
2197     """
2198     # This needs to happen before the source branch version bump in
2199     # _IncrementVersionOnDiskForSourceBranch: our current overlay checkout is
2200     # exactly what we just pushed to the new branch, so no extra sync is
2201     # needed.  This also makes it easier to implement dryrun functionality
2202     # (the new branch doesn't actually get created in dryrun mode).
2203
2204     # Use local branch ref.
2205     branch_ref = git.NormalizeRef(self.branch_name)
2206     push_to = git.RemoteRef(push_remote, branch_ref)
2207     version_info = manifest_version.VersionInfo(
2208         version_string=self._run.options.force_version)
2209     incr_type, incr_target = self.DetermineBranchIncrParams(version_info)
2210     message = self.COMMIT_MESSAGE % {
2211         'target': incr_target,
2212         'branch': branch_ref,
2213     }
2214     self._IncrementVersionOnDisk(incr_type, push_to, message)
2215
2216   def _IncrementVersionOnDiskForSourceBranch(self, overlay_dir, push_remote,
2217                                              source_branch):
2218     """Bumps the version found in chromeos_version.sh on the source branch
2219
2220     The source branch refers to the branch that the manifest used for creating
2221     the new branch came from.  For release branches, we generally branch from a
2222     'master' branch manifest.
2223
2224     To work around crbug.com/213075, for both non-release and release branches,
2225     we need to bump the Chrome OS version on the source branch if the manifest
2226     used for branch creation is the latest generated manifest for the source
2227     branch.
2228
2229     When we are creating a release branch, the Chrome major version of the
2230     'master' (source) branch needs to be bumped.  For example, if we branch
2231     'release-R29-4230.B' from the 4230.0.0 manifest (which is from the 'master'
2232     branch), the 'master' branch's Chrome major version in chromeos_version.sh
2233     (which is 29) needs to be bumped to 30.
2234
2235     Args:
2236       overlay_dir: Absolute path to the chromiumos overlay repo.
2237       push_remote: The remote to push to.
2238       source_branch: The branch that the manifest we are using comes from.
2239     """
2240     push_to = git.RemoteRef(push_remote, source_branch)
2241     self._FetchAndCheckoutTo(overlay_dir, push_to)
2242
2243     # Use local branch ref.
2244     branch_ref = git.NormalizeRef(self.branch_name)
2245     tot_version_info = manifest_version.VersionInfo.from_repo(self._build_root)
2246     if (branch_ref.startswith('refs/heads/release-') or
2247         tot_version_info.VersionString() == self._run.options.force_version):
2248       incr_type, incr_target = self.DetermineSourceIncrParams(
2249           source_branch, branch_ref)
2250       message = self.COMMIT_MESSAGE % {
2251           'target': incr_target,
2252           'branch': branch_ref,
2253       }
2254       try:
2255         self._IncrementVersionOnDisk(incr_type, push_to, message)
2256       except cros_build_lib.RunCommandError:
2257         # There's a chance we are racing against the buildbots for this
2258         # increment.  We shouldn't quit the script because of this.  Instead, we
2259         # print a warning.
2260         self._FetchAndCheckoutTo(overlay_dir, push_to)
2261         new_version = manifest_version.VersionInfo.from_repo(self._build_root)
2262         if new_version.VersionString() != tot_version_info.VersionString():
2263           logging.warning('Version number for branch %s was bumped by another '
2264                           'bot.', push_to.ref)
2265         else:
2266           raise
2267
2268   def PerformStage(self):
2269     """Run the branch operation."""
2270     # Set up and initialize the repo.
2271     super(BranchUtilStage, self).PerformStage()
2272
2273     repo_manifest = git.ManifestCheckout.Cached(self._build_root)
2274     checkouts = repo_manifest.ListCheckouts()
2275
2276     cros_build_lib.Debug(
2277         'Processing %d checkouts from manifest in parallel.', len(checkouts))
2278     args = [[repo_manifest, x] for x in checkouts]
2279     parallel.RunTasksInProcessPool(self._ProcessCheckout, args, processes=16)
2280
2281     if not self._run.options.delete_branch:
2282       self._FixUpManifests(repo_manifest)
2283
2284     # Increment versions for a new branch.
2285     if not (self._run.options.delete_branch or self.rename_to):
2286       overlay_name = 'chromiumos/overlays/chromiumos-overlay'
2287       overlay_checkout = repo_manifest.FindCheckout(overlay_name)
2288       overlay_dir = overlay_checkout['local_path']
2289       push_remote = overlay_checkout['push_remote']
2290       self._IncrementVersionOnDiskForNewBranch(push_remote)
2291
2292       source_branch = repo_manifest.default['revision']
2293       self._IncrementVersionOnDiskForSourceBranch(overlay_dir, push_remote,
2294                                                   source_branch)
2295
2296
2297 class RefreshPackageStatusStage(bs.BuilderStage):
2298   """Stage for refreshing Portage package status in online spreadsheet."""
2299   def PerformStage(self):
2300     commands.RefreshPackageStatus(buildroot=self._build_root,
2301                                   boards=self._boards,
2302                                   debug=self._run.options.debug)
2303
2304
2305 class InitSDKStage(bs.BuilderStage):
2306   """Stage that is responsible for initializing the SDK."""
2307
2308   option_name = 'build'
2309
2310   def __init__(self, builder_run, chroot_replace=False, **kwargs):
2311     """InitSDK constructor.
2312
2313     Args:
2314       builder_run: Builder run instance for this run.
2315       chroot_replace: If True, force the chroot to be replaced.
2316     """
2317     super(InitSDKStage, self).__init__(builder_run, **kwargs)
2318     self._env = {}
2319     self.force_chroot_replace = chroot_replace
2320     if self._run.options.clobber:
2321       self._env['IGNORE_PREFLIGHT_BINHOST'] = '1'
2322
2323     self._latest_toolchain = (self._run.config.latest_toolchain or
2324                               self._run.options.latest_toolchain)
2325     if self._latest_toolchain and self._run.config.gcc_githash:
2326       self._env['USE'] = 'git_gcc'
2327       self._env['GCC_GITHASH'] = self._run.config.gcc_githash
2328
2329   def PerformStage(self):
2330     chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
2331     replace = self._run.config.chroot_replace or self.force_chroot_replace
2332     pre_ver = post_ver = None
2333     if os.path.isdir(self._build_root) and not replace:
2334       try:
2335         pre_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
2336         commands.RunChrootUpgradeHooks(self._build_root)
2337       except results_lib.BuildScriptFailure:
2338         cros_build_lib.PrintBuildbotStepText('Replacing broken chroot')
2339         cros_build_lib.PrintBuildbotStepWarnings()
2340         replace = True
2341
2342     if not os.path.isdir(chroot_path) or replace:
2343       use_sdk = (self._run.config.use_sdk and not self._run.options.nosdk)
2344       pre_ver = None
2345       commands.MakeChroot(
2346           buildroot=self._build_root,
2347           replace=replace,
2348           use_sdk=use_sdk,
2349           chrome_root=self._run.options.chrome_root,
2350           extra_env=self._env)
2351
2352     post_ver = cros_build_lib.GetChrootVersion(chroot=chroot_path)
2353     if pre_ver is not None and pre_ver != post_ver:
2354       cros_build_lib.PrintBuildbotStepText('%s->%s' % (pre_ver, post_ver))
2355     else:
2356       cros_build_lib.PrintBuildbotStepText(post_ver)
2357
2358     commands.SetSharedUserPassword(
2359         self._build_root,
2360         password=self._run.config.shared_user_password)
2361
2362
2363 class SetupBoardStage(BoardSpecificBuilderStage, InitSDKStage):
2364   """Stage that is responsible for building host pkgs and setting up a board."""
2365
2366   option_name = 'build'
2367
2368   def PerformStage(self):
2369     # Calculate whether we should use binary packages.
2370     usepkg = (self._run.config.usepkg_setup_board and
2371               not self._latest_toolchain)
2372
2373     # We need to run chroot updates on most builders because they uprev after
2374     # the InitSDK stage. For the SDK builder, we can skip updates because uprev
2375     # is run prior to InitSDK. This is not just an optimization: it helps
2376     # work around http://crbug.com/225509.
2377     chroot_upgrade = (
2378       self._run.config.build_type != constants.CHROOT_BUILDER_TYPE)
2379
2380     # This stage handles a single board; locate it inside the chroot.
2381     chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
2382
2383     # Only update the board if we need to do so.
2384     board_path = os.path.join(chroot_path, 'build', self._current_board)
2385     if not os.path.isdir(board_path) or chroot_upgrade:
2386       commands.SetupBoard(
2387           self._build_root, board=self._current_board, usepkg=usepkg,
2388           chrome_binhost_only=self._run.config.chrome_binhost_only,
2389           force=self._run.config.board_replace,
2390           extra_env=self._env, chroot_upgrade=chroot_upgrade,
2391           profile=self._run.options.profile or self._run.config.profile)
2392
2393
2394 class UprevStage(bs.BuilderStage):
2395   """Stage that uprevs Chromium OS packages that the builder intends to
2396   validate.
2397   """
2398
2399   option_name = 'uprev'
2400
2401   def __init__(self, builder_run, boards=None, enter_chroot=True, **kwargs):
2402     super(UprevStage, self).__init__(builder_run, **kwargs)
2403     self._enter_chroot = enter_chroot
2404     if boards is not None:
2405       self._boards = boards
2406
2407   def PerformStage(self):
2408     # Perform other uprevs.
2409     if self._run.config.uprev:
2410       overlays, _ = self._ExtractOverlays()
2411       commands.UprevPackages(self._build_root,
2412                              self._boards,
2413                              overlays,
2414                              enter_chroot=self._enter_chroot)
2415
2416
2417 class SyncChromeStage(bs.BuilderStage):
2418   """Stage that syncs Chrome sources if needed."""
2419
2420   option_name = 'managed_chrome'
2421
2422   def __init__(self, builder_run, **kwargs):
2423     super(SyncChromeStage, self).__init__(builder_run, **kwargs)
2424     # PerformStage() will fill this out for us.
2425     # TODO(mtennant): Replace with a run param.
2426     self.chrome_version = None
2427
2428   def HandleSkip(self):
2429     """Set run.attrs.chrome_version to chrome version in buildroot now."""
2430     self._run.attrs.chrome_version = self._run.DetermineChromeVersion()
2431     cros_build_lib.Debug('Existing chrome version is %s.',
2432                          self._run.attrs.chrome_version)
2433     super(SyncChromeStage, self).HandleSkip()
2434
2435   def PerformStage(self):
2436     # Perform chrome uprev.
2437     chrome_atom_to_build = None
2438     if self._chrome_rev:
2439       chrome_atom_to_build = commands.MarkChromeAsStable(
2440           self._build_root, self._run.manifest_branch,
2441           self._chrome_rev, self._boards,
2442           chrome_version=self._run.options.chrome_version)
2443
2444     kwargs = {}
2445     if self._chrome_rev == constants.CHROME_REV_SPEC:
2446       kwargs['revision'] = self._run.options.chrome_version
2447       cros_build_lib.PrintBuildbotStepText('revision %s' % kwargs['revision'])
2448       self.chrome_version = self._run.options.chrome_version
2449     else:
2450       kwargs['tag'] = self._run.DetermineChromeVersion()
2451       cros_build_lib.PrintBuildbotStepText('tag %s' % kwargs['tag'])
2452       self.chrome_version = kwargs['tag']
2453
2454     useflags = self._run.config.useflags
2455     commands.SyncChrome(self._build_root, self._run.options.chrome_root,
2456                         useflags, **kwargs)
2457     if (self._chrome_rev and not chrome_atom_to_build and
2458         self._run.options.buildbot and
2459         self._run.config.build_type == constants.CHROME_PFQ_TYPE):
2460       cros_build_lib.Info('Chrome already uprevved')
2461       sys.exit(0)
2462
2463   def _Finish(self):
2464     """Provide chrome_version to the rest of the run."""
2465     # Even if the stage failed, a None value for chrome_version still
2466     # means something.  In other words, this stage tried to run.
2467     self._run.attrs.chrome_version = self.chrome_version
2468
2469     super(SyncChromeStage, self)._Finish()
2470
2471
2472 class PatchChromeStage(bs.BuilderStage):
2473   """Stage that applies Chrome patches if needed."""
2474
2475   option_name = 'rietveld_patches'
2476
2477   URL_BASE = 'https://codereview.chromium.org/%(id)s'
2478
2479   def PerformStage(self):
2480     for spatch in ' '.join(self._run.options.rietveld_patches).split():
2481       patch, colon, subdir = spatch.partition(':')
2482       if not colon:
2483         subdir = 'src'
2484       url = self.URL_BASE % {'id': patch}
2485       cros_build_lib.PrintBuildbotLink(spatch, url)
2486       commands.PatchChrome(self._run.options.chrome_root, patch, subdir)
2487
2488
2489 class BuildPackagesStage(BoardSpecificBuilderStage, ArchivingStageMixin):
2490   """Build Chromium OS packages."""
2491
2492   option_name = 'build'

2493   def __init__(self, builder_run, board, pgo_generate=False, pgo_use=False,
2494                **kwargs):
2495     super(BuildPackagesStage, self).__init__(builder_run, board, **kwargs)
2496     self._pgo_generate, self._pgo_use = pgo_generate, pgo_use
2497     assert not pgo_generate or not pgo_use
2498
2499     useflags = self._run.config.useflags[:]
2500     if pgo_generate:
2501       self.name += ' [%s]' % constants.USE_PGO_GENERATE
2502       useflags.append(constants.USE_PGO_GENERATE)
2503     elif pgo_use:
2504       self.name += ' [%s]' % constants.USE_PGO_USE
2505       useflags.append(constants.USE_PGO_USE)
2506
2507     self._env = {}
2508     if useflags:
2509       self._env['USE'] = ' '.join(useflags)
2510
2511     if self._run.options.chrome_root:
2512       self._env['CHROME_ORIGIN'] = 'LOCAL_SOURCE'
2513
2514     if self._run.options.clobber:
2515       self._env['IGNORE_PREFLIGHT_BINHOST'] = '1'
2516
2517   def _GetArchitectures(self):
2518     """Get the list of architectures built by this builder."""
2519     return set(self._GetPortageEnvVar('ARCH', b) for b in self._boards)
2520
2521   def PerformStage(self):
2522     # Wait for PGO data to be ready if needed.
2523     if self._pgo_use:
2524       cpv = portage_utilities.BestVisible(constants.CHROME_CP,
2525                                           buildroot=self._build_root)
2526       commands.WaitForPGOData(self._GetArchitectures(), cpv)
2527
2528     # If we have rietveld patches, always compile Chrome from source.
2529     noworkon = not self._run.options.rietveld_patches
2530
2531     commands.Build(self._build_root,
2532                    self._current_board,
2533                    build_autotest=self._run.ShouldBuildAutotest(),
2534                    usepkg=self._run.config.usepkg_build_packages,
2535                    chrome_binhost_only=self._run.config.chrome_binhost_only,
2536                    packages=self._run.config.packages,
2537                    skip_chroot_upgrade=True,
2538                    chrome_root=self._run.options.chrome_root,
2539                    noworkon=noworkon,
2540                    extra_env=self._env)
2541
2542
2543 class ChromeSDKStage(BoardSpecificBuilderStage, ArchivingStageMixin):
2544   """Run through the simple chrome workflow."""
2545
2546   def __init__(self, *args, **kwargs):
2547     super(ChromeSDKStage, self).__init__(*args, **kwargs)
2548     self._upload_queue = multiprocessing.Queue()
2549     self._pkg_dir = os.path.join(
2550         self._build_root, constants.DEFAULT_CHROOT_DIR,
2551         'build', self._current_board, 'var', 'db', 'pkg')
2552     self.chrome_src = os.path.join(self._run.options.chrome_root, 'src')
2553     self.out_board_dir = os.path.join(
2554         self.chrome_src, 'out_%s' % self._current_board)
2555
2556   def _BuildAndArchiveChromeSysroot(self):
2557     """Generate and upload sysroot for building Chrome."""
2558     assert self.archive_path.startswith(self._build_root)
2559     extra_env = {}
2560     if self._run.config.useflags:
2561       extra_env['USE'] = ' '.join(self._run.config.useflags)
2562     in_chroot_path = git.ReinterpretPathForChroot(self.archive_path)
2563     cmd = ['cros_generate_sysroot', '--out-dir', in_chroot_path, '--board',
2564            self._current_board, '--package', constants.CHROME_CP]
2565     cros_build_lib.RunCommand(cmd, cwd=self._build_root, enter_chroot=True,
2566                               extra_env=extra_env)
2567     self._upload_queue.put([constants.CHROME_SYSROOT_TAR])
2568
2569   def _ArchiveChromeEbuildEnv(self):
2570     """Generate and upload Chrome ebuild environment."""
2571     chrome_dir = ArchiveStage.SingleMatchGlob(
2572         os.path.join(self._pkg_dir, constants.CHROME_CP) + '-*')
2573     env_bzip = os.path.join(chrome_dir, 'environment.bz2')
2574     with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
2575       # Convert from bzip2 to tar format.
2576       bzip2 = cros_build_lib.FindCompressor(cros_build_lib.COMP_BZIP2)
2577       cros_build_lib.RunCommand(
2578           [bzip2, '-d', env_bzip, '-c'],
2579           log_stdout_to_file=os.path.join(tempdir, constants.CHROME_ENV_FILE))
2580       env_tar = os.path.join(self.archive_path, constants.CHROME_ENV_TAR)
2581       cros_build_lib.CreateTarball(env_tar, tempdir)
2582       self._upload_queue.put([os.path.basename(env_tar)])
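      # The conversion above is roughly this shell sketch (illustrative,
      # not the literal commands that RunCommand/CreateTarball execute):
      #   bzip2 -dc environment.bz2 > "$tempdir/<CHROME_ENV_FILE>"
      #   tar -cf "<archive_path>/<CHROME_ENV_TAR>" -C "$tempdir" .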
2583
2584   def _VerifyChromeDeployed(self, tempdir):
2585     """Check to make sure deploy_chrome ran correctly."""
2586     if not os.path.exists(os.path.join(tempdir, 'chrome')):
2587       raise AssertionError('deploy_chrome did not run successfully!')
2588
2589   def _VerifySDKEnvironment(self):
2590     """Make sure the SDK environment is set up properly."""
2591     # If the environment wasn't set up, then the output directory wouldn't be
2592     # created after 'gclient runhooks'.
2593     # TODO: Make this check actually look at the environment.
2594     if not os.path.exists(self.out_board_dir):
2595       raise AssertionError('%s not created!' % self.out_board_dir)
2596
2597   def _BuildChrome(self, sdk_cmd):
2598     """Use the generated SDK to build Chrome."""
2599     # Validate fetching of the SDK and setting everything up.
2600     sdk_cmd.Run(['true'])
2601     # Actually build chromium.
2602     sdk_cmd.Run(['gclient', 'runhooks'])
2603     self._VerifySDKEnvironment()
2604     sdk_cmd.Ninja()
2605
2606   def _TestDeploy(self, sdk_cmd):
2607     """Test SDK deployment."""
2608     with osutils.TempDir(prefix='chrome-sdk-stage') as tempdir:
2609       # Use the TOT deploy_chrome.
2610       script_path = os.path.join(
2611           self._build_root, constants.CHROMITE_BIN_SUBDIR, 'deploy_chrome')
2612       sdk_cmd.Run([script_path, '--build-dir',
2613                    os.path.join(self.out_board_dir, 'Release'),
2614                    '--staging-only', '--staging-dir', tempdir])
2615       self._VerifyChromeDeployed(tempdir)
2616
2617   def PerformStage(self):
2618     if platform.dist()[-1] == 'lucid':
2619       # Chrome no longer builds on Lucid. See crbug.com/276311
2620       print 'Ubuntu lucid is no longer supported.'
2621       print 'Please upgrade to Ubuntu Precise.'
2622       cros_build_lib.PrintBuildbotStepWarnings()
2623       return
2624
2625     upload_metadata = functools.partial(
2626         self.UploadMetadata, upload_queue=self._upload_queue)
2627     steps = [self._BuildAndArchiveChromeSysroot, self._ArchiveChromeEbuildEnv,
2628              upload_metadata]
2629     with self.ArtifactUploader(self._upload_queue, archive=False):
2630       parallel.RunParallelSteps(steps)
2631
2632       if self._run.config.chrome_sdk_build_chrome:
2633         with osutils.TempDir(prefix='chrome-sdk-cache') as tempdir:
2634           cache_dir = os.path.join(tempdir, 'cache')
2635           extra_args = ['--cwd', self.chrome_src, '--sdk-path',
2636                         self.archive_path]
2637           sdk_cmd = commands.ChromeSDK(
2638               self._build_root, self._current_board, chrome_src=self.chrome_src,
2639               goma=self._run.config.chrome_sdk_goma,
2640               extra_args=extra_args, cache_dir=cache_dir)
2641           self._BuildChrome(sdk_cmd)
2642           self._TestDeploy(sdk_cmd)
2643
2644
2645 class BuildImageStage(BuildPackagesStage):
2646   """Build standard Chromium OS images."""
2647
2648   option_name = 'build'
2649
2650   def _BuildImages(self):
2651     # We only build base, dev, and test images from this stage.
2652     if self._pgo_generate:
2653       images_can_build = set(['test'])
2654     else:
2655       images_can_build = set(['base', 'dev', 'test'])
2656     images_to_build = set(self._run.config.images).intersection(
2657         images_can_build)
2658
2659     version = self._run.attrs.release_tag
2660     disk_layout = self._run.config.disk_layout
2661     if self._pgo_generate:
2662       disk_layout = constants.PGO_GENERATE_DISK_LAYOUT
2663       if version:
2664         version = '%s-pgo-generate' % version
2665
2666     rootfs_verification = self._run.config.rootfs_verification
2667     commands.BuildImage(self._build_root,
2668                         self._current_board,
2669                         sorted(images_to_build),
2670                         rootfs_verification=rootfs_verification,
2671                         version=version,
2672                         disk_layout=disk_layout,
2673                         extra_env=self._env)
2674
2675     # Update link to latest image.
2676     latest_image = os.readlink(self.GetImageDirSymlink('latest'))
2677     cbuildbot_image_link = self.GetImageDirSymlink()
2678     if os.path.lexists(cbuildbot_image_link):
2679       os.remove(cbuildbot_image_link)
2680
2681     os.symlink(latest_image, cbuildbot_image_link)
2682
2683     self.board_runattrs.SetParallel('images_generated', True)
2684
2685     parallel.RunParallelSteps(
2686         [self._BuildVMImage, lambda: self._GenerateAuZip(cbuildbot_image_link)])
2687
2688   def _BuildVMImage(self):
2689     if self._run.config.vm_tests and not self._pgo_generate:
2690       commands.BuildVMImageForTesting(
2691           self._build_root,
2692           self._current_board,
2693           disk_layout=self._run.config.disk_vm_layout,
2694           extra_env=self._env)
2695
2696   def _GenerateAuZip(self, image_dir):
2697     """Create au-generator.zip."""
2698     if not self._pgo_generate:
2699       commands.GenerateAuZip(self._build_root,
2700                              image_dir,
2701                              extra_env=self._env)
2702
2703   def _HandleStageException(self, exc_info):
2704     """Tell other stages to not wait on us if we die for some reason."""
2705     self.board_runattrs.SetParallelDefault('images_generated', False)
2706     return super(BuildImageStage, self)._HandleStageException(exc_info)
2707
2708   def PerformStage(self):
2709     if self._run.config.images:
2710       self._BuildImages()
2711
2712
2713 class SignerTestStage(ArchivingStage):
2714   """Run signer related tests."""
2715
2716   option_name = 'tests'
2717   config_name = 'signer_tests'
2718
2719   # If the signer tests take longer than 30 minutes, abort. They usually take
2720   # five minutes to run.
2721   SIGNER_TEST_TIMEOUT = 1800
2722
2723   def PerformStage(self):
2724     if not self.archive_stage.WaitForRecoveryImage():
2725       raise InvalidTestConditionException('Missing recovery image.')
2726     with timeout_util.Timeout(self.SIGNER_TEST_TIMEOUT):
2727       commands.RunSignerTests(self._build_root, self._current_board)
2728
2729
2730 class UnitTestStage(BoardSpecificBuilderStage):
2731   """Run unit tests."""
2732
2733   option_name = 'tests'
2734   config_name = 'unittests'
2735
2736   # If the unit tests take longer than 70 minutes, abort. They usually take
2737   # ten minutes to run.
2738   #
2739   # If the processes hang, parallel_emerge will print a status report after 60
2740   # minutes, so we picked 70 minutes because it gives us a little buffer time.
2741   UNIT_TEST_TIMEOUT = 70 * 60
2742
2743   def PerformStage(self):
2744     extra_env = {}
2745     if self._run.config.useflags:
2746       extra_env['USE'] = ' '.join(self._run.config.useflags)
2747     with timeout_util.Timeout(self.UNIT_TEST_TIMEOUT):
2748       commands.RunUnitTests(self._build_root,
2749                             self._current_board,
2750                             full=(not self._run.config.quick_unit),
2751                             blacklist=self._run.config.unittest_blacklist,
2752                             extra_env=extra_env)
2753
2754     if os.path.exists(os.path.join(self.GetImageDirSymlink(),
2755                                    'au-generator.zip')):
2756       commands.TestAuZip(self._build_root,
2757                          self.GetImageDirSymlink())
2758
2759
2760 class VMTestStage(BoardSpecificBuilderStage, ArchivingStageMixin):
2761   """Run autotests in a virtual machine."""
2762
2763   option_name = 'tests'
2764   config_name = 'vm_tests'
2765
2766   def _PrintFailedTests(self, results_path, test_basename):
2767     """Print links to failed tests.
2768
2769     Args:
2770       results_path: Path to directory containing the test results.
2771       test_basename: The basename that the tests are archived to.
2772     """
2773     test_list = commands.ListFailedTests(results_path)
2774     for test_name, path in test_list:
2775       self.PrintDownloadLink(
2776           os.path.join(test_basename, path), text_to_display=test_name)
2777
2778   def _ArchiveTestResults(self, test_results_dir, test_basename):
2779     """Archives test results to Google Storage.
2780
2781     Args:
2782       test_results_dir: Name of the directory containing the test results.
2783       test_basename: The basename to archive the tests.
2784     """
2785     results_path = commands.GetTestResultsDir(
2786         self._build_root, test_results_dir)
2787
2788     # Skip archiving if results_path does not exist or is an empty directory.
2789     if not os.path.isdir(results_path) or not os.listdir(results_path):
2790       return
2791
2792     archived_results_dir = os.path.join(self.archive_path, test_basename)
2793     # Copy relevant files to archived_results_dir.
2794     commands.ArchiveTestResults(results_path, archived_results_dir)
2795     upload_paths = [os.path.basename(archived_results_dir)]
2796     # Create the compressed tarball to upload.
2797     # TODO: We should revisit whether uploading the tarball is necessary.
2798     test_tarball = commands.BuildAndArchiveTestResultsTarball(
2799         archived_results_dir, self._build_root)
2800     upload_paths.append(test_tarball)
2801
2802     got_symbols = self.GetParallel('breakpad_symbols_generated',
2803                                    pretty_name='breakpad symbols')
2804     upload_paths += commands.GenerateStackTraces(
2805         self._build_root, self._current_board, test_results_dir,
2806         self.archive_path, got_symbols)
2807
2808     self._Upload(upload_paths)
2809     self._PrintFailedTests(results_path, test_basename)
2810
2811     # Remove the test results directory.
2812     osutils.RmDir(results_path, ignore_missing=True, sudo=True)
2813
2814   def _ArchiveVMFiles(self, test_results_dir):
2815     vm_files = commands.ArchiveVMFiles(
2816         self._build_root, os.path.join(test_results_dir, 'test_harness'),
2817         self.archive_path)
2818     # We use paths relative to |self.archive_path|, for prettier
2819     # formatting on the web page.
2820     self._Upload([os.path.basename(image) for image in vm_files])
2821
2822   def _Upload(self, filenames):
2823     cros_build_lib.Info('Uploading artifacts to Google Storage...')
2824     with self.ArtifactUploader(archive=False, strict=False) as queue:
2825       for filename in filenames:
2826         queue.put([filename])
2827         if filename.endswith('.dmp.txt'):
2828           prefix = 'crash: '
2829         elif constants.VM_DISK_PREFIX in os.path.basename(filename):
2830           prefix = 'vm_disk: '
2831         elif constants.VM_MEM_PREFIX in os.path.basename(filename):
2832           prefix = 'vm_memory: '
2833         else:
2834           prefix = ''
2835         self.PrintDownloadLink(filename, prefix)
2836
2837   def _RunTest(self, test_type, test_results_dir):
2838     """Run a VM test.
2839
2840     Args:
2841       test_type: Any test in constants.VALID_VM_TEST_TYPES
2842       test_results_dir: The base directory to store the results.
2843     """
2844     if test_type == constants.CROS_VM_TEST_TYPE:
2845       commands.RunCrosVMTest(self._current_board, self.GetImageDirSymlink())
2846     elif test_type == constants.DEV_MODE_TEST_TYPE:
2847       commands.RunDevModeTest(
2848         self._build_root, self._current_board, self.GetImageDirSymlink())
2849     else:
2850       commands.RunTestSuite(self._build_root,
2851                             self._current_board,
2852                             self.GetImageDirSymlink(),
2853                             os.path.join(test_results_dir,
2854                                          'test_harness'),
2855                             test_type=test_type,
2856                             whitelist_chrome_crashes=self._chrome_rev is None,
2857                             archive_dir=self.bot_archive_root)
2858
2859   def PerformStage(self):
2860     # These directories are used later to archive test artifacts.
2861     test_results_dir = commands.CreateTestRoot(self._build_root)
2862     test_basename = constants.VM_TEST_RESULTS % dict(attempt=self._attempt)
2863     try:
2864       for test_type in self._run.config.vm_tests:
2865         cros_build_lib.Info('Running VM test %s.', test_type)
2866         self._RunTest(test_type, test_results_dir)
2867
2868     except Exception:
2869       cros_build_lib.Error(_VM_TEST_ERROR_MSG %
2870                            dict(vm_test_results=test_basename))
2871       self._ArchiveVMFiles(test_results_dir)
2872       raise
2873     finally:
2874       self._ArchiveTestResults(test_results_dir, test_basename)
2875
2876
2877 class UploadTestArtifactsStage(BoardSpecificBuilderStage, ArchivingStageMixin):
2878   """Upload needed hardware test artifacts."""
2879
2880   def BuildAutotestTarballs(self):
2881     """Build the autotest tarballs."""
2882     with osutils.TempDir(prefix='cbuildbot-autotest') as tempdir:
2883       with self.ArtifactUploader(strict=True) as queue:
2884         cwd = os.path.abspath(
2885             os.path.join(self._build_root, 'chroot', 'build',
2886                          self._current_board, constants.AUTOTEST_BUILD_PATH,
2887                          '..'))
2888
2889         # Find the control files in autotest/
2890         control_files = commands.FindFilesWithPattern(
2891             'control*', target='autotest', cwd=cwd)
2892
2893         # Tar the control files and the packages.
2894         autotest_tarball = os.path.join(tempdir, 'autotest.tar')
2895         input_list = control_files + ['autotest/packages']
2896         commands.BuildTarball(self._build_root, input_list, autotest_tarball,
2897                               cwd=cwd, compressed=False)
2898         queue.put([autotest_tarball])
2899
2900         # Tar up the test suites.
2901         test_suites_tarball = os.path.join(tempdir, 'test_suites.tar.bz2')
2902         commands.BuildTarball(self._build_root, ['autotest/test_suites'],
2903                               test_suites_tarball, cwd=cwd)
2904         queue.put([test_suites_tarball])
2905
2906   def BuildUpdatePayloads(self):
2907     """Archives update payloads when they are ready."""
2908     got_images = self.GetParallel('images_generated', pretty_name='images')
2909     if not got_images:
2910       return
2911
2912     with osutils.TempDir(prefix='cbuildbot-payloads') as tempdir:
2913       with self.ArtifactUploader() as queue:
2914         if 'test' in self._run.config.images:
2915           image_name = 'chromiumos_test_image.bin'
2916         elif 'dev' in self._run.config.images:
2917           image_name = 'chromiumos_image.bin'
2918         else:
2919           image_name = 'chromiumos_base_image.bin'
2920
2921         logging.info('Generating payloads to upload for %s', image_name)
2922         image_path = os.path.join(self.GetImageDirSymlink(), image_name)
2923         # For non release builds, we are only interested in generating
2924         # payloads for the purpose of imaging machines. This means we
2925         # shouldn't generate delta payloads for n-1->n testing.
2926         # TODO: Add a config flag for generating delta payloads instead.
2927         if self._run.config.build_type == constants.CANARY_TYPE:
2928           commands.GenerateFullAndDeltaPayloads(
2929               self._build_root, self.bot_archive_root, image_path, tempdir)
2930         else:
2931           commands.GenerateFullPayload(self._build_root, image_path, tempdir)
2932
2933         for payload in os.listdir(tempdir):
2934           queue.put([os.path.join(tempdir, payload)])
2935
2936   def PerformStage(self):
2937     """Upload any needed HWTest artifacts."""
2938     steps = []
2939     if (self._run.ShouldBuildAutotest() and
2940         self._run.config.upload_hw_test_artifacts):
2941       steps.append(self.BuildAutotestTarballs)
2942
2943     if self._run.config.upload_hw_test_artifacts:
2944       steps.append(self.BuildUpdatePayloads)
2945
2946     parallel.RunParallelSteps(steps)
2947
2948
2949 class TestTimeoutException(Exception):
2950   """Raised when a critical test times out."""
2951   pass
2952
2953
2954 class InvalidTestConditionException(Exception):
2955   """Raised when pre-conditions for a test aren't met."""
2956   pass
2957
2958
2959 class HWTestStage(BoardSpecificBuilderStage, ArchivingStageMixin):
2960   """Stage that runs tests in the Autotest lab."""
2961
2962   option_name = 'tests'
2963   config_name = 'hw_tests'
2964
2965   PERF_RESULTS_EXTENSION = 'results'
2966
2967   def __init__(self, builder_run, board, suite_config, **kwargs):
2968     super(HWTestStage, self).__init__(builder_run, board,
2969                                       suffix=' [%s]' % suite_config.suite,
2970                                       **kwargs)
2971     self.suite_config = suite_config
2972     self.wait_for_results = True
2973
2974   def _PrintFile(self, filename):
2975     with open(filename) as f:
2976       print f.read()
2977
2978
2979   def _CheckAborted(self):
2980     """Checks with GS to see if HWTest for this build's release_tag was aborted.
2981
2982     We currently only support aborting HWTests for the CQ, so this method only
2983     returns True for paladin builders.
2984
2985     Returns:
2986       True if HWTests have been aborted for this build's release_tag.
2987       False otherwise.
2988     """
2989     aborted = (cbuildbot_config.IsCQType(self._run.config.build_type) and
2990                commands.HaveCQHWTestsBeenAborted(self._run.GetVersion()))
2991     return aborted
2992
2993   # Disable complaint about calling _HandleStageException.
2994   # pylint: disable=W0212
2995   def _HandleStageException(self, exc_info):
2996     """Override and don't set status to FAIL but FORGIVEN instead."""
2997     exc_type, exc_value = exc_info[:2]
2998
2999     # Deal with timeout errors specially.
3000     if issubclass(exc_type, timeout_util.TimeoutError):
3001       return self._HandleStageTimeoutException(exc_info)
3002
3003     # Code 2: warnings returned by run_suite.py, or a CLIENT_HTTP_CODE error
3004     # returned by autotest_rpc_client.py; it is the former that we care about.
3005     # Codes 11, 12, 13: the RPC service is down. See autotest_rpc_errors.py.
3006     codes_handled_as_warning = (2, 11, 12, 13)
3007
3008     if self.suite_config.critical:
3009       return super(HWTestStage, self)._HandleStageException(exc_info)
3010     is_lab_down = (issubclass(exc_type, lab_status.LabIsDownException) or
3011                    issubclass(exc_type, lab_status.BoardIsDisabledException))
3012     is_warning_code = (issubclass(exc_type, cros_build_lib.RunCommandError) and
3013                        exc_value.result.returncode in codes_handled_as_warning)
3014     if is_lab_down:
3015       cros_build_lib.Warning('HWTest was skipped because the lab was down.')
3016       return self._HandleExceptionAsWarning(exc_info)
3017     elif is_warning_code:
3018       cros_build_lib.Warning('HWTest failed with warning code.')
3019       return self._HandleExceptionAsWarning(exc_info)
3020     elif self._CheckAborted():
3021       cros_build_lib.Warning(CQ_HWTEST_WAS_ABORTED)
3022       return self._HandleExceptionAsWarning(exc_info)
3023     else:
3024       return super(HWTestStage, self)._HandleStageException(exc_info)
3025
3026   def _HandleStageTimeoutException(self, exc_info):
3027     if not self.suite_config.critical and not self.suite_config.fatal_timeouts:
3028       return self._HandleExceptionAsWarning(exc_info)
3029
3030     return super(HWTestStage, self)._HandleStageException(exc_info)
3031
3032   def PerformStage(self):
3033     if self._CheckAborted():
3034       cros_build_lib.PrintBuildbotStepText('aborted')
3035       cros_build_lib.Warning(CQ_HWTEST_WAS_ABORTED)
3036       return
3037
3038     build = '/'.join([self._bot_id, self.version])
3039     if self._run.options.remote_trybot and self._run.options.hwtest:
3040       debug = self._run.options.debug_forced
3041     else:
3042       debug = self._run.options.debug
3043     lab_status.CheckLabStatus(self._current_board)
3044     with timeout_util.Timeout(
3045         self.suite_config.timeout + constants.HWTEST_TIMEOUT_EXTENSION):
3046       commands.RunHWTestSuite(build,
3047                               self.suite_config.suite,
3048                               self._current_board,
3049                               self.suite_config.pool,
3050                               self.suite_config.num,
3051                               self.suite_config.file_bugs,
3052                               self.wait_for_results,
3053                               self.suite_config.priority,
3054                               self.suite_config.timeout_mins,
3055                               debug)
3056
3057
3058 class SignerResultsException(Exception):
3059   """An expected failure from the SignerResultsStage."""
3060
3061
3062 class SignerResultsTimeout(SignerResultsException):
3063   """The signer did not produce any results inside the expected time."""
3064
3065
3066 class SignerFailure(SignerResultsException):
3067   """The signer returned an error result."""
3068
3069
3070 class MissingInstructionException(SignerResultsException):
3071   """We didn't receive the list of signing instructions PushImage uploaded."""
3072
3073
3074 class MalformedResultsException(SignerResultsException):
3075   """The Signer results aren't formatted as we expect."""
3076
3077
3078 class SignerResultsStage(ArchivingStage):
3079   """Stage that blocks on and retrieves signer results."""
3080
3081   option_name = 'signer_results'
3082   config_name = 'signer_results'
3083
3084   # Poll for new results every 30 seconds.
3085   SIGNING_PERIOD = 30
3086
3087   # Abort, if the signing takes longer than 2 hours.
3088   #   2 hours * 60 minutes * 60 seconds
3089   SIGNING_TIMEOUT = 7200
3090
3091   FINISHED = 'finished'
3092
3093   def __init__(self, *args, **kwargs):
3094     super(SignerResultsStage, self).__init__(*args, **kwargs)
3095     self.signing_results = {}
3096
3097   def _HandleStageException(self, exc_info):
3098     """Convert SignerResultsException exceptions to WARNING (for now)."""
3099     exc_type = exc_info[0]
3100
3101     # If we raise an exception, make sure nobody keeps waiting on our results.
3102     self.archive_stage.AnnounceChannelSigned(None)
3103
3104     # This stage is experimental, don't trust it yet.
3105     if issubclass(exc_type, SignerResultsException):
3106       return self._HandleExceptionAsWarning(exc_info)
3107
3108     return super(SignerResultsStage, self)._HandleStageException(exc_info)
3109
3110   def _JsonFromUrl(self, gs_ctx, url):
3111     """Fetch a GS Url, and parse it as Json.
3112
3113     Args:
3114       gs_ctx: GS Context.
3115       url: Url to fetch and parse.
3116
3117     Returns:
3118       None if the Url doesn't exist.
3119       The parsed Json structure if it does.
3120
3121     Raises:
3122       MalformedResultsException if it failed to parse.
3123     """
3124     try:
3125       signer_txt = gs_ctx.Cat(url).output
3126     except gs.GSNoSuchKey:
3127       return None
3128
3129     try:
3130       return json.loads(signer_txt)
3131     except ValueError:
3132       # We should never see malformed Json, even for intermediate statuses.
3133       raise MalformedResultsException(signer_txt)
3134
3135   def _SigningStatusFromJson(self, signer_json):
3136     """Extract a signing status from a signer result Json DOM.
3137
3138     Args:
3139       signer_json: The parsed json status from a signer operation.
3140
3141     Returns:
3142       string with a simple status: 'passed', 'failed', 'downloading', etc,
3143       or '' if the json doesn't contain a status.
3144     """
3145     return (signer_json or {}).get('status', {}).get('status', '')
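    # Examples (values illustrative):
    #   {'status': {'status': 'downloading'}} -> 'downloading'
    #   None or {} (no result yet)            -> ''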
3146
3147   def _CheckForResults(self, gs_ctx, instruction_urls_per_channel):
3148     """timeout_util.WaitForReturnTrue func to check a list of signer results.
3149
3150     Args:
3151       gs_ctx: Google Storage Context.
3152       instruction_urls_per_channel: Urls of the signer result files
3153                                     we're expecting.
3154
3155     Returns:
3156       True if all expected signer results have been collected, else False.
3157     """
3158     COMPLETED_STATUS = ('passed', 'failed')
3159
3160     # Assume we are done, then try to prove otherwise.
3161     results_completed = True
3162
3163     for channel in instruction_urls_per_channel:
3164       self.signing_results.setdefault(channel, {})
3165
3166       if (len(self.signing_results[channel]) ==
3167           len(instruction_urls_per_channel[channel])):
3168         continue
3169
3170       for url in instruction_urls_per_channel[channel]:
3171         # Convert from instructions URL to instructions result URL.
3172         url += '.json'
3173
3174         # We already have a result for this URL.
3175         if url in self.signing_results[channel]:
3176           continue
3177
3178         signer_json = self._JsonFromUrl(gs_ctx, url)
3179         if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS:
3180           # If we find a completed result, remember it.
3181           self.signing_results[channel][url] = signer_json
3182
3183       if (len(self.signing_results[channel]) ==
3184           len(instruction_urls_per_channel[channel])):
3185         # If we just completed the channel, inform paygen.
3186         self.archive_stage.AnnounceChannelSigned(channel)
3187       else:
3188         results_completed = False
3189
3190     return results_completed
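    # PerformStage polls this function via timeout_util.WaitForReturnTrue,
    # once per SIGNING_PERIOD (30s), giving up after SIGNING_TIMEOUT (2h).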
3191
3192   def PerformStage(self):
3193     """Do the work of waiting for signer results and logging them.
3194
3195     Raises:
3196       ValueError: If the signer result isn't valid json.
3197       RunCommandError: If we are unable to download signer results.
3198     """
3199     # These results are expected to contain:
3200     # { 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] }
3201     instruction_urls_per_channel = self.archive_stage.WaitForPushImage()
3202     if instruction_urls_per_channel is None:
3203       raise MissingInstructionException('PushImage results not available.')
3204
3205     gs_ctx = gs.GSContext(dry_run=self._run.debug)
3206
3207     try:
3208       cros_build_lib.Info('Waiting for signer results.')
3209       timeout_util.WaitForReturnTrue(
3210           self._CheckForResults,
3211           func_args=(gs_ctx, instruction_urls_per_channel),
3212           timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD)
3213     except timeout_util.TimeoutError:
3214       msg = 'Image signing timed out.'
3215       cros_build_lib.Error(msg)
3216       cros_build_lib.PrintBuildbotStepText(msg)
3217       raise SignerResultsTimeout(msg)
3218
3219     # Log all signer results, then handle any signing failures.
3220     failures = []
3221     for url_results in self.signing_results.values():
3222       for url, signer_result in url_results.iteritems():
3223         result_description = os.path.basename(url)
3224         cros_build_lib.PrintBuildbotStepText(result_description)
3225         cros_build_lib.Info('Received results for: %s', result_description)
3226         cros_build_lib.Info(json.dumps(signer_result, indent=4))
3227
3228         status = self._SigningStatusFromJson(signer_result)
3229         if status != 'passed':
3230           failures.append(result_description)
3231           cros_build_lib.Error('Signing failed for: %s', result_description)
3232
3233     if failures:
3234       cros_build_lib.Error('Failure summary:')
3235       for failure in failures:
3236         cros_build_lib.Error('  %s', failure)
3237       raise SignerFailure(failures)
3238
3239     # If we completed successfully, announce all channels completed.
3240     self.archive_stage.AnnounceChannelSigned(self.FINISHED)
3241
3242
3243 class PaygenSigningRequirementsError(Exception):
3244   """Paygen stage can't run if signing failed."""
3245
3246
3247 class PaygenStage(ArchivingStage):
3248   """Stage that generates release payloads.
3249
3250   If this stage is created with a 'channels' argument, it can run
3251   independently. Otherwise, it's dependent on values queued up by
3252   the SignerResultsStage.
3253   """
3254   option_name = 'paygen'
3255   config_name = 'paygen'
3256
3257   def __init__(self, builder_run, board, archive_stage, channels=None,
3258                **kwargs):
3259     """Init that accepts the channels argument, if present.
3260
3261     Args:
3262       builder_run: See builder_run on ArchivingStage.
3263       board: See board on ArchivingStage.
3264       archive_stage: See archive_stage on ArchivingStage.
3265       channels: Explicit list of channels to generate payloads for.
3266                 If empty, will instead wait on values from SignerResultsStage.
3267                 Channels is normally None in release builds, and normally set
3268                 for trybot 'payloads' builds.
3269     """
3270     super(PaygenStage, self).__init__(builder_run, board, archive_stage,
3271                                       **kwargs)
3272     self.channels = channels
3273
3274   def _HandleStageException(self, exc_info):
3275     """Override and don't set status to FAIL but FORGIVEN instead."""
3276     exc_type = exc_info[0]
3277
3278     # TODO(dgarrett): Constrain this to only expected exceptions.
3279     if issubclass(exc_type, Exception):
3280       return self._HandleExceptionAsWarning(exc_info)
3281
3282     return super(PaygenStage, self)._HandleStageException(exc_info)
3283
3284   def PerformStage(self):
3285     """Do the work of generating our release payloads."""
3286
3287     board = self._current_board
3288     version = self._run.attrs.release_tag
3289
3290     assert version, "We can't generate payloads without a release_tag."
3291     logging.info("Generating payloads for: %s, %s", board, version)
3292
3293     with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
3294       if self.channels:
3295         logging.info("Using explicit channels: %s", self.channels)
3296         # If we have an explicit list of channels, use it.
3297         for channel in self.channels:
3298           per_channel.put((channel, board, version, self._run.debug,
3299                            self._run.config.perform_paygen_testing))
3300       else:
3301         # Otherwise, wait for SignerResults to tell us which channels are ready.
3302         while True:
3303           channel = self.archive_stage.WaitForChannelSigning()
3304
3305           if channel is None:
3306             # This signals a signer error, or that signing never ran.
3307             raise PaygenSigningRequirementsError()
3308
3309           if channel == SignerResultsStage.FINISHED:
3310             break
3311
3312           logging.info("Notified of channel: %s", channel)
3313           per_channel.put((channel, board, version, self._run.debug,
3314                            self._run.config.perform_paygen_testing))
3315
3316   def _RunPaygenInProcess(self, channel, board, version, debug, test_payloads):
3317     """Helper for PaygenStage that invokes payload generation.
3318
3319     This method is intended to be safe to invoke inside a process.
3320
3321     Args:
3322       channel: Channel of payloads to generate ('stable', 'beta', etc)
3323       board: Board of payloads to generate ('x86-mario', 'x86-alex-he', etc)
3324       version: Version of payloads to generate.
3325       debug: Flag telling if this is a real run, or a test run.
3326       test_payloads: Generate test payloads, and schedule auto testing.
3327     """
3328     # TODO(dgarrett): Remove when crbug.com/341152 is fixed.
3329     # These modules are imported here because they aren't always available at
3330     # cbuildbot startup.
3331     # pylint: disable=F0401
3332     from crostools.lib import gspaths
3333     from crostools.lib import paygen_build_lib
3334
3335     # Convert to release tools naming for channels.
3336     if not channel.endswith('-channel'):
3337       channel += '-channel'
3338
3339     # Convert to release tools naming for boards.
3340     board = board.replace('_', '-')
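    # e.g. 'stable' -> 'stable-channel', and a board name like 'x86_alex'
    # would become 'x86-alex' (values illustrative; release tools use
    # dashes rather than underscores).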
3341
3342     with osutils.TempDir(sudo_rm=True) as tempdir:
3343       # Create the definition of the build to generate payloads for.
3344       build = gspaths.Build(channel=channel,
3345                             board=board,
3346                             version=version)
3347
3348       try:
3349         # Generate the payloads.
3350         self._PrintLoudly('Starting %s, %s, %s' % (channel, version, board))
3351         paygen_build_lib.CreatePayloads(build,
3352                                         work_dir=tempdir,
3353                                         dry_run=debug,
3354                                         run_parallel=True,
3355                                         run_on_builder=True,
3356                                         skip_test_payloads=not test_payloads,
3357                                         skip_autotest=not test_payloads)
3358       except (paygen_build_lib.BuildFinished,
3359               paygen_build_lib.BuildLocked,
3360               paygen_build_lib.BuildSkip) as e:
3361         # These errors are normal if it's possible for another process to
3362         # work on the same build. This process could be a Paygen server, or
3363         # another builder (perhaps a trybot generating payloads on request).
3364         #
3365         # This means the build was finished by the other process, is already
3366         # being processed (so the build is locked), or that it's been marked
3367         # to skip (probably done manually).
3368         cros_build_lib.Info('Paygen skipped because: %s', e)
3369
3370
3371 class AUTestStage(HWTestStage):
3372   """Stage for au hw test suites that requires special pre-processing."""
3373
3374   def PerformStage(self):
3375     """Wait for payloads to be staged and uploads its au control files."""
3376     with osutils.TempDir() as tempdir:
3377       tarball = commands.BuildAUTestTarball(
3378           self._build_root, self._current_board, tempdir,
3379           self.version, self.upload_url)
3380       self.UploadArtifact(tarball)
3381
3382     super(AUTestStage, self).PerformStage()
3383
3384
3385 class ASyncHWTestStage(HWTestStage, ForgivingBuilderStage):
3386   """Stage that fires and forgets hw test suites to the Autotest lab."""
3387
3388   def __init__(self, *args, **kwargs):
3389     super(ASyncHWTestStage, self).__init__(*args, **kwargs)
3390     self.wait_for_results = False
3391
3392
3393 class QATestStage(HWTestStage, ForgivingBuilderStage):
3394   """Runs the QAV suite in the lab, blocking the build but forgiving failures.
3395   """
3396
3397   def __init__(self, *args, **kwargs):
3398     super(QATestStage, self).__init__(*args, **kwargs)
3399
3400
3401 class SDKPackageStage(bs.BuilderStage):
3402   """Stage that prepares and packages SDK files."""
3403
3404   # Version of the manifest file being generated. Should be incremented for
3405   # major format changes.
3406   MANIFEST_VERSION = '1'
3407   _EXCLUDED_PATHS = ('usr/lib/debug', constants.AUTOTEST_BUILD_PATH,
3408                      'packages', 'tmp')
3409
3410   def PerformStage(self):
3411     tarball_name = 'built-sdk.tar.xz'
3412     tarball_location = os.path.join(self._build_root, tarball_name)
3413     chroot_location = os.path.join(self._build_root,
3414                                    constants.DEFAULT_CHROOT_DIR)
3415     board_location = os.path.join(chroot_location, 'build/amd64-host')
3416     manifest_location = os.path.join(self._build_root,
3417                                      '%s.Manifest' % tarball_name)
3418
3419     # Create a tarball of the latest SDK.
3420     self.CreateSDKTarball(chroot_location, board_location, tarball_location)
3421
3422     # Create a package manifest for the tarball.
3423     self.CreateManifestFromSDK(board_location, manifest_location)
3424
3425     # Create toolchain packages.
3426     self.CreateRedistributableToolchains(chroot_location)
3427
3428     # Make sure the regular user has the permission to read.
3429     cmd = ['chmod', 'a+r', tarball_location]
3430     cros_build_lib.SudoRunCommand(cmd, cwd=board_location)
3431
3432   def CreateRedistributableToolchains(self, chroot_location):
3433     """Create the toolchain packages"""
3434     osutils.RmDir(os.path.join(chroot_location,
3435                                constants.SDK_TOOLCHAINS_OUTPUT),
3436                   ignore_missing=True)
3437
3438     # We need to run this as root because the tool creates hard links to root
3439     # owned files and our bots enable security features which disallow that.
3440     # Specifically, these features cause problems:
3441     #  /proc/sys/kernel/yama/protected_nonaccess_hardlinks
3442     #  /proc/sys/fs/protected_hardlinks
3443     cmd = [
3444         git.ReinterpretPathForChroot(os.path.join(
3445             constants.CHROMITE_BIN_DIR, 'cros_setup_toolchains')),
3446         '--create-packages',
3447         '--output-dir', os.path.join('/', constants.SDK_TOOLCHAINS_OUTPUT),
3448     ]
3449     cros_build_lib.SudoRunCommand(cmd, enter_chroot=True)
3450
3451   def CreateSDKTarball(self, _chroot, sdk_path, dest_tarball):
3452     """Creates an SDK tarball from a given source chroot.
3453
3454     Args:
3455       _chroot: Chroot used to find the compression tool; currently unused.
3456       sdk_path: Path to the root of newly generated SDK image.
3457       dest_tarball: Path of the tarball that should be created.
3458     """
3459     # TODO(zbehan): We cannot use xz from the chroot unless it's
3460     # statically linked.
3461     extra_args = ['--exclude=%s/*' % path for path in self._EXCLUDED_PATHS]
3462     # Options for maximum compression.
3463     extra_env = {'XZ_OPT': '-e9'}
3464     cros_build_lib.CreateTarball(
3465         dest_tarball, sdk_path, sudo=True, extra_args=extra_args,
3466         extra_env=extra_env)
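    # Net effect, sketched as shell (illustrative; CreateTarball assembles
    # the real command line):
    #   sudo XZ_OPT=-e9 tar -cJf built-sdk.tar.xz \
    #       --exclude=usr/lib/debug/* ... -C <sdk_path> .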
3467
3468   def CreateManifestFromSDK(self, sdk_path, dest_manifest):
3469     """Creates a manifest from a given source chroot.
3470
3471     Args:
3472       sdk_path: Path to the root of the SDK to describe.
3473       dest_manifest: Path to the manifest that should be generated.
3474     """
3475     package_data = {}
3476     for key, version in portage_utilities.ListInstalledPackages(sdk_path):
3477       package_data.setdefault(key, []).append((version, {}))
3478     self._WriteManifest(package_data, dest_manifest)
3479
3480   def _WriteManifest(self, data, manifest):
3481     """Encode manifest into a json file."""
3482     json_input = dict(version=self.MANIFEST_VERSION, packages=data)
3483     osutils.WriteFile(manifest, json.dumps(json_input))
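    # Resulting manifest shape (package name/version hypothetical):
    #   {"version": "1",
    #    "packages": {"sys-devel/gcc": [["4.8.2", {}]]}}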
3484
3485
3486 class SDKTestStage(bs.BuilderStage):
3487   """Stage that tests an SDK created in a previous stage"""
3488
3489   option_name = 'tests'
3490
3491   def PerformStage(self):
3492     tarball_location = os.path.join(self._build_root, 'built-sdk.tar.xz')
3493     new_chroot_cmd = ['cros_sdk', '--chroot', 'new-sdk-chroot']
3494     # Build a new SDK using the provided tarball.
3495     cmd = new_chroot_cmd + ['--download', '--replace', '--nousepkg',
3496         '--url', 'file://' + tarball_location]
3497     cros_build_lib.RunCommand(cmd, cwd=self._build_root)
3498
3499     for board in self._boards:
3500       cros_build_lib.PrintBuildbotStepText(board)
3501       cmd = new_chroot_cmd + ['--', './setup_board',
3502           '--board', board, '--skip_chroot_upgrade']
3503       cros_build_lib.RunCommand(cmd, cwd=self._build_root)
3504       cmd = new_chroot_cmd + ['--', './build_packages',
3505           '--board', board, '--nousepkg', '--skip_chroot_upgrade']
3506       cros_build_lib.RunCommand(cmd, cwd=self._build_root)
3507
3508
3509 class NothingToArchiveException(Exception):
3510   """Thrown if ArchiveStage found nothing to archive."""
3511   def __init__(self, message='No images found to archive.'):
3512     super(NothingToArchiveException, self).__init__(message)
3513
3514
3515 class ArchiveStage(BoardSpecificBuilderStage, ArchivingStageMixin):
3516   """Archives build and test artifacts for developer consumption.
3517
3518   Attributes:
3519     release_tag: The release tag. E.g. 2981.0.0
3520     version: The full version string, including the milestone.
3521         E.g. R26-2981.0.0-b123
3522   """
3523
3524   option_name = 'archive'
3525   config_name = 'archive'
3526
3527   # This stage is intended to run in the background, in parallel with tests.
3528   def __init__(self, builder_run, board, chrome_version=None, **kwargs):
3529     super(ArchiveStage, self).__init__(builder_run, board, **kwargs)
3530     self.chrome_version = chrome_version
3531
3532     # TODO(mtennant): Places that use this release_tag attribute should
3533     # move to use self._run.attrs.release_tag directly.
3534     self.release_tag = getattr(self._run.attrs, 'release_tag', None)
3535
3536     self._recovery_image_status_queue = multiprocessing.Queue()
3537     self._release_upload_queue = multiprocessing.Queue()
3538     self._upload_queue = multiprocessing.Queue()
3539     self._push_image_status_queue = multiprocessing.Queue()
3540     self._wait_for_channel_signing = multiprocessing.Queue()
3541     self.artifacts = []
3542
3543   def WaitForRecoveryImage(self):
3544     """Wait until artifacts needed by SignerTest stage are created.
3545
3546     Returns:
3547       True if artifacts created successfully.
3548       False otherwise.
3549     """
3550     cros_build_lib.Info('Waiting for recovery image...')
3551     status = self._recovery_image_status_queue.get()
3552     # Put the status back so other SignerTestStage instances don't starve.
3553     self._recovery_image_status_queue.put(status)
3554     return status
3555
3556   def WaitForPushImage(self):
3557     """Wait until PushImage completes.
3558
3559     Returns:
3560       On success: The pushimage results.
3561       None on error, or if pushimage didn't run.
3562     """
3563     cros_build_lib.Info('Waiting for PushImage...')
3564     urls = self._push_image_status_queue.get()
3565     # Put the status back so other processes don't starve.
3566     self._push_image_status_queue.put(urls)
3567     return urls
3568
3569   def AnnounceChannelSigned(self, channel):
3570     """Announce that image signing has completed for a given channel.
3571
3572     Args:
3573       channel: A channel name ('stable', 'dev', etc),
3574                SignerResultsStage.FINISHED if all channels are finished, or
3575                None if there was an error and no channels will be announced.
3576     """
3577     self._wait_for_channel_signing.put(channel)
3578
3579   def WaitForChannelSigning(self):
3580     """Wait until ChannelSigning completes for a given channel.
3581
3582     This method is expected to return once for each channel, returning
3583     the name of the channel that was signed. When all channels are signed,
3584     it returns SignerResultsStage.FINISHED; None signals an error.
3585
3586     Returns:
3587       The channel name for which images have been signed; FINISHED when
3588       all channels are signed; or None on error.
3589     """
3590     cros_build_lib.Info('Waiting for channel images to be signed...')
3591     return self._wait_for_channel_signing.get()
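    # A consuming loop (e.g. PaygenStage.PerformStage) typically sees:
    #   'dev', 'beta', 'stable', SignerResultsStage.FINISHED
    # (names and order illustrative; None at any point signals an error).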
3592
3593   @staticmethod
3594   def SingleMatchGlob(path_pattern):
3595     """Returns the sole glob match, or the last (after sort) if several."""
3596     files = glob.glob(path_pattern)
3597     files.sort()
3598     if not files:
3599       raise NothingToArchiveException('No %s found!' % path_pattern)
3600     elif len(files) > 1:
3601       cros_build_lib.PrintBuildbotStepWarnings()
3602       cros_build_lib.Warning('Expecting one result for %s package, but '
3603                              'found multiple.', path_pattern)
3604     return files[-1]
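    # e.g. SingleMatchGlob(os.path.join(pkg_dir, 'foo') + '-*') returns the
    # lone 'foo-*' match, or warns and returns the lexicographically last
    # one when several exist ('foo' is a hypothetical package name).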
3605
3606   def ArchiveStrippedChrome(self):
3607     """Generate and upload stripped Chrome package."""
3608
3609     # If chrome is not installed, skip archiving.
3610     chroot_path = os.path.join(self._build_root, constants.DEFAULT_CHROOT_DIR)
3611     board_path = os.path.join(chroot_path, 'build', self._current_board)
3612     if not portage_utilities.IsPackageInstalled(constants.CHROME_CP,
3613                                                 board_path):
3614       return
3615
3616     cmd = ['strip_package', '--board', self._current_board,
3617            constants.CHROME_PN]
3618     cros_build_lib.RunCommand(cmd, cwd=self._build_root, enter_chroot=True)
3619     pkg_dir = os.path.join(
3620         self._build_root, constants.DEFAULT_CHROOT_DIR, 'build',
3621         self._current_board, 'stripped-packages')
3622     chrome_tarball = self.SingleMatchGlob(
3623         os.path.join(pkg_dir, constants.CHROME_CP) + '-*')
3624     filename = os.path.basename(chrome_tarball)
3625     os.link(chrome_tarball, os.path.join(self.archive_path, filename))
3626     self._upload_queue.put([filename])
3627
3628   def BuildAndArchiveDeltaSysroot(self):
3629     """Generate and upload delta sysroot for initial build_packages."""
3630     extra_env = {}
3631     if self._run.config.useflags:
3632       extra_env['USE'] = ' '.join(self._run.config.useflags)
3633     in_chroot_path = git.ReinterpretPathForChroot(self.archive_path)
3634     cmd = ['generate_delta_sysroot', '--out-dir', in_chroot_path,
3635            '--board', self._current_board]
3636     # TODO(mtennant): Make this condition into one run param.
3637     if not self._run.config.build_tests or not self._run.options.tests:
3638       cmd.append('--skip-tests')
3639     cros_build_lib.RunCommand(cmd, cwd=self._build_root, enter_chroot=True,
3640                               extra_env=extra_env)
3641     self._upload_queue.put([constants.DELTA_SYSROOT_TAR])
3642
3643   def LoadArtifactsList(self, board, image_dir):
3644     """Load the list of artifacts to upload for this board.
3645
3646     It attempts to load a JSON file, scripts/artifacts.json, from the
3647     overlay directories for this board. This file specifies the artifacts
3648     to generate. If the file can't be found, a default set is used that
3649     uploads every .bin file as a .tar.xz file, except for
3650     chromiumos_qemu_image.bin.
3651
3652     See BuildStandaloneArchive in cbuildbot_commands.py for format docs.
3653     """
3654     custom_artifacts_file = portage_utilities.ReadOverlayFile(
3655         'scripts/artifacts.json', board=board)
3656     artifacts = None
3657
3658     if custom_artifacts_file is not None:
3659       json_file = json.loads(custom_artifacts_file)
3660       artifacts = json_file.get('artifacts')
3661
3662     if artifacts is None:
3663       artifacts = []
3664       for image_file in glob.glob(os.path.join(image_dir, '*.bin')):
3665         basename = os.path.basename(image_file)
3666         if basename != constants.VM_IMAGE_BIN:
3667           info = {'input': [basename], 'archive': 'tar', 'compress': 'xz'}
3668           artifacts.append(info)
3669
3670     for artifact in artifacts:
3671       # Resolve the (possible) globs in the input list, and store
3672       # the actual set of files to use in 'paths'
3673       paths = []
3674       for s in artifact['input']:
3675         glob_paths = glob.glob(os.path.join(image_dir, s))
3676         if not glob_paths:
3677           logging.warning('No artifacts generated for input: %s', s)
3678         else:
3679           for path in glob_paths:
3680             paths.append(os.path.relpath(path, image_dir))
3681       artifact['paths'] = paths
3682     self.artifacts = artifacts
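    # A board's scripts/artifacts.json might look like (hypothetical):
    #   {"artifacts": [
    #     {"input": ["*.bin"], "archive": "tar", "compress": "xz"}]}
    # Default entries synthesized above follow the same schema; 'paths'
    # is filled in here by the glob expansion either way.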
3683
3684   def IsArchivedFile(self, filename):
3685     """Return True if filename is the name of a file being archived."""
3686     for artifact in self.artifacts:
3687       for path in itertools.chain(artifact['paths'], artifact['input']):
3688         if os.path.basename(path) == filename:
3689           return True
3690     return False
3691
3692   def PerformStage(self):
3693     buildroot = self._build_root
3694     config = self._run.config
3695     board = self._current_board
3696     debug = self._run.debug
3697     upload_url = self.upload_url
3698     archive_path = self.archive_path
3699     image_dir = self.GetImageDirSymlink()
3700
3701     extra_env = {}
3702     if config['useflags']:
3703       extra_env['USE'] = ' '.join(config['useflags'])
3704
3705     if not archive_path:
3706       raise NothingToArchiveException()
3707
3708     # The following functions are run in parallel (except where indicated
3709     # otherwise)
3710     # \- BuildAndArchiveArtifacts
3711     #    \- ArchiveReleaseArtifacts
3712     #       \- ArchiveFirmwareImages
3713     #       \- BuildAndArchiveAllImages
3714     #          (builds recovery image first, then launches functions below)
3715     #          \- BuildAndArchiveFactoryImages
3716     #          \- ArchiveStandaloneArtifacts
3717     #             \- ArchiveStandaloneArtifact
3718     #          \- ArchiveZipFiles
3719     #          \- ArchiveHWQual
3720     #       \- PushImage (blocks on BuildAndArchiveAllImages)
3721     #    \- ArchiveManifest
3722     #    \- ArchiveStrippedChrome
3723     #    \- ArchiveImageScripts
3724
3725     def ArchiveManifest():
3726       """Create manifest.xml snapshot of the built code."""
3727       output_manifest = os.path.join(archive_path, 'manifest.xml')
3728       cmd = ['repo', 'manifest', '-r', '-o', output_manifest]
3729       cros_build_lib.RunCommand(cmd, cwd=buildroot, capture_output=True)
3730       self._upload_queue.put(['manifest.xml'])
3731
3732     def BuildAndArchiveFactoryImages():
3733       """Build and archive the factory zip file.
3734
3735       The factory zip file consists of the factory toolkit and the factory
3736       install image. Both are built here.
3737       """
3738       # Build factory install image and create a symlink to it.
3739       factory_install_symlink = None
3740       if 'factory_install' in config['images']:
3741         alias = commands.BuildFactoryInstallImage(buildroot, board, extra_env)
3742         factory_install_symlink = self.GetImageDirSymlink(alias)
3743         if config['factory_install_netboot']:
3744           commands.MakeNetboot(buildroot, board, factory_install_symlink)
3745
3746       # Build the factory toolkit.
3747       chroot_dir = os.path.join(buildroot, constants.DEFAULT_CHROOT_DIR)
3748       chroot_tmp_dir = os.path.join(chroot_dir, 'tmp')
3749       with osutils.TempDir(base_dir=chroot_tmp_dir) as tempdir:
3750         # Build the factory toolkit.
3751         if config['factory_toolkit']:
3752           toolkit_dir = os.path.join(tempdir, 'factory_toolkit')
3753           os.makedirs(toolkit_dir)
3754           commands.MakeFactoryToolkit(
3755               buildroot, board, toolkit_dir, self._run.attrs.release_tag)
3756
3757         # Build and upload factory zip if needed.
3758         if factory_install_symlink or config['factory_toolkit']:
3759           filename = commands.BuildFactoryZip(
3760               buildroot, board, archive_path, factory_install_symlink,
3761               toolkit_dir, self._run.attrs.release_tag)
3762           self._release_upload_queue.put([filename])
3763
3764     def ArchiveStandaloneArtifact(artifact_info):
3765       """Build and upload a single archive."""
3766       if artifact_info['paths']:
3767         for path in commands.BuildStandaloneArchive(archive_path, image_dir,
3768                                                     artifact_info):
3769           self._release_upload_queue.put([path])
3770
3771     def ArchiveStandaloneArtifacts():
3772       """Build and upload standalone archives for each image."""
3773       if config['upload_standalone_images']:
3774         parallel.RunTasksInProcessPool(ArchiveStandaloneArtifact,
3775                                        [[x] for x in self.artifacts])
3776
3777     def ArchiveZipFiles():
3778       """Build and archive zip files.
3779
3780       This includes:
3781         - image.zip (all images in one big zip file)
3782         - the au-generator.zip used for update payload generation.
3783       """
3784       # Zip up everything in the image directory.
3785       image_zip = commands.BuildImageZip(archive_path, image_dir)
3786       self._release_upload_queue.put([image_zip])
3787
3788       # Archive au-generator.zip.
3789       filename = 'au-generator.zip'
3790       shutil.copy(os.path.join(image_dir, filename), archive_path)
3791       self._release_upload_queue.put([filename])
3792
3793     def ArchiveHWQual():
3794       """Build and archive the HWQual images."""
3795       # TODO(petermayo): This logic needs to be exported from the BuildTargets
3796       # stage rather than copied/re-evaluated here.
3797       # TODO(mtennant): Make this autotest_built concept into a run param.
3798       autotest_built = (config['build_tests'] and self._run.options.tests and
3799                         config['upload_hw_test_artifacts'])
3800
3801       if config['hwqual'] and autotest_built:
3802         # Build the full autotest tarball for hwqual image. We don't upload it,
3803         # as it's fairly large and only needed by the hwqual tarball.
3804         cros_build_lib.Info('Archiving full autotest tarball locally ...')
3805         tarball = commands.BuildFullAutotestTarball(self._build_root,
3806                                                     self._current_board,
3807                                                     image_dir)
3808         commands.ArchiveFile(tarball, archive_path)
3809
3810         # Build hwqual image and upload to Google Storage.
3811         hwqual_name = 'chromeos-hwqual-%s-%s' % (board, self.version)
3812         filename = commands.ArchiveHWQual(buildroot, hwqual_name, archive_path,
3813                                           image_dir)
3814         self._release_upload_queue.put([filename])
3815
3816     def ArchiveFirmwareImages():
3817       """Archive firmware images built from source if available."""
3818       archive = commands.BuildFirmwareArchive(buildroot, board, archive_path)
3819       if archive:
3820         self._release_upload_queue.put([archive])
3821
3822     def BuildAndArchiveAllImages():
3823       # Generate the recovery image. To conserve loop devices, we try to only
3824       # run one instance of build_image at a time. TODO(davidjames): Move the
3825       # image generation out of the archive stage.
3826       self.LoadArtifactsList(self._current_board, image_dir)
3827
3828       # For recovery image to be generated correctly, BuildRecoveryImage must
3829       # run before BuildAndArchiveFactoryImages.
3830       if self.IsArchivedFile(constants.BASE_IMAGE_BIN):
3831         commands.BuildRecoveryImage(buildroot, board, image_dir, extra_env)
3832         self._recovery_image_status_queue.put(True)
3833         # Re-generate the artifacts list so we include the newly created
3834         # recovery image.
3835         self.LoadArtifactsList(self._current_board, image_dir)
3836
3837       if config['images']:
3838         parallel.RunParallelSteps([BuildAndArchiveFactoryImages,
3839                                    ArchiveHWQual,
3840                                    ArchiveStandaloneArtifacts,
3841                                    ArchiveZipFiles])
3842
3843     def ArchiveImageScripts():
3844       """Archive tarball of generated image manipulation scripts."""
3845       target = os.path.join(archive_path, constants.IMAGE_SCRIPTS_TAR)
3846       files = glob.glob(os.path.join(image_dir, '*.sh'))
3847       files = [os.path.basename(f) for f in files]
3848       cros_build_lib.CreateTarball(target, image_dir, inputs=files)
3849       self._upload_queue.put([constants.IMAGE_SCRIPTS_TAR])
3850
3851     def PushImage():
3852       # This helper script is currently only available on internal manifests.
3853       if not config['internal']:
3854         return
3855
3856       self.GetParallel('debug_tarball_generated', pretty_name='debug tarball')
3857
3858       # Now that all data has been generated, we can upload the final result to
3859       # the image server.
3860       # TODO: When we support branches fully, the friendly name of the
3861       # branch needs to be used with PushImages.
3862       sign_types = []
3863       if config['name'].endswith('-%s' % cbuildbot_config.CONFIG_TYPE_FIRMWARE):
3864         sign_types += ['firmware']
3865       urls = commands.PushImages(
3866           board=board,
3867           archive_url=upload_url,
3868           dryrun=debug or not config['push_image'],
3869           profile=self._run.options.profile or config['profile'],
3870           sign_types=sign_types)
3871       self._push_image_status_queue.put(urls)
3872
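    # Illustrative note on the sign_types selection in PushImage() above: for
    # a hypothetical config named 'link-firmware', the '-firmware' suffix
    # (cbuildbot_config.CONFIG_TYPE_FIRMWARE) matches, so PushImages would be
    # invoked with sign_types=['firmware']; any other config name yields
    # sign_types=[].
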
3873     def ArchiveReleaseArtifacts():
3874       with self.ArtifactUploader(self._release_upload_queue, archive=False):
3875         steps = [BuildAndArchiveAllImages, ArchiveFirmwareImages]
3876         parallel.RunParallelSteps(steps)
3877       PushImage()
3878
3879     def BuildAndArchiveArtifacts():
3880       # Run archiving steps in parallel.
3881       steps = [ArchiveReleaseArtifacts, ArchiveManifest]
3882       if config['images']:
3883         steps.extend([self.ArchiveStrippedChrome, ArchiveImageScripts])
3884       if config['create_delta_sysroot']:
3885         steps.append(self.BuildAndArchiveDeltaSysroot)
3886
3887       with self.ArtifactUploader(self._upload_queue, archive=False):
3888         parallel.RunParallelSteps(steps)
3889
3890     if not self._run.config.pgo_generate:
3891       BuildAndArchiveArtifacts()
3892
3893   def _HandleStageException(self, exc_info):
3894     # Tell the HWTestStage not to wait for artifacts to be uploaded
3895     # in case ArchiveStage throws an exception.
3896     self._recovery_image_status_queue.put(False)
3897     self._push_image_status_queue.put(None)
3898     self._wait_for_channel_signing.put(None)
3899     return super(ArchiveStage, self)._HandleStageException(exc_info)
3900
3901
3902 class CPEExportStage(BoardSpecificBuilderStage, ArchivingStageMixin):
3903   """Handles generation & upload of package CPE information."""
3904
3905   config_name = 'cpe_export'
3906
3907   def PerformStage(self):
3908     """Generate debug symbols and upload debug.tgz."""
3909     buildroot = self._build_root
3910     board = self._current_board
3911     useflags = self._run.config.useflags
3912
3913     logging.info('Generating CPE export.')
3914     result = commands.GenerateCPEExport(buildroot, board, useflags)
3915
3916     logging.info('Writing CPE export to files for archive.')
3917     warnings_filename = os.path.join(self.archive_path,
3918                                      'cpe-warnings-chromeos-%s.txt' % board)
3919     results_filename = os.path.join(self.archive_path,
3920                                     'cpe-chromeos-%s.json' % board)
3921
3922     osutils.WriteFile(warnings_filename, result.error)
3923     osutils.WriteFile(results_filename, result.output)
3924
3925     logging.info('Uploading CPE files.')
3926     self.UploadArtifact(os.path.basename(warnings_filename), archive=False)
3927     self.UploadArtifact(os.path.basename(results_filename), archive=False)
3928
3929
3930 class DebugSymbolsStage(BoardSpecificBuilderStage, ArchivingStageMixin):
3931   """Handles generation & upload of debug symbols."""
3932
3933   config_name = 'debug_symbols'
3934
3935   def PerformStage(self):
3936     """Generate debug symbols and upload debug.tgz."""
3937     buildroot = self._build_root
3938     board = self._current_board
3939
3940     commands.GenerateBreakpadSymbols(buildroot, board, self._run.debug)
3941     self.board_runattrs.SetParallel('breakpad_symbols_generated', True)
3942
3943     steps = [self.UploadDebugTarball]
3944     failed_list = os.path.join(self.archive_path, 'failed_upload_symbols.list')
3945     if self._run.config.upload_symbols:
3946       steps.append(lambda: self.UploadSymbols(buildroot, board, failed_list))
3947
3948     parallel.RunParallelSteps(steps)
3949
3950   def UploadDebugTarball(self):
3951     """Generate and upload the debug tarball."""
3952     filename = commands.GenerateDebugTarball(
3953         self._build_root, self._current_board, self.archive_path,
3954         self._run.config.archive_build_debug)
3955     self.UploadArtifact(filename, archive=False)
3956
3957     cros_build_lib.Info('Announcing availability of debug tarball now.')
3958     self.board_runattrs.SetParallel('debug_tarball_generated', True)
3959
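  # Illustrative note: 'debug_tarball_generated' is the handshake that
  # ArchiveStage.PushImage() blocks on via
  #   self.GetParallel('debug_tarball_generated', pretty_name='debug tarball')
  # so announcing it here unblocks image pushing without waiting for the
  # parallel UploadSymbols step to finish.
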
3960   def UploadSymbols(self, buildroot, board, failed_list):
3961     """Upload generated debug symbols."""
3962     if self._run.options.remote_trybot or self._run.debug:
3963       # For debug builds, limit ourselves to just uploading 1 symbol.
3964       # This way trybots and such still exercise this code.
3965       cnt = 1
3966       official = False
3967     else:
3968       cnt = None
3969       official = self._run.config.chromeos_official
3970
3971     commands.UploadSymbols(buildroot, board, official, cnt, failed_list)
3972
3973     if os.path.exists(failed_list):
3974       self.UploadArtifact(os.path.basename(failed_list), archive=False)
3975
3976   def _SymbolsNotGenerated(self):
3977     """Tell other stages that our symbols were not generated."""
3978     self.board_runattrs.SetParallelDefault('breakpad_symbols_generated', False)
3979     self.board_runattrs.SetParallelDefault('debug_tarball_generated', False)
3980
3981   def HandleSkip(self):
3982     """Tell other stages to not wait on us if we are skipped."""
3983     self._SymbolsNotGenerated()
3984     return super(DebugSymbolsStage, self).HandleSkip()
3985
3986   def _HandleStageException(self, exc_info):
3987     """Tell other stages to not wait on us if we die for some reason."""
3988     self._SymbolsNotGenerated()
3989     return super(DebugSymbolsStage, self)._HandleStageException(exc_info)
3990
3991
3992 class MasterUploadPrebuiltsStage(bs.BuilderStage):
3993   """Syncs prebuilt binhost files across slaves."""
3994   # TODO(mtennant): This class represents logic spun out from
3995   # UploadPrebuiltsStage that is specific to a master builder.  It will
3996   # be used by the commit queue to start, but could be used by other
3997   # master builders that upload prebuilts.  Current examples are
3998   # x86-alex-pre-flight-branch and x86-generic-chromium-pfq.  When
3999   # completed, the UploadPrebuiltsStage code can be thinned significantly.
4000   option_name = 'prebuilts'
4001   config_name = 'prebuilts'
4002
4003   def _GenerateCommonArgs(self):
4004     """Generate common prebuilt arguments."""
4005     generated_args = []
4006     if self._run.options.debug:
4007       generated_args.append('--debug')
4008
4009     profile = self._run.options.profile or self._run.config['profile']
4010     if profile:
4011       generated_args.extend(['--profile', profile])
4012
4013     # Generate the version if we are a manifest_version build.
4014     if self._run.config.manifest_version:
4015       version = self._run.GetVersion()
4016       generated_args.extend(['--set-version', version])
4017
4018     return generated_args
4019
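  # Illustrative sketch (assumed option values): for a debug run with
  # profile 'minimal' on a manifest_version build whose version resolves to
  # '4444.0.0', _GenerateCommonArgs() would return
  #   ['--debug', '--profile', 'minimal', '--set-version', '4444.0.0'].
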
4020   @staticmethod
4021   def _AddOptionsForSlave(slave_config):
4022     """Private helper method to add upload_prebuilts args for a slave builder.
4023
4024     Args:
4025       slave_config: The build config of a slave builder.
4026
4027     Returns:
4028       A list of options to pass to upload_prebuilts that allows a master
4029       to submit prebuilt conf modifications on behalf of a slave.
4030     """
4031     args = []
4032     if slave_config['prebuilts']:
4033       for slave_board in slave_config['boards']:
4034         args.extend(['--slave-board', slave_board])
4035         slave_profile = slave_config['profile']
4036         if slave_profile:
4037           args.extend(['--slave-profile', slave_profile])
4038
4039     return args
4040
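  # Illustrative sketch (hypothetical slave config): with
  #   slave_config = {'prebuilts': constants.PUBLIC,
  #                   'boards': ['x86-alex', 'x86-mario'],
  #                   'profile': None}
  # _AddOptionsForSlave() returns
  #   ['--slave-board', 'x86-alex', '--slave-board', 'x86-mario'];
  # a non-empty 'profile' would add a '--slave-profile' pair per board.
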
4041   def PerformStage(self):
4042     """Syncs prebuilt binhosts for slave builders."""
4043     # Common args we generate for all types of builds.
4044     generated_args = self._GenerateCommonArgs()
4045     # Args we specifically add for public/private build types.
4046     public_args, private_args = [], []
4047     # Gather public/private (slave) builders.
4048     public_builders, private_builders = [], []
4049
4050     # Distributed builders that use manifest-versions to sync with one another
4051     # share prebuilt logic by passing around versions.
4052     assert cbuildbot_config.IsPFQType(self._prebuilt_type)
4053     assert self._prebuilt_type != constants.CHROME_PFQ_TYPE
4054
4055     # Public pfqs should upload host preflight prebuilts.
4056     public_args.append('--sync-host')
4057
4058     # Update all the binhost conf files.
4059     generated_args.append('--sync-binhost-conf')
4060     for slave_config in self._GetSlaveConfigs():
4061       if slave_config['prebuilts'] == constants.PUBLIC:
4062         public_builders.append(slave_config['name'])
4063         public_args.extend(self._AddOptionsForSlave(slave_config))
4064       elif slave_config['prebuilts'] == constants.PRIVATE:
4065         private_builders.append(slave_config['name'])
4066         private_args.extend(self._AddOptionsForSlave(slave_config))
4067
4068     # Upload the public prebuilts, if any.
4069     if public_builders:
4070       commands.UploadPrebuilts(
4071           category=self._prebuilt_type, chrome_rev=self._chrome_rev,
4072           private_bucket=False, buildroot=self._build_root, board=None,
4073           extra_args=generated_args + public_args)
4074
4075     # Upload the private prebuilts, if any.
4076     if private_builders:
4077       commands.UploadPrebuilts(
4078           category=self._prebuilt_type, chrome_rev=self._chrome_rev,
4079           private_bucket=True, buildroot=self._build_root, board=None,
4080           extra_args=generated_args + private_args)
4081
4082
4083 class UploadPrebuiltsStage(BoardSpecificBuilderStage):
4084   """Uploads binaries generated by this build for developer use."""
4085
4086   option_name = 'prebuilts'
4087   config_name = 'prebuilts'
4088
4089   def __init__(self, builder_run, board, **kwargs):
4090     super(UploadPrebuiltsStage, self).__init__(builder_run, board, **kwargs)
4091
4092   def GenerateCommonArgs(self):
4093     """Generate common prebuilt arguments."""
4094     generated_args = []
4095     if self._run.options.debug:
4096       generated_args.append('--debug')
4097
4098     profile = self._run.options.profile or self._run.config.profile
4099     if profile:
4100       generated_args.extend(['--profile', profile])
4101
4102     # Generate the version if we are a manifest_version build.
4103     if self._run.config.manifest_version:
4104       version = self._run.GetVersion()
4105       generated_args.extend(['--set-version', version])
4106
4107     if self._run.config.git_sync:
4108       # Git sync should never be set for pfq type builds.
4109       assert not cbuildbot_config.IsPFQType(self._prebuilt_type)
4110       generated_args.append('--git-sync')
4111
4112     return generated_args
4113
4114   @classmethod
4115   def _AddOptionsForSlave(cls, slave_config, board):
4116     """Private helper method to add upload_prebuilts args for a slave builder.
4117
4118     Args:
4119       slave_config: The build config of a slave builder.
4120       board: The name of the "master" board on the master builder.
4121
4122     Returns:
4123       A list of options to pass to upload_prebuilts that allows a master
4124       to submit prebuilt conf modifications on behalf of a slave.
4125     """
4126     args = []
4127     if slave_config['prebuilts']:
4128       for slave_board in slave_config['boards']:
4129         if slave_config['master'] and slave_board == board:
4130           # Ignore self.
4131           continue
4132
4133         args.extend(['--slave-board', slave_board])
4134         slave_profile = slave_config['profile']
4135         if slave_profile:
4136           args.extend(['--slave-profile', slave_profile])
4137
4138     return args
4139
4140   def PerformStage(self):
4141     """Uploads prebuilts for master and slave builders."""
4142     prebuilt_type = self._prebuilt_type
4143     board = self._current_board
4144     binhosts = []
4145
4146     # Whether we publish public or private prebuilts.
4147     public = self._run.config.prebuilts == constants.PUBLIC
4148     # Common args we generate for all types of builds.
4149     generated_args = self.GenerateCommonArgs()
4150     # Args we specifically add for public/private build types.
4151     public_args, private_args = [], []
4152     # Public / private builders.
4153     public_builders, private_builders = [], []
4154
4155     # Distributed builders that use manifest-versions to sync with one another
4156     # share prebuilt logic by passing around versions.
4157     if cbuildbot_config.IsPFQType(prebuilt_type):
4158       # Public pfqs should upload host preflight prebuilts.
4159       if prebuilt_type != constants.CHROME_PFQ_TYPE:
4160         public_args.append('--sync-host')
4161
4162       # Deduplicate against previous binhosts.
4163       binhosts.extend(self._GetPortageEnvVar(_PORTAGE_BINHOST, board).split())
4164       binhosts.extend(self._GetPortageEnvVar(_PORTAGE_BINHOST, None).split())
4165       for binhost in filter(None, binhosts):
4166         generated_args.extend(['--previous-binhost-url', binhost])
4167
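      # Illustrative sketch (hypothetical binhost URL): if the board-specific
      # PORTAGE_BINHOST were 'gs://chromeos-prebuilt/board/x86-alex/packages'
      # and the host binhost were empty, the loop above would add
      #   ['--previous-binhost-url',
      #    'gs://chromeos-prebuilt/board/x86-alex/packages']
      # so upload_prebuilts can deduplicate against what is already uploaded.
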
4168       if self._run.config.master and board == self._boards[-1]:
4169         # The master builder updates all the binhost conf files, and needs
4170         # to do so only once to avoid updating the same file more than
4171         # once. Since multiple boards can be built on the same builder, we
4172         # arbitrarily update the binhost conf files when we run
4173         # upload_prebuilts for the last board. The other boards are treated
4174         # as slave boards.
4175         generated_args.append('--sync-binhost-conf')
4176         for c in self._GetSlaveConfigs():
4177           if c['prebuilts'] == constants.PUBLIC:
4178             public_builders.append(c['name'])
4179             public_args.extend(self._AddOptionsForSlave(c, board))
4180           elif c['prebuilts'] == constants.PRIVATE:
4181             private_builders.append(c['name'])
4182             private_args.extend(self._AddOptionsForSlave(c, board))
4183
4184     # Upload the public prebuilts, if any.
4185     if public_builders or public:
4186       public_board = board if public else None
4187       commands.UploadPrebuilts(
4188           category=prebuilt_type, chrome_rev=self._chrome_rev,
4189           private_bucket=False, buildroot=self._build_root,
4190           board=public_board, extra_args=generated_args + public_args)
4191
4192     # Upload the private prebuilts, if any.
4193     if private_builders or not public:
4194       private_board = board if not public else None
4195       commands.UploadPrebuilts(
4196           category=prebuilt_type, chrome_rev=self._chrome_rev,
4197           private_bucket=True, buildroot=self._build_root, board=private_board,
4198           extra_args=generated_args + private_args)
4199
4200
4201 class DevInstallerPrebuiltsStage(UploadPrebuiltsStage):
4202   """Stage that uploads DevInstaller prebuilts."""
4203
4204   config_name = 'dev_installer_prebuilts'
4205
4206   def PerformStage(self):
4207     generated_args = self.GenerateCommonArgs()
4208     commands.UploadDevInstallerPrebuilts(
4209         binhost_bucket=self._run.config.binhost_bucket,
4210         binhost_key=self._run.config.binhost_key,
4211         binhost_base_url=self._run.config.binhost_base_url,
4212         buildroot=self._build_root,
4213         board=self._current_board,
4214         extra_args=generated_args)
4215
4216
4217 class PublishUprevChangesStage(bs.BuilderStage):
4218   """Makes uprev changes from pfq live for developers."""
4219
4220   def __init__(self, builder_run, success, **kwargs):
4221     """Constructor.
4222
4223     Args:
4224       builder_run: BuilderRun object.
4225       success: Boolean indicating whether the build succeeded.
4226     """
4227     super(PublishUprevChangesStage, self).__init__(builder_run, **kwargs)
4228     self.success = success
4229
4230   def PerformStage(self):
4231     overlays, push_overlays = self._ExtractOverlays()
4232     assert push_overlays, 'push_overlays must be set to run this stage'
4233
4234     # If the build failed, we don't want to push our local changes, because
4235     # they might include some CLs that failed. Instead, clean up our local
4236     # changes and do a fresh uprev.
4237     if not self.success:
4238       # Clean up our root and sync down the latest changes that were
4239       # submitted.
4240       commands.BuildRootGitCleanup(self._build_root)
4241
4242       # Sync down the latest changes we have submitted.
4243       if self._run.options.sync:
4244         next_manifest = self._run.config.manifest
4245         repo = self.GetRepoRepository()
4246         repo.Sync(next_manifest)
4247
4248       # Commit an uprev locally.
4249       if self._run.options.uprev and self._run.config.uprev:
4250         commands.UprevPackages(self._build_root, self._boards, overlays)
4251
4252     # Push the uprev commit.
4253     commands.UprevPush(self._build_root, push_overlays, self._run.options.debug)
4254
4255
4256 class ReportStage(bs.BuilderStage, ArchivingStageMixin):
4257   """Summarize all the builds."""
4258
4259   _HTML_HEAD = """<html>
4260 <head>
4261  <title>Archive Index: %(board)s / %(version)s</title>
4262 </head>
4263 <body>
4264 <h2>Artifacts Index: %(board)s / %(version)s (%(config)s config)</h2>"""
4265
4266   def __init__(self, builder_run, sync_instance, completion_instance, **kwargs):
4267     super(ReportStage, self).__init__(builder_run, **kwargs)
4268
4269     # TODO(mtennant): All these should be retrieved from builder_run instead.
4270     # Or, more correctly, the info currently retrieved from these stages should
4271     # be stored and retrieved from builder_run instead.
4272     self._sync_instance = sync_instance
4273     self._completion_instance = completion_instance
4274
4275   def _UpdateRunStreak(self, builder_run, final_status):
4276     """Update the streak counter for this builder, if applicable, and notify.
4277
4278     Update the pass/fail streak counter for the builder.  If the new
4279     streak should trigger a notification email then send it now.
4280
4281     Args:
4282       builder_run: BuilderRun for this run.
4283       final_status: Final status string for this run.
4284     """
4285
4286     # Exclude tryjobs from streak counting.
4287     if not builder_run.options.remote_trybot and not builder_run.options.local:
4288       streak_value = self._UpdateStreakCounter(
4289           final_status=final_status, counter_name=builder_run.config.name,
4290           dry_run=self._run.debug)
4291       cros_build_lib.Info('New pass/fail streak value for %s is: %s',
4292                           builder_run.config.name, streak_value)
4293       # See if updated streak should trigger a notification email.
4294       if (builder_run.config.health_alert_recipients and
4295           streak_value <= -builder_run.config.health_threshold):
4296         cros_build_lib.Info(
4297             'Builder failed %i consecutive times, sending health alert '
4298             'email to %s.',
4299             -streak_value,
4300             builder_run.config.health_alert_recipients)
4301
4302         if not self._run.debug:
4303           alerts.SendEmail('%s health alert' % builder_run.config.name,
4304                            builder_run.config.health_alert_recipients,
4305                            message=self._HealthAlertMessage(-streak_value),
4306                            smtp_server=constants.GOLO_SMTP_SERVER,
4307                            extra_fields={'X-cbuildbot-alert': 'cq-health'})
4308
4309   def _UpdateStreakCounter(self, final_status, counter_name,
4310                            dry_run=False):
4311     """Update the given streak counter based on the final status of build.
4312
4313     A streak counter counts the number of consecutive passes or failures of
4314     a particular builder. Consecutive passes are indicated by a positive value,
4315     consecutive failures by a negative value.
4316
4317     Args:
4318       final_status: String indicating final status of build,
4319                     constants.FINAL_STATUS_PASSED indicating success.
4320       counter_name: Name of counter to increment, typically the name of the
4321                     build config.
4322       dry_run: Pretend to update counter only. Default: False.
4323
4324     Returns:
4325       The new value of the streak counter.
4326     """
4327     gs_ctx = gs.GSContext(dry_run=dry_run)
4328     counter_url = os.path.join(constants.MANIFEST_VERSIONS_GS_URL,
4329                                constants.STREAK_COUNTERS,
4330                                counter_name)
4331     gs_counter = gs.GSCounter(gs_ctx, counter_url)
4332
4333     if final_status == constants.FINAL_STATUS_PASSED:
4334       streak_value = gs_counter.StreakIncrement()
4335     else:
4336       streak_value = gs_counter.StreakDecrement()
4337
4338     return streak_value
4339
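  # Worked example of the streak semantics documented above (illustrative):
  # a run history of PASSED, PASSED, FAILED, FAILED moves the counter
  # through 1, 2, -1, -2.  With health_threshold=2 and alert recipients
  # configured, the -2 value satisfies streak_value <= -health_threshold
  # and triggers the health alert email in _UpdateRunStreak.
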
4340   def _HealthAlertMessage(self, fail_count):
4341     """Returns the body of a health alert email message."""
4342     return 'The builder named %s has failed %i consecutive times. See %s' % (
4343         self._run.config['name'], fail_count, self.ConstructDashboardURL())
4344
4345   def _UploadMetadataForRun(self, builder_run, final_status):
4346     """Upload metadata.json for this entire run.
4347
4348     Args:
4349       builder_run: BuilderRun object for this run.
4350       final_status: Final status string for this run.
4351     """
4352     self.UploadMetadata(
4353         config=builder_run.config, final_status=final_status,
4354         sync_instance=self._sync_instance,
4355         completion_instance=self._completion_instance)
4356
4357   def _UploadArchiveIndex(self, builder_run):
4358     """Upload an HTML index for the artifacts at remote archive location.
4359
4360     If there are no artifacts in the archive then do nothing.
4361
4362     Args:
4363       builder_run: BuilderRun object for this run.
4364
4365     Returns:
4366       If an index file is uploaded then a dict is returned where each value
4367         is the same (the URL for the uploaded HTML index) and the keys are
4368         the boards it applies to, including None if applicable.  If no index
4369         file is uploaded then this returns None.
4370     """
4371     archive = builder_run.GetArchive()
4372     archive_path = archive.archive_path
4373
4374     config = builder_run.config
4375     boards = config.boards
4376     if boards:
4377       board_names = ' '.join(boards)
4378     else:
4379       boards = [None]
4380       board_names = '<no board>'
4381
4382     # See if there are any artifacts found for this run.
4383     uploaded = os.path.join(archive_path, commands.UPLOADED_LIST_FILENAME)
4384     if not os.path.exists(uploaded):
4385       # UPLOADED doesn't exist, which is normal if the Archive stage never
4386       # ran.  Either way, no archive index is needed.
4387       logging.info('No archived artifacts found for %s run (%s)',
4388                    builder_run.config.name, board_names)
4389
4390     else:
4391       # Prepare html head.
4392       head_data = {
4393           'board': board_names,
4394           'config': config.name,
4395           'version': builder_run.GetVersion(),
4396       }
4397       head = self._HTML_HEAD % head_data
4398
4399       files = osutils.ReadFile(uploaded).splitlines() + [
4400           '.|Google Storage Index',
4401           '..|',
4402       ]
4403       index = os.path.join(archive_path, 'index.html')
4404       # TODO(sbasi) crbug.com/362776: Rework the way we do uploading to
4405       # multiple buckets. Currently this can only be done in the Archive Stage
4406       # therefore index.html will only end up in the normal Chrome OS bucket.
4407       commands.GenerateHtmlIndex(index, files, url_base=archive.download_url,
4408                                  head=head)
4409       commands.UploadArchivedFile(
4410           archive_path, [archive.upload_url], os.path.basename(index),
4411           debug=self._run.debug, acl=self.acl)
4412       return dict((b, archive.download_url + '/index.html') for b in boards)
4413
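  # Illustrative note: lines read from UPLOADED are bare artifact filenames;
  # the two entries appended above add navigation links for the Google
  # Storage index ('.') and the parent directory ('..').  The 'target|label'
  # split is an assumption about how commands.GenerateHtmlIndex renders
  # pipe-separated entries.
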
4414   def PerformStage(self):
4415     # Make sure local archive directory is prepared, if it was not already.
4416     # TODO(mtennant): It is not clear how this happens, but a CQ master run
4417     # that never sees an open tree somehow reaches the Report stage without
4418     # an archive directory set up.
4419     if not os.path.exists(self.archive_path):
4420       self.archive.SetupArchivePath()
4421
4422     if results_lib.Results.BuildSucceededSoFar():
4423       final_status = constants.FINAL_STATUS_PASSED
4424     else:
4425       final_status = constants.FINAL_STATUS_FAILED
4426
4427     # Upload metadata, and update the pass/fail streak counter for the main
4428     # run only. These aren't needed for the child builder runs.
4429     self._UploadMetadataForRun(self._run, final_status)
4430     self._UpdateRunStreak(self._run, final_status)
4431
4432     # Iterate through each builder run, whether there is just the main one
4433     # or multiple child builder runs.
4434     archive_urls = {}
4435     for builder_run in self._run.GetUngroupedBuilderRuns():
4436       # Generate an index for archived artifacts if there are any.  All the
4437       # archived artifacts for one run/config are in one location, so the index
4438       # is only specific to each run/config.  In theory multiple boards could
4439       # share that archive, but in practice it is usually one board.  A
4440       # run/config without a board will also usually not have artifacts to
4441       # archive, but that restriction is not assumed here.
4442       run_archive_urls = self._UploadArchiveIndex(builder_run)
4443       if run_archive_urls:
4444         archive_urls.update(run_archive_urls)
4445
4446         # Also update the LATEST files, since this run did archive something.
4447         archive = builder_run.GetArchive()
4448         archive.UpdateLatestMarkers(builder_run.manifest_branch,
4449                                     builder_run.debug)
4450
4451     version = getattr(self._run.attrs, 'release_tag', '')
4452     results_lib.Results.Report(sys.stdout, archive_urls=archive_urls,
4453                                current_version=version)