import os
import sys
from xml.etree import ElementTree
+from xml.dom import minidom
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot.stages import build_stages
from chromite.lib import commandline
from chromite.lib import cros_build_lib
+from chromite.lib import gclient
from chromite.lib import git
from chromite.lib import osutils
from chromite.lib import patch as cros_patch
+from chromite.scripts import cros_mark_chrome_as_stable
PRE_CQ = validation_pool.PRE_CQ
print >> sys.stderr, self.repo.ExportManifest(
mark_revision=self.output_manifest_sha1)
+ def RunPrePatchBuild(self):
+ """Run through a pre-patch build to prepare for incremental build.
+
+ This function runs through the InitSDKStage, SetupBoardStage, and
+ BuildPackagesStage. It is intended to be called before applying
+ any patches under test, to prepare the chroot and sysroot in a state
+ corresponding to ToT prior to an incremental build.
+
+ Returns:
+ True if all stages were successful, False if any of them failed.
+ """
+ # Tags each stage run so pre-patch runs are distinguishable from the
+ # later patched runs (presumably shown in stage names — confirm).
+ suffix = ' (pre-Patch)'
+ try:
+ build_stages.InitSDKStage(
+ self._run, chroot_replace=True, suffix=suffix).Run()
+ for builder_run in self._run.GetUngroupedBuilderRuns():
+ for board in builder_run.config.boards:
+ build_stages.SetupBoardStage(
+ builder_run, board=board, suffix=suffix).Run()
+ build_stages.BuildPackagesStage(
+ builder_run, board=board, suffix=suffix).Run()
+ # Any stage failure is reported to the caller rather than raised, so the
+ # caller can decide how to proceed with the patched build.
+ except failures_lib.StepFailure:
+ return False
+
+ return True
+
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
self.Initialize()
elif self._run.options.buildbot:
lkgm_manager.GenerateBlameList(self.repo, old_filename)
+ # Incremental builds request an additional build before patching changes.
+ if self._run.config.build_before_patching:
+ pre_build_passed = self.RunPrePatchBuild()
+ if not pre_build_passed:
+ cros_build_lib.PrintBuildbotStepText('Pre-patch build failed.')
+
class LKGMSyncStage(SyncStage):
"""Stage that syncs to the last known good manifest blessed by builders."""
dry_run=dry_run,
master=self._run.config.master))
+ def _SetChromeVersionIfApplicable(self, manifest):
+ """If 'chrome' is in |manifest|, write the version to the BuilderRun object.
+
+ Side effect: stores the version under metadata['version']['chrome'] in
+ self._run.attrs.metadata.
+
+ Args:
+ manifest: Path to the manifest.
+ """
+ manifest_dom = minidom.parse(manifest)
+ elements = manifest_dom.getElementsByTagName(lkgm_manager.CHROME_ELEMENT)
+
+ if elements:
+ chrome_version = elements[0].getAttribute(
+ lkgm_manager.CHROME_VERSION_ATTR)
+ logging.info(
+ 'Chrome version was found in the manifest: %s', chrome_version)
+ # Update the metadata dictionary. This is necessary because the
+ # metadata dictionary is preserved through re-executions, so
+ # SyncChromeStage can read the version from the dictionary
+ # later. This is easier than parsing the manifest again after
+ # the re-execution.
+ self._run.attrs.metadata.UpdateKeyDictWithDict(
+ 'version', {'chrome': chrome_version})
+
def GetNextManifest(self):
"""Uses the initialized manifest manager to get the next manifest."""
assert self.manifest_manager, \
'Must run GetStageManager before checkout out build.'
+ build_id = self._run.attrs.metadata.GetDict().get('build_id')
+
to_return = self.manifest_manager.GetNextBuildSpec(
- dashboard_url=self.ConstructDashboardURL())
+ dashboard_url=self.ConstructDashboardURL(),
+ build_id=build_id)
previous_version = self.manifest_manager.GetLatestPassingSpec()
target_version = self.manifest_manager.current_version
self._Print('\nRELEASETAG: %s\n' % (
self.manifest_manager.current_version))
+ self._SetChromeVersionIfApplicable(next_manifest)
# To keep local trybots working, remove restricted checkouts from the
# official manifest we get from manifest-versions.
with self.LocalizeManifest(
self.ManifestCheckout(new_manifest)
-class MasterSlaveSyncStage(ManifestVersionedSyncStage):
- """Stage that generates a unique manifest file candidate, and sync's to it."""
+class MasterSlaveLKGMSyncStage(ManifestVersionedSyncStage):
+ """Stage that generates a unique manifest file candidate, and syncs to it.
+
+ This stage uses an LKGM manifest manager that handles LKGM
+ candidates and their states.
+ """
+
+ # Timeout for waiting on the latest candidate manifest.
+ LATEST_CANDIDATE_TIMEOUT_SECONDS = 20 * 60
# TODO(mtennant): Turn this into self._run.attrs.sub_manager or similar.
# An instance of lkgm_manager.LKGMManager for slave builds.
sub_manager = None
def __init__(self, builder_run, **kwargs):
- super(MasterSlaveSyncStage, self).__init__(builder_run, **kwargs)
+ super(MasterSlaveLKGMSyncStage, self).__init__(builder_run, **kwargs)
# lkgm_manager deals with making sure we're synced to whatever manifest
# we get back in GetNextManifest so syncing again is redundant.
self.skip_sync = True
+ # Chrome version to uprev; set by PerformStage on a master build when
+ # chrome_rev is CHROME_REV_LATEST, otherwise stays None.
+ self._chrome_version = None
def _GetInitializedManager(self, internal):
"""Returns an initialized lkgm manager.
self.RegisterManifestManager(self._GetInitializedManager(self.internal))
if (self._run.config.master and self._GetSlaveConfigs()):
assert self.internal, 'Unified masters must use an internal checkout.'
- MasterSlaveSyncStage.sub_manager = self._GetInitializedManager(False)
+ MasterSlaveLKGMSyncStage.sub_manager = self._GetInitializedManager(False)
def ForceVersion(self, version):
+ """Force sync to |version|, bootstrapping the sub-manager if present."""
- manifest = super(MasterSlaveSyncStage, self).ForceVersion(version)
- if MasterSlaveSyncStage.sub_manager:
- MasterSlaveSyncStage.sub_manager.BootstrapFromVersion(version)
+ manifest = super(MasterSlaveLKGMSyncStage, self).ForceVersion(version)
+ if MasterSlaveLKGMSyncStage.sub_manager:
+ MasterSlaveLKGMSyncStage.sub_manager.BootstrapFromVersion(version)
return manifest
'Manifest manager instantiated with wrong class.'
if self._run.config.master:
- manifest = self.manifest_manager.CreateNewCandidate()
- if MasterSlaveSyncStage.sub_manager:
- MasterSlaveSyncStage.sub_manager.CreateFromManifest(
+ build_id = self._run.attrs.metadata.GetDict().get('build_id')
+ manifest = self.manifest_manager.CreateNewCandidate(
+ chrome_version=self._chrome_version,
+ build_id=build_id)
+ if MasterSlaveLKGMSyncStage.sub_manager:
+ MasterSlaveLKGMSyncStage.sub_manager.CreateFromManifest(
manifest, dashboard_url=self.ConstructDashboardURL())
return manifest
else:
return self.manifest_manager.GetLatestCandidate(
- dashboard_url=self.ConstructDashboardURL())
+ dashboard_url=self.ConstructDashboardURL(),
+ timeout=self.LATEST_CANDIDATE_TIMEOUT_SECONDS)
+ def GetLatestChromeVersion(self):
+ """Returns the version of Chrome to uprev."""
+ # Queries the first configured gclient base URL for the latest release.
+ return cros_mark_chrome_as_stable.GetLatestRelease(gclient.GetBaseURLs()[0])
-class CommitQueueSyncStage(MasterSlaveSyncStage):
+ @failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
+ def PerformStage(self):
+ """Determine the Chrome version if applicable, then run the base sync."""
+ if (self._chrome_rev == constants.CHROME_REV_LATEST and
+ self._run.config.master):
+ # PFQ master needs to determine what version of Chrome to build
+ # for all slaves.
+ self._chrome_version = self.GetLatestChromeVersion()
+
+ ManifestVersionedSyncStage.PerformStage(self)
+
+
+class CommitQueueSyncStage(MasterSlaveLKGMSyncStage):
"""Commit Queue Sync stage that handles syncing and applying patches.
- This stage handles syncing to a manifest, passing around that manifest to
- other builders and finding the Gerrit Reviews ready to be committed and
- applying them into its own checkout.
+ Similar to the MasterSlaveLKGMSyncStage, this stage handles syncing
+ to a manifest, passing around that manifest to other builders.
+
+ What makes this stage different is that the CQ master finds the
+ patches on Gerrit which are ready to be committed, applies them, and
+ includes the patches in the new manifest. The slaves sync to the
+ manifest, and apply the patches written in the manifest.
"""
def __init__(self, builder_run, **kwargs):
# - For the master commit queue, it's initialized in GetNextManifest.
# - For slave commit queues, it's initialized in _SetPoolFromManifest.
#
- # In all cases, the pool is saved to disk, and refreshed after bootstrapping
- # by HandleSkip.
+ # In all cases, the pool is saved to disk.
self.pool = None
def HandleSkip(self):
+ """Restore self.pool from disk (or the local manifest) when skipped."""
filename = self._run.options.validation_pool
if filename:
+ # NOTE(review): record_patches=False presumably avoids re-recording
+ # patch metadata when reloading a saved pool — confirm against
+ # ValidationPool.Load.
self.pool = validation_pool.ValidationPool.Load(filename,
- metadata=self._run.attrs.metadata)
+ metadata=self._run.attrs.metadata, record_patches=False)
else:
self._SetPoolFromManifest(self.manifest_manager.GetLocalManifest())
assert isinstance(self.manifest_manager, lkgm_manager.LKGMManager), \
'Manifest manager instantiated with wrong class.'
+ build_id = self._run.attrs.metadata.GetDict().get('build_id')
+
if self._run.config.master:
try:
# In order to acquire a pool, we need an initialized buildroot.
cros_build_lib.Warning(str(e))
return None
- manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool)
- if MasterSlaveSyncStage.sub_manager:
- MasterSlaveSyncStage.sub_manager.CreateFromManifest(
- manifest, dashboard_url=self.ConstructDashboardURL())
+ manifest = self.manifest_manager.CreateNewCandidate(validation_pool=pool,
+ build_id=build_id)
+ if MasterSlaveLKGMSyncStage.sub_manager:
+ MasterSlaveLKGMSyncStage.sub_manager.CreateFromManifest(
+ manifest, dashboard_url=self.ConstructDashboardURL(),
+ build_id=build_id)
return manifest
else:
dashboard_url=self.ConstructDashboardURL())
if manifest:
if self._run.config.build_before_patching:
- pre_build_passed = self._RunPrePatchBuild()
+ pre_build_passed = self.RunPrePatchBuild()
cros_build_lib.PrintBuildbotStepName(
'CommitQueueSync : Apply Patches')
if not pre_build_passed:
return manifest
- def _RunPrePatchBuild(self):
- """Run through a pre-patch build to prepare for incremental build.
-
- This function runs though the InitSDKStage, SetupBoardStage, and
- BuildPackagesStage. It is intended to be called before applying
- any patches under test, to prepare the chroot and sysroot in a state
- corresponding to ToT prior to an incremental build.
-
- Returns:
- True if all stages were successful, False if any of them failed.
- """
- suffix = ' (pre-Patch)'
- try:
- build_stages.InitSDKStage(
- self._run, chroot_replace=True, suffix=suffix).Run()
- for builder_run in self._run.GetUngroupedBuilderRuns():
- for board in builder_run.config.boards:
- build_stages.SetupBoardStage(
- builder_run, board=board, suffix=suffix).Run()
- build_stages.BuildPackagesStage(
- builder_run, board=board, suffix=suffix).Run()
- except failures_lib.StepFailure:
- return False
-
- return True
-
@failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
"""Performs normal stage and prints blamelist at end."""
self.pool = validation_pool.ValidationPool.Load(filename,
metadata=self._run.attrs.metadata)
- @failures_lib.SetFailureType(failures_lib.InfrastructureFailure)
def PerformStage(self):
super(PreCQSyncStage, self).PerformStage()
self.pool = validation_pool.ValidationPool.AcquirePreCQPool(
metadata=self._run.attrs.metadata)
self.pool.ApplyPoolIntoRepo()
+ # Abort the builder outright if no change under test made it into the
+ # checkout — there is nothing to build or test in that case.
+ if len(self.pool.changes) == 0:
+ cros_build_lib.Die('No changes have been applied.')
+
class PreCQLauncherStage(SyncStage):
"""Scans for CLs and automatically launches Pre-CQ jobs to test them."""
diff = datetime.timedelta(minutes=self.INFLIGHT_DELAY)
return datetime.datetime.now() - self.inflight[change] > diff
+ @staticmethod
+ def _PrintPatchStatus(patch, status):
+ """Print a buildbot link for |patch| annotated with |status|.
+
+ Args:
+ patch: Patch object exposing .project, .url, and a str() form.
+ status: Short status string (callers pass 'testing', 'passed',
+ or 'failed').
+ """
+ items = (
+ status,
+ os.path.basename(patch.project),
+ str(patch),
+ )
+ cros_build_lib.PrintBuildbotLink(' | '.join(items), patch.url)
+
def GetPreCQStatus(self, pool, changes):
"""Get the Pre-CQ status of a list of changes.
busy.add(change)
pool.UpdateCLStatus(PRE_CQ, change, self.STATUS_WAITING,
self._run.options.debug)
+ self._PrintPatchStatus(change, 'failed')
elif status == self.STATUS_PASSED:
passed.add(change)
+ self._PrintPatchStatus(change, 'passed')
return busy, passed
cmd.append('--debug')
for patch in plan:
cmd += ['-g', cros_patch.AddPrefix(patch, patch.gerrit_number)]
+ self._PrintPatchStatus(patch, 'testing')
cros_build_lib.RunCommand(cmd, cwd=self._build_root)
for patch in plan:
if pool.GetCLStatus(PRE_CQ, patch) != self.STATUS_PASSED: