--- /dev/null
+#!/usr/bin/env python
+
+from __future__ import with_statement
+from RuoteAMQP.participant import Participant
+import os, sys, io
+import ConfigParser
+import optparse
+from string import Template
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+# Fallback configuration. If you need to customize it, copy it somewhere
+# (ideally to your system's configuration directory), modify it and
+# pass it with the -c option.
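+#
+# For example (hypothetical paths; the actual script name depends on how this
+# participant is installed):
+#   python <this script> --get-defconf > /etc/boss/repomaker.conf
+#   python <this script> -c /etc/boss/repomaker.conf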
+
+from common.envparas import export
+import repomaker
+
+P_NAME = "repomaker"
+
+DEFAULTCONF = """
+[$pname]
+# ${pname} specific conf
+
+# where to put the snapshot repos
+builds = /srv/snapshots
+
+# configuration YAML file for supported repos
+repo_conf = /etc/repos/repos.yaml
+
+# OBS apiurl to query repo status
+obs_api = http://api.build.tizen.org
+oscrc = /etc/boss/oscrc
+
+# paths of OBS backend services; update these if OBS does not use the defaults
+raw_repos = /srv/obs/repos
+obs_triggers_path = /srv/obs/repos_sync
+obs_building_path = /srv/obs/build
+
+# use armv8 trick for armv7hl with hw FL
+no_armv8 = no
+
+# send notification mail to the specified address(es)
+mailto = root@localhost
+
+# optional, only needed if it differs from the normal snapshots base URL
+;sandbox_repo_baseurl = http://hostname-or-ip/sandbox
+"""
+
+CONFS = {}
+
+PARAM_LIST = ['BUILDS_PATH',
+ 'REPO_CONF',
+ 'OBS_API_URL',
+ 'OSCRC_PATH',
+ 'RAW_REPOS',
+ 'OBS_TRIGGERS_PATH',
+ 'OBS_BUILDING_PATH',
+ 'NO_ARMV8',
+ 'MAILTO',
+ 'SANDBOX_REPO_BASEURL',
+ 'EMAIL_TEMPLATES_DIR',
+ 'OBS_EVENT_STRING',
+ 'BUILD_TAG']
+
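+# export() (from common.envparas) is expected to read each of these names from
+# the process environment and inject them into this module's globals; empty or
+# unset values fall back to the config file parsed below.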
+export(PARAM_LIST, locals())
+
+def main():
+ event = json.loads(' '.join(OBS_EVENT_STRING.split()))
+ event_fields = event['fields']['obsEvent']
+
+ if not event_fields:
+        print "Invalid OBS event: %s" %(OBS_EVENT_STRING)
+ sys.exit(-1)
+
+ try:
+ from common.buildservice import BuildService
+ bs = BuildService(apiurl=CONFS['apiurl'], oscrc=CONFS['oscrc'])
+ except Exception, e:
+ print 'OBS access errors: ', str(e)
+ sys.exit(-1)
+
+ repomgr = repomaker.RepoManager(bs, CONFS)
+
+ print "%s job running" % P_NAME
+
+ try:
+ status = repomgr.create(event_fields['project'], event_fields['repo'])
+ print status
+ if status:
+            with open('%s.env' %(BUILD_TAG), 'w') as f:
+                for k, v in status.items():
+                    f.write("%s = %s\n" %(k, v))
+ else:
+ sys.exit(-1)
+ except Exception, e:
+ print 'Error: ', str(e)
+ sys.exit(-1)
+
+if __name__ == "__main__":
+ parser = optparse.OptionParser()
+ parser.add_option("-c", "--config", dest="config",
+ help="specify configuration file")
+ parser.add_option("", "--get-defconf", dest="get_defconf", action="store_true",
+ help="Print out the default configuration file")
+
+ (opts, args) = parser.parse_args()
+
+ temp = Template(DEFAULTCONF)
+ str_conf = temp.substitute(pname=P_NAME)
+
+ if opts.get_defconf:
+ print str_conf
+ sys.exit(0)
+
+ if opts.config:
+ with open(opts.config) as f:
+ str_conf = f.read()
+
+ config = ConfigParser.ConfigParser()
+ config.readfp(io.BytesIO(str_conf))
+
+ try:
+ # repomaker participant specific conf
+ CONFS['raw_repos'] = RAW_REPOS or config.get(P_NAME, 'raw_repos')
+ CONFS['repo_conf'] = REPO_CONF or config.get(P_NAME, 'repo_conf')
+ CONFS['apiurl'] = OBS_API_URL or config.get(P_NAME, 'obs_api')
+ CONFS['oscrc'] = OSCRC_PATH or config.get(P_NAME, 'oscrc')
+ CONFS['builds'] = BUILDS_PATH or config.get(P_NAME, 'builds')
+ except ConfigParser.NoOptionError, e:
+ print 'In config, %s' % str(e)
+ sys.exit(1)
+
+ builds_dir = CONFS['builds']
+ if not os.path.exists(builds_dir):
+ os.makedirs(builds_dir)
+ # the owner will be root, with 0777 permission
+ os.chmod(builds_dir, 0777)
+
+ try:
+ CONFS['obs_triggers_path'] = OBS_TRIGGERS_PATH or config.get(P_NAME, 'obs_triggers_path')
+ except ConfigParser.NoOptionError:
+ CONFS['obs_triggers_path'] = '/srv/obs/repos_sync'
+
+ try:
+ CONFS['obs_building_path'] = OBS_BUILDING_PATH or config.get(P_NAME, 'obs_building_path')
+ except ConfigParser.NoOptionError:
+ CONFS['obs_building_path'] = '/srv/obs/build'
+
+ try:
+ mailto = MAILTO or config.get(P_NAME, 'mailto')
+ CONFS['mailto'] = filter(None, [s.strip() for s in mailto.split(',')])
+ except ConfigParser.NoOptionError:
+ CONFS['mailto'] = []
+
+ try:
+ CONFS['no_armv8'] = (NO_ARMV8.lower() or config.get(P_NAME, 'no_armv8').lower()) == 'yes'
+ except ConfigParser.NoOptionError:
+ CONFS['no_armv8'] = False
+
+ try:
+ CONFS['sandbox_repo_baseurl'] = SANDBOX_REPO_BASEURL or config.get(P_NAME, 'sandbox_repo_baseurl')
+ except ConfigParser.NoOptionError:
+ CONFS['sandbox_repo_baseurl'] = None
+
+ CONFS['email_templates_dir'] = EMAIL_TEMPLATES_DIR
+
+ # check the toplevel dirs in repos.yaml
+ dirs = repomaker.get_toplevel_dirs_from_repoconf(CONFS['repo_conf'])
+ for d in dirs:
+ if not os.path.exists(d):
+ os.makedirs(d)
+ # the owner will be root, with 0777 permission
+ os.chmod(d, 0777)
+
+    # UGLY code, needs to be removed
+ tmpdir = '/srv/tmp'
+ if not os.path.exists(tmpdir):
+ os.makedirs(tmpdir)
+ # the owner will be root, with 0777 permission
+ os.chmod(tmpdir, 0777)
+
+ main()
r.append((rev, srcmd5, version, t, user, comment))
return r
+ def getProjectConfig(self, project):
+ """
+ getProjectConfig(project) -> string
+
+        Get the build config of the project
+ """
+ return ''.join(core.show_project_conf(self.apiurl, project))
+
def getProjectMeta(self, project):
"""
getProjectMeta(project) -> string
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+from linkrepo import LinkRepo
+import os, sys
+import re
+from time import sleep
+from datetime import datetime
+import tempfile
+import stat
+import shutil
+import xml.etree.ElementTree as ET
+import yaml
+
+
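+# Map repo arch names (as used in repos.yaml) to OBS scheduler arch names,
+# plus the reverse map for translating OBS repo state keys back.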
+ARCHES = {'ia32': 'i586', 'armv7l': 'armv7el', 'armv7hl': 'armv8el'}
+ARCHES_REV = {'i586': 'ia32', 'armv7el': 'armv7l', 'armv8el': 'armv7hl'}
+
+# fix up the extra whitespace that the buggy xml.dom.minidom.toprettyxml
+# inserts around text nodes
+XMLTEXT_RE = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
+
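+# A minimal sketch of the repos.yaml layout assumed by the helpers below
+# (hypothetical example values):
+#
+#   Repositories:
+#     - Name: base
+#       Project: Project:Base
+#       Target: standard
+#       TopLevel: /srv/snapshots/base
+#     - Name: main
+#       Project: Project:Main
+#       Target: standard
+#       TopLevel: /srv/snapshots/main
+#       DependsOn: base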
+def get_toplevel_dirs_from_repoconf(confp):
+ """Helper function to extract the toplevel paths from repo configs
+ """
+ paths = set()
+ if os.path.isfile(confp):
+        with open(confp) as f:
+            repos = yaml.load(f)['Repositories']
+ for repo in repos:
+ if 'TopLevel' in repo:
+ paths.add(os.path.dirname(repo['TopLevel'].rstrip('/')))
+
+ return paths
+
+class RepoManager:
+ def __init__(self, bs, myconf = None):
+ self.conf = myconf
+ self.bs = bs
+
+        if myconf and myconf.get('no_armv8'):
+ self.no_armv8 = True
+ else:
+ self.no_armv8 = False
+
+    def _read_template(self, name):
+        fp = os.path.join(self.conf['email_templates_dir'], name)
+        try:
+            with open(fp) as f:
+                return f.read()
+        except (IOError, OSError):
+            return ''
+
+    def _readin_repos(self):
+        repos_file = self.conf['repo_conf']
+        if os.path.isfile(repos_file):
+            with open(repos_file) as f:
+                return yaml.load(f)['Repositories']
+        else:
+            print 'missing repos config file:', repos_file
+            return None
+
+ def _get_repo_by_name(self, name):
+ repos_meta = self._readin_repos()
+ if not repos_meta:
+ return None
+
+ repo = None
+ for r in repos_meta:
+ if 'Project' not in r or 'Target' not in r:
+ continue
+
+ if r['Name'] == name:
+ repo = r
+ repo['targets'] = []
+ if 'Architectures' in r:
+ repo['archs'] = r['Architectures']
+ else:
+ repo['archs'] = ['ia32']
+
+ for a in repo['archs']:
+ try:
+ arch = ARCHES[a]
+
+ if a == 'armv7hl' and self.no_armv8:
+ arch = a
+ repo['targets'].append(('/'.join((r['Target'] , arch))))
+ except KeyError:
+ print "Unspecified arch %s" % a
+
+ if 'GroupFile' in r:
+ repo['patterns'] = True
+ else:
+ repo['patterns'] = False
+ break
+
+ return repo
+
+ def _get_repo(self, prj, target):
+ repos_meta = self._readin_repos()
+ if not repos_meta:
+ return None
+
+ repo = None
+ for r in repos_meta:
+ if 'Project' not in r or 'Target' not in r:
+ continue
+
+ if r['Project'] == prj and r['Target'] == target:
+ repo = r
+
+ if 'Architectures' in r:
+ repo['archs'] = r['Architectures']
+ else:
+ repo['archs'] = ['ia32']
+
+ if 'GroupFile' in r:
+ repo['patterns'] = True
+ else:
+ repo['patterns'] = False
+ break
+
+ return repo
+
+ def _check_published(self, repo, arch = None):
+ prj = repo['Project']
+ state = self.bs.getRepoState(prj)
+ print "state for %s (%s): %s" %(prj, arch, state)
+ ret = True
+ for t in repo['targets']:
+ (target, architecture) = t.split("/")
+ if architecture in ARCHES_REV:
+ repo_arch = ARCHES_REV[architecture]
+ else:
+ repo_arch = architecture
+
+ if t in state and arch == repo_arch:
+ print "> %s %s is %s" %(prj, t, state[t])
+ count = 1
+ while ( state[t] == 'publishing' or \
+ state[t] == 'finished' or \
+ state[t] == 'scheduling' ) and count < 100:
+ print "repo %s status is %s, waiting..." % (t, state[t])
+ sys.stdout.flush()
+
+ sleep(5)
+ state = self.bs.getRepoState(prj)
+ count = count + 1
+
+ if state[t] != 'published':
+ print ">> %s %s is still in '%s', skip" %(prj, t, state[t])
+ ret = False
+ break
+
+ return ret
+
+    def update_image_configs(self, dest_repo, dest_pkg):
+        """ Extract the image configurations (.ks files and image-configs.xml)
+            from the 'image-configurations' rpm found under dest_pkg and
+            install them under dest_repo/builddata
+        """
+        pg = re.compile(r"image-configurations.*\.rpm")
+        found = False
+        for root, dirs, files in os.walk("%s" %dest_pkg):
+            for f in files:
+                if re.match(pg, f):
+                    print "image configurations found: %s/%s" %(root,f)
+                    tmpdir = tempfile.mkdtemp()
+                    pwd = os.getcwd()
+                    os.chdir(tmpdir)
+                    os.system("rpm2cpio %s/%s | cpio -idmv" %(root,f))
+                    os.system("mkdir -p %s/builddata/image-configs" %dest_repo)
+                    os.system("rm -f %s/builddata/image-configs/*.ks" %(dest_repo))
+ if os.path.exists("%s/usr/share/image-configurations/image-configs.xml" %tmpdir):
+ shutil.copyfile("%s/usr/share/image-configurations/image-configs.xml" %tmpdir, '%s/builddata/image-configs.xml' %dest_repo)
+ os.system("cp %s/usr/share/image-configurations/*.ks %s/builddata/image-configs" %(tmpdir, dest_repo))
+ os.chdir(pwd)
+ shutil.rmtree(tmpdir)
+ found = True
+ break
+
+ if found: break
+
+ if not found:
+ print "did not find any image configurations"
+
+ def create(self, prj, target):
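+        """Create a snapshot of OBS project 'prj' / repository 'target'.
+
+        Returns a dict of result fields for the workflow (build id,
+        'snapshot' flag, notification mail data) or None on failure.
+        """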
+
+ if not prj or not target:
+ print '"project" and "target" must be specified in params'
+ return None
+
+ repo = self._get_repo( prj, target)
+ if not repo:
+ print "No repos meta in %s for %s/%s" % (self.conf['repo_conf'], prj, target)
+ return None
+
+        if repo.get('PartOf') == 'sandbox':
+ if 'SandboxOf' not in repo or \
+ not self._get_repo_by_name(repo['SandboxOf']):
+ print 'Invalid sandbox repo settings for %s' % prj
+ return None
+
+ wi = {}
+
+ print "=========================="
+ print datetime.today()
+ print "Repo: %s" %repo['Name']
+ print "=========================="
+
+ if 'BaseDir' in repo:
+ builds_base = repo['BaseDir']
+ else:
+ builds_base = self.conf['builds']
+
+ rname = repo['Name']
+ # see if all related repos are published and prepare for a lockdown and a snapshot
+ ready = {}
+ link = {}
+ for arch in repo['archs']:
+ print "Checking status of %s" %arch
+ ready[arch] = True
+ link[arch] = True
+ ret = self._check_published(self._get_repo_by_name(rname), arch)
+ print "ret: %s" %ret
+ if not ret:
+ print "%s is not ready yet, can't create a snapshot" %rname
+ ready[arch] = False
+ link[arch] = False
+ continue
+
+ if 'DependsOn' in repo:
+ toprepo = self._get_repo_by_name(repo['DependsOn'])
+ if self._check_published(toprepo, arch):
+ print '%s depends on %s which is published' %(rname, repo['DependsOn'] )
+ if 'Dependents' in toprepo:
+ for d in toprepo['Dependents']:
+ deprepo = self._get_repo_by_name(d)
+ if not self._check_published(deprepo, arch):
+ ready[arch] = False
+ print "%s is not ready yet, can't create a snapshot" %d
+ break
+ else:
+ print '%s depends on %s which is not published yet' %(rname, repo['DependsOn'] )
+ ready[arch] = False
+
+ elif 'Dependents' in repo:
+ for d in repo['Dependents']:
+ deprepo = self._get_repo_by_name(d)
+ if not self._check_published(deprepo, arch):
+ ready[arch] = False
+ print "%s is not ready yet, can't create a snapshot" %d
+ break
+
+ if 'DependsOn' in deprepo and rname in deprepo['DependsOn']:
+ toprepo = self._get_repo_by_name(deprepo['DependsOn'])
+ if not self._check_published(toprepo, arch):
+ ready[arch] = False
+ print "%s is not ready yet, can't create a snapshot" %deprepo['DependsOn']
+ break
+
+ status = True
+ for arch in repo['archs']:
+ if link[arch]:
+ print "Creating repo for %s %s arch: %s, " %(prj, target, arch),
+
+ if 'GpgKey' in repo:
+ gpgkey = repo['GpgKey']
+ else:
+ gpgkey = None
+
+ if 'SignUser' in repo:
+ signer = repo['SignUser']
+ else:
+ signer = None
+
+ lr = LinkRepo(self.conf['raw_repos'], gpgkey, signer)
+
+ liverepo = {'prj': prj,
+ 'target': target,
+ 'arch': arch,
+ }
+
+ # support of 'Sandbox' repos and images for devel prjs
+                if repo.get('PartOf') == 'sandbox':
+ brepo = self._get_repo_by_name(repo['SandboxOf'])
+ baserepo = {'prj': brepo['Project'],
+ 'target': brepo['Target'],
+ 'arch': arch,
+ }
+ else:
+ baserepo = None
+
+ # support for hidden binary rpms to be included in snapshot
+ if 'ExtraRpms' in repo:
+ extrarpms = repo['ExtraRpms']
+ else:
+ extrarpms = None
+
+ # whether to put obs project build conf to repodata dir
+ if 'ProjectConfig' in repo and repo['ProjectConfig']:
+ prjconf = self.bs.getProjectConfig(prj)
+ else:
+ prjconf = None
+
+ status = lr.linkrepo(self.conf['obs_triggers_path'], liverepo, repo['Location'], baserepo, extrarpms, prjconf)
+ if not os.path.exists("%s/builddata/image-configs.xml" %(repo['Location'])) and status:
+ self.update_image_configs(repo['TopLevel'], "%s/ia32/packages" %repo['Location'])
+ print "result: %s" %( "Ok" if status else "Error")
+
+ if not status:
+ wi['snapshot'] = False
+ return None
+
+ Go = False
+        if ready.get('ia32'):
+ for rr in ready.keys():
+ if ready[rr]:
+ Go = True
+
+ if Go:
+ tmprepo = tempfile.mkdtemp(prefix='repomaker-', dir='/srv/tmp')
+ os.makedirs("%s/repos" %tmprepo, 0755)
+ os.system("cp -al %s/builddata %s" %(repo['TopLevel'], tmprepo))
+
+ xmlroot = ET.Element("build")
+ xarch = ET.SubElement(xmlroot, "archs")
+ xrepo = ET.SubElement(xmlroot, "repos")
+
+ for arch in repo['archs']:
+ if ready[arch]:
+ ET.SubElement(xarch, "arch").text = arch
+
+ for i in os.listdir("%s/repos" %repo['TopLevel']):
+ print "working on %s" %i
+ if not os.path.exists("%s/repos/%s" %(tmprepo,i)):
+ os.makedirs("%s/repos/%s" %(tmprepo,i), 0755)
+
+ # source repo
+ os.system("cp -al %s/repos/%s/source %s/repos/%s" %(repo['TopLevel'], i, tmprepo, i))
+ ET.SubElement(xrepo, "repo").text = i
+
+ # arch specific repos
+ for arch in repo['archs']:
+ if ready[arch]:
+ os.system("cp -al %s/repos/%s/%s %s/repos/%s" %(repo['TopLevel'], i, arch, tmprepo, i))
+
+            # decide which project's build.conf to put under 'builddata'
+ prjconf = self._get_buildbase_conf(rname)
+ if prjconf:
+ import hashlib
+ xconf = ET.SubElement(xmlroot, "buildconf")
+ prjconf_fn = "%s-build.conf" % hashlib.sha256(prjconf).hexdigest()
+ xconf.text = prjconf_fn
+
+ # creating a snapshot is basically a copy of the daily build to
+ # a new location with a build ID.
+ # once snapshot is created, we can start image creation process
+ print "We are ready to create a snapshot for %s (and all other related repos)" %rname
+
+ if 'TopLevel' in repo and 'PartOf' in repo:
+ from release import BuildID
+ bid = BuildID()
+ build_id = bid.get_new_build_id(release=repo['Release'],
+ type=repo['PartOf'], # sandbox or not
+ sandbox_prj=repo['Project'])
+ ET.SubElement(xmlroot, "id").text = build_id
+
+ if 'SnapshotDir' in repo:
+ top = repo['SnapshotDir']
+ else:
+ top = builds_base
+
+ if not os.path.isdir(top):
+ os.makedirs(top, 0755)
+
+ snapshotdir = "%s/%s" %(top, build_id)
+
+ print "linking %s to %s" %(tmprepo, snapshotdir)
+ os.system("cp -al %s %s" %(tmprepo, snapshotdir))
+ os.chmod(snapshotdir, 0755)
+
+ if 'Link' in repo:
+ # create symbolic links
+                if os.path.lexists("%s/%s" %(top, repo['Link'])):
+ os.remove("%s/%s" %(top, repo['Link']))
+ print "Creating symlink %s -> %s/%s" %(snapshotdir, top, repo['Link'])
+ os.symlink(build_id, "%s/%s" %(top, repo['Link']))
+
+ wi['build_id'] = build_id
+ wi['snapshot'] = True
+
+ template_str = self._read_template('new_build')
+ msg = []
+ try:
+ from Cheetah.Template import Template
+ template = Template(template_str, searchList = wi )
+ template.msg = "\n".join(msg)
+ body = str(template)
+ except ImportError:
+ from string import Template
+ template = Template(template_str)
+ body = template.safe_substitute(wi, msg="\n".join(msg))
+
+ wi['body'] = body.replace('\n', '\\n')
+ wi['snapshot_subject'] = 'Snapshot %s created' %(build_id)
+
+ tree = ET.ElementTree(xmlroot)
+ from xml.dom import minidom
+ rough_string = ET.tostring(xmlroot, 'utf-8')
+ reparsed = minidom.parseString(rough_string)
+
+            # make sure the 'builddata' dir exists even if no image-configurations rpm was found
+ try:
+ os.makedirs("%s/builddata" % snapshotdir)
+ except OSError:
+ pass
+
+ xf = open("%s/builddata/build.xml" %(snapshotdir), 'w')
+ xf.write(XMLTEXT_RE.sub('>\g<1></', reparsed.toprettyxml(indent=" ")))
+ xf.close()
+
+ # Save OBS project building config here
+ if prjconf:
+ with open("%s/builddata/%s" % \
+ (snapshotdir, prjconf_fn), 'w') as wf:
+ wf.write(prjconf)
+
+            # put the buildlog of every package under builddata
+            print 'Copying all buildlogs to the builddata dir ...'
+ sys.stdout.flush()
+
+ buildbase = self.conf['obs_building_path']
+
+ database = "%s/builddata/buildlogs" % snapshotdir
+ fail_dir = os.path.join(database, 'failed')
+ succ_dir = os.path.join(database, 'succeeded')
+ os.makedirs(fail_dir)
+ os.makedirs(succ_dir)
+
+ for arch in repo['archs']:
+ builddir = os.path.join(buildbase,
+ repo['Project'],
+ repo['Target'],
+ ARCHES[arch])
+ for pkg in os.listdir(builddir):
+ if pkg.startswith(':'):
+ continue
+ pkgdir = os.path.join(builddir, pkg)
+ with file(os.path.join(pkgdir, 'status')) as f:
+ statusline = f.readline()
+ if 'status="succeeded"' in statusline:
+ logf = os.path.join(database, 'succeeded', pkg + '.buildlog.txt')
+ elif 'status="failed"' in statusline:
+ logf = os.path.join(database, 'failed', pkg + '.buildlog.txt')
+ else:
+ # ignore other status
+ logf = None
+
+ if logf:
+ os.system('cp %s %s' % (os.path.join(pkgdir, 'logfile'),
+ logf))
+
+ # cleanup tmp dir and links
+ shutil.rmtree(tmprepo)
+
+ if status:
+ """
+ # TODO: config to enable/disable rsync
+ print "Running rsync to public repo server"
+ os.system("rsync -H --delete-after -avz /srv/www/vhosts/repo.abc.com/* rsync://repo.abc.com/external/")
+ print "Done"
+ """
+            # always use 'builds_base' as wi['IMAGES_PATH']
+ wi['IMAGES_PATH'] = builds_base
+
+ # for mails
+ # if 'MailTo' is set in repo conf, then send emails to this addr
+ if 'MailTo' in repo:
+ mailto = repo['MailTo']
+ if not isinstance(mailto, list):
+ mailto = filter(None, [s.strip() for s in mailto.split(',')])
+ wi['To'] = mailto
+ else:
+ wi['To'] = self.conf['mailto']
+
+ wi['LOCATION'] = repo['Location']
+ wi['TARGET'] = target
+ pubarch = [ rr for rr in ready.keys() if ready[rr] ]
+ wi['ARCHS'] = pubarch
+
+ # special rsync server for this repo
+ if 'RsyncServer' in repo:
+ wi['RSYNC_SERVER'] = repo['RsyncServer']
+
+            # special internally used baseurl of repos
+ if 'RepoBaseURL' in repo:
+ wi['CONF_BASEURL'] = repo['RepoBaseURL']
+
+            # use a different baseurl for sandbox snapshots if specified
+            if repo.get('PartOf') == 'sandbox' and \
+ self.conf['sandbox_repo_baseurl'] is not None:
+ wi['CONF_BASEURL'] = self.conf['sandbox_repo_baseurl']
+
+            # if 'SnapshotDir' is used for this repo, send the relative URI to image-dispatcher
+ if 'SnapshotDir' in repo and builds_base:
+ sdir = repo['SnapshotDir']
+ top = builds_base
+
+ if sdir.startswith(top):
+ sdir = sdir[len(top):]
+ wi['RELATIVE_URI'] = sdir
+
+ return wi
+ else:
+ return None
+
+    def _get_buildbase_conf(self, rname):
+        """Find the toplevel BASE repo that 'rname' ultimately depends on,
+        then fetch and return that project's build config.
+ """
+
+ brepo = self._get_repo_by_name(rname)
+ while brepo and 'DependsOn' in brepo:
+ brepo = self._get_repo_by_name(brepo['DependsOn'])
+ # now brepo should be the toplevel BASE repo or itself
+
+ if not brepo:
+ return None
+ else:
+ return self.bs.getProjectConfig(brepo['Project'])
--- /dev/null
+Hi,
+
+A new snapshot ${build_id} was published and the images are going to be
+generated.
+
+
+Regards,
+Release Engineering Team
+
+[This message was auto-generated]
+
--- /dev/null
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; version 2 of the License
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 59
+# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os,sys
+import optparse
+import glob
+import shutil
+import stat
+from time import sleep
+import tempfile
+import re
+import rpm
+
+pjoin = os.path.join # shortcut
+
+SIGNER = "/usr/bin/sign"
+
+class LinkRepo():
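+    """Build a snapshot repo by hard-linking rpms out of the OBS raw (live)
+    repo tree, then run createrepo on it and optionally sign the result.
+    """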
+
+ def __init__(self, live, gpg_key=None, sign_user=None):
+ self.live = live
+
+        if gpg_key and os.path.isfile(gpg_key):
+            self.gpg_key = gpg_key
+        else:
+            self.gpg_key = None
+
+ if os.path.exists(SIGNER) and os.access(SIGNER, os.X_OK):
+ if sign_user:
+ self.sign_cmd=SIGNER + ' -u %s' % sign_user
+ else:
+ self.sign_cmd=SIGNER
+
+ else:
+            print "Can't access signer, repos will not be signed!"
+ self.sign_cmd = None
+
+ def _rm_files(self, wild_path):
+ for fn in glob.glob(wild_path):
+ os.remove(fn)
+
+ def copy_missing(self,pkg1, pkg2, dest):
+ def get_list(repo):
+ out = {}
+ for p in os.listdir(repo):
+                # strip '-<version>-<release>...' to get the package name
+                pac = p.rpartition("-")[0].rpartition("-")[0]
+                if pac:
+ out[pac] = p
+ return out
+
+
+ a = get_list(pkg1)
+ b = get_list(pkg2)
+
+ x = set(a.keys())
+        y = set(b.keys())
+ for c in x - y:
+            print "copying missing %s to %s" %(a[c], dest)
+ #shutil.copyfile("%s/%s" %(pkg1, a[c]), "%s/%s" %(dest, a[c]))
+ #print "cp %s/%s %s/%s" %(pkg1, a[c], dest, a[c])
+ os.system("cp %s/%s %s/%s" %(pkg1, a[c], dest, a[c]))
+
+ def _rpmname_from_file(self, path):
+ """Get rpm package name from the path of rpm file"""
+
+ if not path.endswith('.rpm'):
+ return None
+
+ ts = rpm.ts()
+ fdno = os.open(path, os.O_RDONLY)
+ hdr = ts.hdrFromFdno(fdno)
+ os.close(fdno)
+
+ return hdr[rpm.RPMTAG_NAME]
+
+    def _rpms_from_dir(self, dpath):
+        """Get the rpm package names from a dir that contains rpm files"""
+        return filter(None, map(self._rpmname_from_file, glob.glob(pjoin(dpath, '*.rpm'))))
+
+ def _ln_files(self, wild_path, dst, ignore_exists=False):
+ if ignore_exists:
+ exist_rpms = self._rpms_from_dir(dst)
+
+ for fn in glob.glob(wild_path):
+ if not ignore_exists or \
+ self._rpmname_from_file(fn) not in exist_rpms:
+ try:
+ os.link(fn, pjoin(dst, os.path.basename(fn)))
+ except OSError:
+ pass
+
+    def snapshot(self, prjraw, arch, dest, dest_pkg, dest_debug, dest_src, ignore_exists=False):
+        """Hard-link rpm files from the live repo into the destination
+        paths to make a snapshot.
+        `ignore_exists`: skip rpms whose package name already exists in
+        the destination (used when layering base or extra repos on top).
+        """
+
+ print "creating snapshot from live repo: %s" %prjraw
+ # 'ia32' as the default arch
+ srpm_dir = 'src'
+ drpm_dir = 'i*'
+ bins = "i386 i586 i686 noarch"
+ xsrpm_dir = "src.armv8el"
+ if arch.startswith('armv7l'):
+ #srpm_dir += '.armv7el'
+ drpm_dir = 'armv7l'
+ bins = "armv7l noarch noarch.armv7el"
+ elif arch.startswith('armv7hl'):
+ #srpm_dir += '.armv8el'
+ drpm_dir = 'armv7hl'
+ bins = "armv7hl noarch noarch.armv8el"
+
+ srpm_glob = pjoin(prjraw, srpm_dir, '*src.rpm')
+ drpm_glob = pjoin(prjraw, drpm_dir, '*debuginfo*.rpm')
+ dsrpm_glob = pjoin(prjraw, drpm_dir, '*debugsource*.rpm')
+
+ # snapshot src and debuginfo
+ self._ln_files(srpm_glob, dest_src, ignore_exists)
+ self._ln_files(drpm_glob, dest_debug, ignore_exists)
+ self._ln_files(dsrpm_glob, dest_debug, ignore_exists)
+
+ # copy missing arm packages
+ #print "copy missing src packages from %s to %s" %(pjoin(prjraw, srpm_dir), dest_src)
+ if os.path.exists(pjoin(prjraw, xsrpm_dir)) and srpm_dir == "src":
+ self.copy_missing(pjoin(prjraw, xsrpm_dir), pjoin(prjraw, "src"), dest_src)
+
+ for bin in bins.split():
+ if os.path.isdir(pjoin(prjraw, bin)):
+ if bin.startswith('noarch.arm'):
+ tbin = 'noarch'
+ else:
+ tbin = bin
+
+ if not os.path.exists(pjoin(dest_pkg, tbin)):
+ os.makedirs(pjoin(dest_pkg, tbin))
+ self._ln_files(pjoin(prjraw, bin, '*.%s.rpm' % tbin),
+ pjoin(dest_pkg, tbin),
+ ignore_exists)
+
+                self._rm_files(pjoin(dest_pkg, tbin, '*debuginfo*rpm'))
+                self._rm_files(pjoin(dest_pkg, tbin, '*debugsource*rpm'))
+
+ if os.path.isdir(pjoin(dest, arch, 'packages', 'repodata')):
+ self._ln_files(pjoin(dest, arch, 'debug', 'repodata', '*'), pjoin(dest_debug, 'repodata'))
+ self._ln_files(pjoin(dest, arch, 'packages', 'repodata', '*'), pjoin(dest_pkg, 'repodata'))
+ self._ln_files(pjoin(dest, 'source', 'repodata', '*'), pjoin(dest_src, 'repodata'))
+
+ return True
+
+ def sign(self, path):
+ if self.sign_cmd and self.gpg_key:
+ os.system('%s -d %s/repodata/repomd.xml' % (self.sign_cmd, path))
+ shutil.copyfile(self.gpg_key, '%s/repodata/repomd.xml.key' % path)
+ else:
+ print 'Not signing repos: %s %s' %(self.sign_cmd, self.gpg_key)
+
+ def createrepo(self, dest_pkg, dest_src, dest_debug):
+ """ Call external command 'createrepo' to generate repodata,
+        and sign the repomd.xml if needed
+ """
+ if os.path.exists('%s/repodata/repomd.xml.asc' %dest_pkg):
+ os.system('rm %s/repodata/repomd.xml.asc' %dest_debug)
+ os.system('rm %s/repodata/repomd.xml.asc' %dest_src)
+ os.system('rm %s/repodata/repomd.xml.asc' %dest_pkg)
+
+ # debug
+ os.system('createrepo --quiet -d --changelog-limit=1 --update %s' % dest_debug)
+ self.sign(dest_debug)
+
+ # source
+ os.system('createrepo --quiet -d --changelog-limit=10 --update %s' % dest_src)
+ self.sign(dest_src)
+
+ self._rm_files(pjoin(dest_pkg, 'repodata', '*comps*'))
+ self._rm_files(pjoin(dest_pkg, 'repodata', '*patterns*'))
+ self._rm_files(pjoin(dest_pkg, 'repodata', '*group*'))
+ self._rm_files(pjoin(dest_pkg, 'repodata', '*image-config*'))
+
+ # packages
+ os.system('createrepo --quiet --unique-md-filenames -d --changelog-limit=5 --update %s' % dest_pkg)
+ self.sign(dest_pkg)
+
+
+    def update_package_groups(self, dest_pkg):
+        """ Extract group.xml and patterns.xml from the 'package-groups' rpm
+            found under dest_pkg and add them to the repodata via modifyrepo
+        """
+ pg = re.compile("package-groups-.*.rpm")
+ found = False
+ for root, dirs, files in os.walk("%s" %dest_pkg):
+ for f in files:
+ if re.match(pg, f):
+ print "package groups found: %s/%s" %(root,f)
+ tmpdir = tempfile.mkdtemp()
+ pwd = os.getcwd()
+ os.chdir(tmpdir)
+ os.system("rpm2cpio %s/%s | cpio -ivd ./usr/share/package-groups/patterns.xml" %(root,f))
+ os.system("rpm2cpio %s/%s | cpio -ivd ./usr/share/package-groups/group.xml" %(root,f))
+ shutil.copyfile("%s/usr/share/package-groups/patterns.xml" %tmpdir, '%s/repodata/patterns.xml' %dest_pkg)
+ os.system('modifyrepo %s/repodata/patterns.xml %s/repodata' %(dest_pkg, dest_pkg))
+ shutil.copyfile("%s/usr/share/package-groups/group.xml" %tmpdir, '%s/repodata/group.xml' %dest_pkg)
+ os.system('modifyrepo %s/repodata/group.xml %s/repodata' %(dest_pkg, dest_pkg))
+ self.sign(dest_pkg)
+ os.chdir(pwd)
+ shutil.rmtree(tmpdir)
+ found = True
+
+ if not found:
+ print "did not find any package groups"
+
+ def update_pattern(self, prjraw, dest_pkg, meta):
+ """ Checking for patterns at: rawrepo/repodata/{group.xml,patterns.xml}.gz
+ """
+ # find out the pattern file name from repomd.xml if any
+ print "Checking for patterns at: %s/repodata/%s.gz" %( prjraw, meta )
+ pattern = None
+ with open('%s/repodata/repomd.xml' % prjraw) as f:
+ patf_re = re.compile("(repodata/.*%s.gz)" %meta)
+ for ln in f:
+ m = patf_re.search(ln)
+ if m:
+ pattern = m.group(1)
+ break
+
+ if pattern:
+ shutil.copyfile(pjoin(prjraw, pattern), '%s/repodata/%s.gz' %( dest_pkg, meta) )
+
+ pwd = os.path.abspath(os.curdir)
+ os.chdir(pjoin(dest_pkg, 'repodata'))
+ os.system('gunzip -f %s.gz' %meta)
+ os.chdir(pwd)
+
+ os.system('modifyrepo %s/repodata/%s %s/repodata' %(dest_pkg, meta, dest_pkg))
+ else:
+ print 'No %s found' %meta
+
+ def cleanup(self, dest, dest_tmp, arch):
+ """ cleanup old repo and move new ones to the final places,
+ then cleanup the tmp dirs
+ """
+
+ dest_src = pjoin(dest_tmp, 'source')
+
+ shutil.rmtree(pjoin(dest, arch), ignore_errors = True)
+ shutil.move(pjoin(dest_tmp, arch), dest)
+ if not arch.startswith('arm'):
+ shutil.rmtree(pjoin(dest, 'source'), ignore_errors = True)
+ shutil.move(dest_src, dest)
+
+ shutil.rmtree(dest_tmp, ignore_errors = True)
+
+ def save_project_config(self, dest, prjconf):
+ fpath = os.path.join(dest, 'repodata', 'build.conf')
+ with open(fpath, 'w') as wf:
+ wf.write(prjconf)
+
+    def fix_perm(self, dest):
+        """ Remove the world-writable (S_IWOTH) bit from everything under
+            dest, except for symlinks
+        """
+ for r, ds, fs in os.walk(dest):
+ for f in fs + ds:
+ fp = pjoin(r, f)
+ if os.path.islink(fp):
+ continue
+
+ statinfo = os.stat(fp)
+ if statinfo.st_mode & stat.S_IWOTH:
+ os.chmod(fp, statinfo.st_mode & ~stat.S_IWOTH)
+
+ def linkrepo(self, trigger_dir, liverepo, dest_repo,
+ baserepo=None, extrarpms=None, prjconf=None):
+
+ def _get_rawrepo(repo):
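+            # The OBS publisher is expected to drop a file named after the
+            # project under trigger_dir (obs_triggers_path); its first line
+            # holds the directory name of the published repo, resolved here
+            # against the raw repo root (self.live).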
+ prj = repo['prj']
+ target = repo['target']
+
+            if os.path.exists("%s/%s" %( trigger_dir, prj) ):
+                with open("%s/%s" %( trigger_dir, prj)) as f:
+                    x = f.readline().rstrip("\x00")
+            else:
+                print "%s/%s does not exist." %( trigger_dir, prj)
+                return None
+
+            raw_repo = pjoin(self.live, x, target)
+            if not os.path.isdir(raw_repo):
+                print 'Raw live repo of project %s does not exist' % prj
+                return None
+
+ return raw_repo
+
+ prj = liverepo['prj']
+ target = liverepo['target']
+ arch = liverepo['arch']
+
+ project_raw_repo = _get_rawrepo(liverepo)
+ if not project_raw_repo:
+ return False
+
+ try:
+ os.makedirs(dest_repo)
+ except OSError, e:
+ if not os.path.isdir(dest_repo):
+ print 'Cannot create dir:', e
+ return False
+
+        # all the needed paths
+ dest_tmp = pjoin(dest_repo, '.tmp')
+ if os.path.isdir(dest_tmp):
+ # some garbage there, clean it up
+ shutil.rmtree(dest_tmp, ignore_errors=True)
+
+ dest_pkg = pjoin(dest_tmp, arch, 'packages')
+ dest_debug = pjoin(dest_tmp, arch, 'debug')
+ dest_src = pjoin(dest_tmp, 'source')
+
+ for d in (dest_pkg, dest_debug, dest_src):
+ os.makedirs(pjoin(d, 'repodata'))
+
+ # create the snapshot of current live repos
+ ret = self.snapshot(project_raw_repo, arch, dest_repo, dest_pkg, dest_debug, dest_src)
+ print "snapshot %s" %ret
+
+ if baserepo:
+ base_raw_repo = _get_rawrepo(baserepo)
+ if base_raw_repo:
+ self.snapshot(base_raw_repo, arch, dest_repo, dest_pkg, dest_debug, dest_src, ignore_exists=True)
+ else:
+ print 'Skip invalid base raw repo for sandbox'
+
+ if extrarpms:
+ self.snapshot(extrarpms, arch, dest_repo, dest_pkg, dest_debug, dest_src, ignore_exists=True)
+
+ # call external 'createrepo' now
+ self.createrepo(dest_pkg, dest_src, dest_debug)
+
+ self.update_package_groups( dest_pkg)
+
+ if prjconf:
+ print 'saving OBS building config to repo'
+ self.save_project_config(dest_pkg, prjconf)
+
+ self.cleanup(dest_repo, dest_tmp, arch)
+
+ # fix the permissions
+ self.fix_perm(dest_repo)
+
+ return True
+
+if __name__ == '__main__':
+ usage = "Usage: %prog [options] <prj> <target> <dest_repo> <arch>"
+ parser = optparse.OptionParser(usage)
+ opts, args = parser.parse_args()
+
+ try:
+ prj, target, dest_repo, arch = args[:4]
+ except ValueError:
+        parser.error('wrong number of arguments, see usage')
+ sys.exit(1)
+
+ linker = LinkRepo('/srv/obs/repos')
+ repo = {'prj': prj, 'target': target, 'arch': arch}
+ linker.linkrepo('/srv/obs/repos_sync', repo, dest_repo)
--- /dev/null
+#!/usr/bin/env python
+
+from datetime import date, datetime, timedelta
+import os
+
+RELEASE_SPOOL = '/var/spool/repomaker'
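+# one spool file per (type, release) pair holds the latest build id handed out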
+
+def _simple_buildid(today = None):
+ if today is None:
+ today = date.today()
+ return today.strftime('%Y%m%d')
+
+class BuildID:
+ def __init__(self, spool = RELEASE_SPOOL):
+ if not os.path.exists(spool):
+ os.makedirs(spool)
+
+ self.spool = spool
+
+    def _get_latest_build_id(self, type, release):
+        with open("%s/%s-%s" %(self.spool, type, release)) as f:
+            return f.readline()
+
+    def _save_new_build_id(self, id, type, release):
+        with open("%s/%s-%s" %(self.spool, type, release), 'w') as f:
+            f.write(id)
+
+ def _get_release_number(self, release, today = None):
+ return _simple_buildid(today)
+
+ def _get_current_release(self):
+ return 'tizen'
+
+ def get_new_build_id(self, release = None, type='release', sandbox_prj=None, today=None):
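+        """Return a new build id such as '<release>_YYYYMMDD.N', where N is a
+        per-day serial number; sandbox builds become
+        '<release>-sandbox-<prj>_YYYYMMDD.N'. The id is saved to the spool so
+        the next call can continue the sequence.
+        """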
+ if release is None:
+ release = self._get_current_release()
+
+ buildid = self._get_release_number(release, today)
+
+ if type == 'sandbox' and sandbox_prj is not None:
+ type = 'sandbox-' + sandbox_prj.replace(':', '-')
+
+ if os.path.exists("%s/%s-%s" %(self.spool, type, release) ):
+ latest = self._get_latest_build_id(type, release)
+ if buildid in latest:
+ # same day, new build number
+ l = latest.split(".")
+ build = int(l[-1]) + 1
+ buildid += '.%s' %build
+ else:
+ buildid += '.1'
+ else:
+ buildid += '.1'
+
+ self._save_new_build_id(buildid, type, release)
+
+ if type.startswith('sandbox'):
+ return '_'.join([release + '-' + type, buildid])
+ else:
+ return '_'.join([release, buildid])
+
+if __name__ == "__main__":
+ # verify spool
+    print 'Two serial build ids for one day:'
+ bid = BuildID()
+ id = bid.get_new_build_id()
+ print id
+ id = bid.get_new_build_id()
+ print id
+
+ id = bid.get_new_build_id('tizen-99')
+ print id
+ id = bid.get_new_build_id('tizen-99')
+ print id