path += ".solv"
return "/var/cache/solv/" + re.sub(r'[/]', '_', path)
-def usecachedrepo(repo, repoext, cookie, mark=False):
- cookie = repo['cookie']
+def usecachedrepo(repo, repoext, mark=False):
+ if not repoext:
+ cookie = repo['cookie']
+ else:
+ cookie = repo['extcookie']
handle = repo['handle']
try:
repopath = calccachepath(repo, repoext)
extcookie[0] = chr(1)
repo['extcookie'] = extcookie
f.write(repo['extcookie'])
- f.write(repo['cookie'])
+ if not repoext:
+ f.write(repo['cookie'])
+ else:
+ f.write(repo['extcookie'])
f.close()
os.rename(tmpname, calccachepath(repo, repoext))
except IOError, e:
return (filename, chksum, chksumtype)
return (None, None, None)
+def repomd_add_ext(repo, repodata, what):
+ # Register the repomd extension 'what' (e.g. 'filelists', 'prestodelta')
+ # as an external stub entry in 'repodata' so it can be demand-loaded
+ # later via the pool's load callback.  Returns True if the extension
+ # exists in this repo's repomd index, False otherwise.
+ filename, chksum, chksumtype = repomd_find(repo, what)
+ if not filename:
+ return False
+ # yum's repomd names the deltainfo data 'prestodelta'; normalize it
+ if what == 'prestodelta':
+ what = 'deltainfo'
+ handle = repodata.new_handle()
+ repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
+ repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
+ repodata.set_bin_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksumtype, chksum)
+ # advertise which keys the external data will provide once loaded
+ if what == 'deltainfo':
+ repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
+ repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
+ elif what == 'filelists':
+ repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+ repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+ repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
+ return True
+
+def repomd_load_ext(repo, repodata):
+ # Load-callback helper for repomd (rpm-md) repos: materialize the
+ # extension described by the stub 'repodata' — either the filelists
+ # ('FL') or deltainfo ('DL') data — from cache or by downloading it.
+ # Returns True on success, False for unknown types or fetch failure.
+ repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
+ if repomdtype == 'filelists':
+ ext = 'FL'
+ elif repomdtype == 'deltainfo':
+ ext = 'DL'
+ else:
+ return False
+ sys.stdout.write("[%s:%s" % (repo['alias'], ext))
+ # try the on-disk solv cache first (validated via the repo's extcookie)
+ if usecachedrepo(repo, ext):
+ sys.stdout.write(" cached]\n")
+ sys.stdout.flush()
+ return True
+ sys.stdout.write(" fetching]\n")
+ sys.stdout.flush()
+ filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
+ filechksum, filechksumtype = repodata.lookup_bin_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
+ f = curlfopen(repo, filename, True, filechksum, filechksumtype)
+ if not f:
+ return False
+ # parse into the repo; REPO_USE_LOADING merges into the stub being loaded
+ if ext == 'FL':
+ repo['handle'].add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+ elif ext == 'DL':
+ repo['handle'].add_deltainfoxml(f, Repo.REPO_USE_LOADING)
+ solv.xfclose(f)
+ # cache the parsed extension so the next run can skip the download
+ writecachedrepo(repo, ext, repodata)
+ return True
+
def susetags_find(repo, what):
di = repo['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
di.prepend_keyname(solv.SUSETAGS_FILE);
return (what, chksum, chksumtype)
return (None, None, None)
+def susetags_load_ext(repo, repodata):
+ # Demand-loading of susetags extension data is not implemented yet;
+ # always report failure so the caller falls back gracefully.
+ return False
+
def validarch(pool, arch):
if not arch:
return False
return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches.keys()) ]
return []
+def load_stub(repodata):
+ # Pool load callback: dispatch a stub repodata to the matching
+ # extension loader, based on which metadata key the stub carries.
+ # The per-repo dict is recovered from the repo's appdata slot.
+ if repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE):
+ return repomd_load_ext(repodata.repo.appdata, repodata)
+ if repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME):
+ return susetags_load_ext(repodata.repo.appdata, repodata)
+ return False
+
parser = OptionParser(usage="usage: solv.py [options] COMMAND")
(options, args) = parser.parse_args()
pool = solv.Pool()
pool.setarch(os.uname()[4])
+pool.set_loadcallback(load_stub)
repos = []
for reposdir in ["/etc/zypp/repos.d"]:
for alias in cfg:
repo = cfg[alias]
repo['alias'] = alias
+ if 'baseurl' not in repo:
+ print "repo %s has no baseurl" % alias
+ continue
if 'priority' not in repo:
repo['priority'] = 99
if 'autorefresh' not in repo:
sysrepostat = os.stat("/var/lib/rpm/Packages")
sysrepocookie = calc_checksum_stat(sysrepostat)
sysrepo['cookie'] = sysrepocookie
-if usecachedrepo(sysrepo, None, sysrepocookie):
+if usecachedrepo(sysrepo, None):
print "cached"
else:
print "reading"
except OSError, e:
pass
repo['cookie'] = None
- if not dorefresh and usecachedrepo(repo, None, None):
+ if not dorefresh and usecachedrepo(repo, None):
print "repo: '%s': cached" % repo['alias']
continue
del repo['handle']
continue
repo['cookie'] = calc_checksum_fp(f)
- if usecachedrepo(repo, None, repo['cookie'], True):
+ if usecachedrepo(repo, None, True):
print "cached"
solv.xfclose(f)
continue
if f:
repo['handle'].add_updateinfoxml(f, 0)
solv.xfclose(f)
+ repodata = repo['handle'].add_repodata(0)
+ if not repomd_add_ext(repo, repodata, 'deltainfo'):
+ repomd_add_ext(repo, repodata, 'prestodelta')
+ repomd_add_ext(repo, repodata, 'filelists')
+ repodata.internalize()
elif repo['type'] == 'yast2':
print "susetags repo '%s':" % repo['alias'],
sys.stdout.flush()
del repo['handle']
continue
repo['cookie'] = calc_checksum_fp(f)
- if usecachedrepo(repo, None, repo['cookie'], True):
+ if usecachedrepo(repo, None, True):
print "cached"
solv.xfclose(f)
continue
# if the checksum was bad we work with the data we got, but don't cache it
if 'True' not in badchecksum:
writecachedrepo(repo, None)
+ # must be called after writing the repo
+ repo['handle'].create_stubs()
if cmd == 'se' or cmd == 'search':
matches = {}
if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
continue
baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
+ candidate = None
for installedp in pool.providers(p.nameid):
if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
candidate = installedp
- # add applydeltarpm code here...
+ if candidate:
+ # add applydeltarpm code here...
+ print "PKG", p.str(), "CANDIDATE", candidate.str()
chksum, chksumtype = p.lookup_bin_checksum(solv.SOLVABLE_CHECKSUM);
f = curlfopen(repo, location, False, chksum, chksumtype)
if not f:
} XRule;
typedef struct {
+ Repo* repo;
+ Id id;
+} XRepodata;
+
+typedef struct {
Pool *pool;
Id id;
} Pool_solvable_iterator;
Id const id;
} XRule;
+typedef struct {
+ Repo* const repo;
+ Id const id;
+} XRepodata;
+
# put before pool/repo so we can access the constructor
%nodefaultctor Dataiterator;
%nodefaultdtor Dataiterator;
int priority;
int subpriority;
int const nsolvables;
-#if defined(SWIGPYTHON)
- PyObject *appdata;
-#endif
#if defined(SWIGRUBY)
VALUE appdata;
#endif
void set_debuglevel(int level) {
pool_setdebuglevel($self, level);
}
+ %{
+ /* C trampoline for the pool's load callback: wraps the Repodata in an
+ * XRepodata proxy (owned by Python via SWIG_POINTER_OWN), invokes the
+ * stored Python callable, and converts its result to an int.  Returns
+ * 0 when the callable raised or returned a non-int value. */
+ SWIGINTERN int loadcallback(Pool *pool, Repodata *data, void *d) {
+ XRepodata *xd = new_XRepodata(data->repo, data - data->repo->repodata);
+ PyObject *args = Py_BuildValue("(O)", SWIG_NewPointerObj(SWIG_as_voidptr(xd), SWIGTYPE_p_XRepodata, SWIG_POINTER_OWN | 0));
+ PyObject *result = PyEval_CallObject((PyObject *)d, args);
+ if (!result)
+ /* NOTE(review): 'args' is not decref'd on this early return —
+ * looks like a reference leak when the callback raises; confirm. */
+ return 0; /* exception */
+ int ecode = 0;
+ int vresult = 0;
+ Py_DECREF(args);
+ ecode = SWIG_AsVal_int(result, &vresult);
+ Py_DECREF(result);
+ return SWIG_IsOK(ecode) ? vresult : 0;
+ }
+ %}
+ /* Install or clear (callable == NULL) the Python load callback.  A
+ * reference to the callable is held in the pool's loadcallbackdata. */
+ void set_loadcallback(PyObject *callable) {
+ if (!callable) {
+ /* clearing: drop our reference to the previously stored callable */
+ if ($self->loadcallback == loadcallback) {
+ Py_DECREF($self->loadcallbackdata);
+ pool_setloadcallback($self, 0, 0);
+ }
+ return;
+ }
+ /* NOTE(review): installing over an existing callback appears to leak
+ * the old callable's reference — confirm callers only set it once. */
+ Py_INCREF(callable);
+ pool_setloadcallback($self, loadcallback, callable);
+ }
Id str2id(const char *str, int create=1) {
return str2id($self, str, create);
}
unsigned int lookup_num(Id entry, Id keyname, unsigned int notfound = 0) {
return repo_lookup_num($self, entry, keyname, notfound);
}
- bool write(FILE *fp, int flags = 0) {
+ void write(FILE *fp) {
repo_write($self, fp, repo_write_stdkeyfilter, 0, 0);
- return 1;
}
# HACK, remove if no longer needed!
bool write_first_repodata(FILE *fp, int flags = 0) {
return s;
}
%}
+
+ /* Create a new repodata store in this repo and return an XRepodata
+ * proxy (repo pointer + index) for manipulating it from Python. */
+ XRepodata *add_repodata(int flags = 0) {
+ Repodata *rd = repo_add_repodata($self, flags);
+ return new_XRepodata($self, rd - $self->repodata);
+ }
+
+ /* Turn the repo's last repodata into stub entries so registered
+ * extensions are loaded on demand; no-op when the repo has no
+ * repodata or the last one is already a stub. */
+ void create_stubs() {
+ Repodata *data;
+ if (!$self->nrepodata)
+ return;
+ data = $self->repodata + ($self->nrepodata - 1);
+ if (data->state != REPODATA_STUB)
+ repodata_create_stubs(data);
+ }
+#if defined(SWIGPYTHON)
+ /* Expose repo->appdata as a Python attribute.  The setter stores the
+ * object without taking a reference (NOTE(review): borrowed pointer —
+ * the caller must keep the object alive; confirm intended), while the
+ * getter returns a new reference. */
+ PyObject *appdata;
+ %{
+ SWIGINTERN void Repo_appdata_set(Repo *repo, PyObject *o) {
+ repo->appdata = o;
+ }
+ SWIGINTERN PyObject *Repo_appdata_get(Repo *repo) {
+ PyObject *o = repo->appdata;
+ Py_INCREF(o);
+ return o;
+ }
+ %}
+#endif
}
%extend Dataiterator {
}
#endif
}
+
+
+%extend XRepodata {
+ /* XRepodata is a thin proxy: it holds the owning Repo plus the index
+ * of the Repodata within repo->repodata, so the underlying pointer is
+ * re-derived on every call (safe across repodata array reallocs). */
+ XRepodata(Repo *repo, Id id) {
+ XRepodata *xr = sat_calloc(1, sizeof(*xr));
+ xr->repo = repo;
+ xr->id = id;
+ return xr;
+ }
+ /* Allocate a fresh sub-handle for grouped key/value entries. */
+ Id new_handle() {
+ return repodata_new_handle($self->repo->repodata + $self->id);
+ }
+ void set_id(Id solvid, Id keyname, Id id) {
+ repodata_set_id($self->repo->repodata + $self->id, solvid, keyname, id);
+ }
+ void set_str(Id solvid, Id keyname, const char *str) {
+ repodata_set_str($self->repo->repodata + $self->id, solvid, keyname, str);
+ }
+ void set_poolstr(Id solvid, Id keyname, const char *str) {
+ repodata_set_poolstr($self->repo->repodata + $self->id, solvid, keyname, str);
+ }
+ void add_idarray(Id solvid, Id keyname, Id id) {
+ repodata_add_idarray($self->repo->repodata + $self->id, solvid, keyname, id);
+ }
+ void add_flexarray(Id solvid, Id keyname, Id handle) {
+ repodata_add_flexarray($self->repo->repodata + $self->id, solvid, keyname, handle);
+ }
+ void set_bin_checksum(Id solvid, Id keyname, Id chksumtype, const char *chksum) {
+ repodata_set_bin_checksum($self->repo->repodata + $self->id, solvid, keyname, chksumtype, (const unsigned char *)chksum);
+ }
+ const char *lookup_str(Id solvid, Id keyname) {
+ return repodata_lookup_str($self->repo->repodata + $self->id, solvid, keyname);
+ }
+ /* Returns the raw checksum bytes; the checksum type Id comes back via
+ * the OUTPUT parameter (0 if not found). */
+ SWIGCDATA lookup_bin_checksum(Id solvid, Id keyname, Id *OUTPUT) {
+ const unsigned char *b;
+ *OUTPUT = 0;
+ b = repodata_lookup_bin_checksum($self->repo->repodata + $self->id, solvid, keyname, OUTPUT);
+ return cdata_void((char *)b, sat_chksum_len(*OUTPUT));
+ }
+ /* Fold staged entries into the searchable form; required before use. */
+ void internalize() {
+ repodata_internalize($self->repo->repodata + $self->id);
+ }
+ void create_stubs() {
+ repodata_create_stubs($self->repo->repodata + $self->id);
+ }
+ void write(FILE *fp) {
+ repodata_write($self->repo->repodata + $self->id, fp, repo_write_stdkeyfilter, 0);
+ }
+}