- add missing repodata handling parts: rewrite in case of new data and new fileprovides
author Michael Schroeder <mls@suse.de>
Mon, 28 Feb 2011 18:13:56 +0000 (19:13 +0100)
committer Michael Schroeder <mls@suse.de>
Mon, 28 Feb 2011 18:13:56 +0000 (19:13 +0100)
examples/pysolv.py
examples/solv.i

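At the Python level the new flow is: ask the pool which file dependencies had to be added as extra provides, and if any turned up, rewrite the affected repository caches before the whatprovides index is created. A minimal sketch of that sequence, using only the names this commit introduces in pysolv.py:

    addedprovides = pool.addfileprovides_ids()   # Ids of the file provides added on top of the repo data
    if addedprovides:
        rewrite_repos(pool, addedprovides)       # re-write cached .solv files that do not contain them yet
    pool.createwhatprovides()
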
diff --git a/examples/pysolv.py b/examples/pysolv.py
index f4d80e3..f0001f5 100644
--- a/examples/pysolv.py
+++ b/examples/pysolv.py
@@ -1,5 +1,33 @@
 #!/usr/bin/python
 
+#
+# Copyright (c) 2011, Novell Inc.
+#
+# This program is licensed under the BSD license, read LICENSE.BSD
+# for further information
+#
+
+# pysolv a little software installer demoing the sat solver library/bindings
+
+# things it does:
+# - understands globs for package names / dependencies
+# - understands .arch suffix
+# - repository data caching
+# - on demand loading of secondary repository data
+# - checksum verification
+# - deltarpm support
+#
+# things not yet ported:
+# - installation of commandline packages
+# - gpg verification
+# - file conflicts
+# - fastestmirror implementation
+#
+# things available in the library but missing from pysolv:
+# - vendor policy loading
+# - soft locks file handling
+# - multi version handling
+
 import sys
 import os
 import glob
@@ -93,6 +121,7 @@ def writecachedrepo(repo, repoext, info=None):
        elif repoext:
            info.write(f)
        else:
+           # rewrite_repos case
            repo['handle'].write_first_repodata(f)
        if repo['alias'] != '@System' and not repoext:
            if 'extcookie' not in repo:
@@ -108,6 +137,22 @@ def writecachedrepo(repo, repoext, info=None):
        else:
            f.write(repo['extcookie'])
        f.close()
+       if repo['handle'].iscontiguous():
+           # switch to saved repo to activate paging and save memory
+           nf = solv.xfopen(tmpname)
+           if not repoext:
+               # main repo
+               repo['handle'].empty()
+               if not repo['handle'].add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
+                   sys.exit("internal error, cannot reload solv file")
+           else:
+               # extension repodata
+               # need to extend to repo boundaries, as this is how
+               # info.write() has written the data
+               info.extend_to_repo()
+               # LOCALPOOL does not help as pool already contains all ids
+               info.read_solv_flags(nf, Repo.REPO_EXTEND_SOLVABLES)
+           solv.xfclose(nf)
        os.rename(tmpname, calccachepath(repo, repoext))
     except IOError, e:
        if tmpname:
@@ -418,12 +463,31 @@ def depglob(pool, name, globname, globdep):
            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches.keys()) ]
     return []
     
+
 def load_stub(repodata):
     if repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE):
        return repomd_load_ext(repodata.repo.appdata, repodata)
     if repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME):
        return susetags_load_ext(repodata.repo.appdata, repodata)
     return False
+
+def rewrite_repos(pool, addedprovides):
+    addedprovidesset = set(addedprovides)
+    for repohandle in pool.repos:
+       repo = repohandle.appdata
+       if not repohandle.nsolvables:
+           continue
+       # make sure there's just one real repodata with extensions
+       repodata = repohandle.first_repodata()
+       if not repodata:
+           continue
+       oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
+       oldaddedprovidesset = set(oldaddedprovides)
+       if not addedprovidesset <= oldaddedprovidesset:
+           for id in addedprovides:
+               repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
+           repodata.internalize()
+           writecachedrepo(repo, None, repodata)
     
 
 parser = OptionParser(usage="usage: solv.py [options] COMMAND")
@@ -592,7 +656,9 @@ if cmd == 'se' or cmd == 'search':
 
 
-pool.addfileprovides()
+addedprovides = pool.addfileprovides_ids()
+if addedprovides:
+    rewrite_repos(pool, addedprovides)
 pool.createwhatprovides()
 
 jobs = []
diff --git a/examples/solv.i b/examples/solv.i
index 7da1bb0..ce5f430 100644
--- a/examples/solv.i
+++ b/examples/solv.i
@@ -479,6 +479,17 @@ typedef struct {
   void addfileprovides() {
     pool_addfileprovides($self);
   }
+  Queue addfileprovides_ids() {
+    Queue r;
+    Id *addedfileprovides = 0;
+    queue_init(&r);
+    pool_addfileprovides_ids($self, $self->installed, &addedfileprovides);
+    if (addedfileprovides) {
+      for (; *addedfileprovides; addedfileprovides++)
+        queue_push(&r, *addedfileprovides);
+    }
+    return r;
+  }
   void createwhatprovides() {
     pool_createwhatprovides($self);
   }
@@ -583,10 +594,14 @@ typedef struct {
   static const int REPO_EXTEND_SOLVABLES = REPO_EXTEND_SOLVABLES;
   static const int SOLV_ADD_NO_STUBS = SOLV_ADD_NO_STUBS;       /* repo_solv */
   static const int SUSETAGS_RECORD_SHARES = SUSETAGS_RECORD_SHARES; /* repo_susetags */
 
   void free(int reuseids = 0) {
     repo_free($self, reuseids);
   }
+  void empty(int reuseids = 0) {
+    repo_empty($self, reuseids);
+  }
   bool add_solv(const char *name, int flags = 0) {
     FILE *fp = fopen(name, "r");
     if (!fp)
@@ -701,6 +716,25 @@ typedef struct {
   }
   %}
 #endif
+  bool iscontiguous() {
+    int i;
+    for (i = $self->start; i < $self->end; i++)
+      if ($self->pool->solvables[i].repo != $self)
+        return 0;
+    return 1;
+  }
+  XRepodata *first_repodata() {
+     int i;
+     if (!$self->nrepodata)
+       return 0;
+     /* make sure all repodatas but the first are extensions */
+     if ($self->repodata[0].loadcallback)
+        return 0;
+     for (i = 1; i < $self->nrepodata; i++)
+       if (!$self->repodata[i].loadcallback)
+         return 0;       /* oops, not an extension */
+     return new_XRepodata($self, 0);
+   }
 }
 
 %extend Dataiterator {
@@ -721,8 +755,12 @@ typedef struct {
     dataiterator_free($self);
     sat_free($self);
   }
+  %newobject __iter__;
   Dataiterator *__iter__() {
-    return $self;
+    Dataiterator *ndi;
+    ndi = sat_calloc(1, sizeof(*ndi));
+    dataiterator_init_clone(ndi, $self);
+    return ndi;
   }
   %exception next {
     $action
@@ -789,8 +827,12 @@ typedef struct {
 
 
 %extend Pool_solvable_iterator {
+  %newobject __iter__;
   Pool_solvable_iterator *__iter__() {
-    return $self;
+    Pool_solvable_iterator *s;
+    s = sat_calloc(1, sizeof(*s));
+    *s = *$self;
+    return s;
   }
   %exception next {
     $action
@@ -820,8 +862,12 @@ typedef struct {
 }
 
 %extend Pool_repo_iterator {
+  %newobject __iter__;
   Pool_repo_iterator *__iter__() {
-    return $self;
+    Pool_repo_iterator *s;
+    s = sat_calloc(1, sizeof(*s));
+    *s = *$self;
+    return s;
   }
   %exception next {
     $action
@@ -850,8 +896,12 @@ typedef struct {
 }
 
 %extend Repo_solvable_iterator {
+  %newobject __iter__;
   Repo_solvable_iterator *__iter__() {
-    return $self;
+    Repo_solvable_iterator *s;
+    s = sat_calloc(1, sizeof(*s));
+    *s = *$self;
+    return s;
   }
   %exception next {
     $action
@@ -1292,6 +1342,12 @@ typedef struct {
   const char *lookup_str(Id solvid, Id keyname) {
     return repodata_lookup_str($self->repo->repodata + $self->id, solvid, keyname);
   }
+  Queue lookup_idarray(Id solvid, Id keyname) {
+    Queue r;
+    queue_init(&r);
+    repodata_lookup_idarray($self->repo->repodata + $self->id, solvid, keyname, &r);
+    return r;
+  }
   SWIGCDATA lookup_bin_checksum(Id solvid, Id keyname, Id *OUTPUT) {
     const unsigned char *b;
     *OUTPUT = 0;
@@ -1307,4 +1363,17 @@ typedef struct {
   void write(FILE *fp) {
     repodata_write($self->repo->repodata + $self->id, fp, repo_write_stdkeyfilter, 0);
   }
+  bool read_solv_flags(FILE *fp, int flags = 0) {
+    Repodata *data = $self->repo->repodata + $self->id;
+    int r, oldstate = data->state;
+    data->state = REPODATA_LOADING;
+    r = repo_add_solv_flags(data->repo, fp, flags | REPO_USE_LOADING);
+    if (r)
+      data->state = oldstate;
+    return r == 0;
+  }
+  void extend_to_repo() {
+    Repodata *data = $self->repo->repodata + $self->id;
+    repodata_extend_block(data, data->repo->start, data->repo->end - data->repo->start);
+  }
 }
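
The %newobject changes to the iterator wrappers make __iter__ hand back an independent clone instead of the wrapper itself, so the same iterator object can be walked more than once and nested loops no longer share a cursor. A small sketch of what this enables on the Python side, assuming the pool exposes its solvable iterator through an accessor such as pool.solvables_iter() (the accessor itself is not part of this diff):

    it = pool.solvables_iter()      # a Pool_solvable_iterator
    first = sum(1 for s in it)      # iterates over a fresh clone
    second = sum(1 for s in it)     # works again; previously the shared cursor was already exhausted
    assert first == second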