Imported Upstream version 0.7.13
[platform/upstream/libsolv.git] / examples / pysolv
index 7c628da..75067f2 100755 (executable)
@@ -38,24 +38,12 @@ import time
 import subprocess
 import rpm
 from stat import *
-from solv import Pool, Repo, Dataiterator, Job, Solver, Transaction
 from iniparse import INIConfig
 from optparse import OptionParser
 
 #import gc
 #gc.set_debug(gc.DEBUG_LEAK)
 
-def calc_cookie_file(filename):
-    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
-    chksum.add("1.1")
-    chksum.add_stat(filename)
-    return chksum.raw()
-
-def calc_cookie_fp(fp):
-    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
-    chksum.add_fp(fp)
-    return chksum.raw()
-
 class repo_generic(dict):
     def __init__(self, name, type, attribs = {}):
         for k in attribs:
@@ -63,6 +51,25 @@ class repo_generic(dict):
         self.name = name
         self.type = type
 
+    def calc_cookie_file(self, filename):
+        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+        chksum.add("1.1")
+        chksum.add_stat(filename)
+        return chksum.raw()
+
+    def calc_cookie_fp(self, fp):
+        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+        chksum.add("1.1")
+        chksum.add_fp(fp)
+        return chksum.raw()
+
+    def calc_cookie_ext(self, f, cookie):
+        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+        chksum.add("1.1")
+        chksum.add(cookie)
+        chksum.add_fstat(f.fileno())
+        return chksum.raw()
+
     def cachepath(self, ext = None):
         path = re.sub(r'^\.', '_', self.name)
         if ext:
@@ -75,40 +82,36 @@ class repo_generic(dict):
         self.handle = pool.add_repo(self.name)
         self.handle.appdata = self
         self.handle.priority = 99 - self['priority']
-        if self['autorefresh']:
-            dorefresh = True
+        dorefresh = bool(int(self['autorefresh']))
         if dorefresh:
             try:
                 st = os.stat(self.cachepath())
-                if time.time() - st[ST_MTIME] < self['metadata_expire']:
+                if self['metadata_expire'] == -1 or time.time() - st[ST_MTIME] < self['metadata_expire']:
                     dorefresh = False
-            except OSError, e:
+            except OSError:
                 pass
         self['cookie'] = ''
+        self['extcookie'] = ''
         if not dorefresh and self.usecachedrepo(None):
-            print "repo: '%s': cached" % self.name
+            print("repo: '%s': cached" % self.name)
             return True
-        return self.load_if_changed()
-
-    def load_if_changed(self):
         return False
 
-    def load_ext(repodata):
+    def load_ext(self, repodata):
         return False
 
     def setfromurls(self, urls):
         if not urls:
             return
         url = urls[0]
-        print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
+        print("[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url))
         self['baseurl'] = url
 
     def setfrommetalink(self, metalink):
-        nf = self.download(metalink, False, None)
-        if not nf:
+        f = self.download(metalink, False, None)
+        if not f:
             return None
-        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
-        solv.xfclose(nf)
+        f = os.fdopen(f.dup(), 'r')
         urls = []
         chksum = None
         for l in f.readlines():
@@ -126,11 +129,10 @@ class repo_generic(dict):
         return chksum
         
     def setfrommirrorlist(self, mirrorlist):
-        nf = self.download(mirrorlist, False, None)
-        if not nf:
+        f = self.download(mirrorlist, False, None)
+        if not f:
             return
-        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
-        solv.xfclose(nf)
+        f = os.fdopen(f.dup(), 'r')
         urls = []
         for l in f.readline():
             l = l.strip()
@@ -156,7 +158,7 @@ class repo_generic(dict):
                     url = file
         if not url:
             if 'baseurl' not in self:
-                print "%s: no baseurl" % self.name
+                print("%s: no baseurl" % self.name)
                 return None
             url = re.sub(r'/$', '', self['baseurl']) + '/' + file
         f = tempfile.TemporaryFile()
@@ -165,39 +167,39 @@ class repo_generic(dict):
             return None
         os.lseek(f.fileno(), 0, os.SEEK_SET)
         if st:
-            print "%s: download error %d" % (file, st)
+            print("%s: download error %d" % (file, st))
             if markincomplete:
                 self['incomplete'] = True
             return None
         if chksum:
             fchksum = solv.Chksum(chksum.type)
             if not fchksum:
-                print "%s: unknown checksum type" % file
+                print("%s: unknown checksum type" % file)
                 if markincomplete:
                     self['incomplete'] = True
                 return None
             fchksum.add_fd(f.fileno())
             if fchksum != chksum:
-                print "%s: checksum mismatch" % file
+                print("%s: checksum mismatch" % file)
                 if markincomplete:
                     self['incomplete'] = True
                 return None
         if uncompress:
-            return solv.xfopen_fd(file, os.dup(f.fileno()))
-        return solv.xfopen_fd(None, os.dup(f.fileno()))
+            return solv.xfopen_fd(file, f.fileno())
+        return solv.xfopen_fd(None, f.fileno())
 
     def usecachedrepo(self, ext, mark=False):
-        if not ext:
-            cookie = self['cookie']
-        else:
-            cookie = self['extcookie']
         try: 
             repopath = self.cachepath(ext)
-            f = open(repopath, 'r')
+            f = open(repopath, 'rb')
             f.seek(-32, os.SEEK_END)
             fcookie = f.read(32)
             if len(fcookie) != 32:
                 return False
+            if not ext:
+                cookie = self['cookie']
+            else:
+                cookie = self['extcookie']
             if cookie and fcookie != cookie:
                 return False
             if self.type != 'system' and not ext:
@@ -206,11 +208,12 @@ class repo_generic(dict):
                 if len(fextcookie) != 32:
                     return False
             f.seek(0)
+            f = solv.xfopen_fd('', f.fileno())
             flags = 0
             if ext:
-                flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
+                flags = solv.Repo.REPO_USE_LOADING|solv.Repo.REPO_EXTEND_SOLVABLES
                 if ext != 'DL':
-                    flags |= Repo.REPO_LOCALPOOL
+                    flags |= solv.Repo.REPO_LOCALPOOL
             if not self.handle.add_solv(f, flags):
                 return False
             if self.type != 'system' and not ext:
@@ -220,74 +223,68 @@ class repo_generic(dict):
                 # no futimes in python?
                 try:
                     os.utime(repopath, None)
-                except Exception, e:
+                except Exception:
                     pass
-        except IOError, e:
+        except IOError:
             return False
         return True
 
-    def genextcookie(self, f):
-        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
-        chksum.add(self['cookie'])
-        if f:
-            stat = os.fstat(f.fileno())
-            chksum.add(str(stat[ST_DEV]))
-            chksum.add(str(stat[ST_INO]))
-            chksum.add(str(stat[ST_SIZE]))
-            chksum.add(str(stat[ST_MTIME]))
-        extcookie = chksum.raw()
-        # compatibility to c code
-        if ord(extcookie[0]) == 0:
-            extcookie[0] = chr(1)
-        self['extcookie'] = extcookie
-        
-    def writecachedrepo(self, ext, info=None):
+    def writecachedrepo(self, ext, repodata=None):
+        if 'incomplete' in self:
+            return
+        tmpname = None
         try:
             if not os.path.isdir("/var/cache/solv"):
-                os.mkdir("/var/cache/solv", 0755)
+                os.mkdir("/var/cache/solv", 0o755)
             (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
-            os.fchmod(fd, 0444)
-            f = os.fdopen(fd, 'w+')
-            if not info:
+            os.fchmod(fd, 0o444)
+            f = os.fdopen(fd, 'wb+')
+            f = solv.xfopen_fd(None, f.fileno())
+            if not repodata:
                 self.handle.write(f)
             elif ext:
-                info.write(f)
-            else:       # rewrite_repos case
+                repodata.write(f)
+            else:       # rewrite_repos case, do not write stubs
                 self.handle.write_first_repodata(f)
+            f.flush()
             if self.type != 'system' and not ext:
-                if 'extcookie' not in self:
-                    self.genextcookie(f)
+                if not self['extcookie']:
+                    self['extcookie'] = self.calc_cookie_ext(f, self['cookie'])
                 f.write(self['extcookie'])
             if not ext:
                 f.write(self['cookie'])
             else:
                 f.write(self['extcookie'])
-            f.close()
+            f.close()
             if self.handle.iscontiguous():
                 # switch to saved repo to activate paging and save memory
                 nf = solv.xfopen(tmpname)
                 if not ext:
                     # main repo
                     self.handle.empty()
-                    if not self.handle.add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
+                    flags = solv.Repo.SOLV_ADD_NO_STUBS
+                    if repodata:
+                        flags = 0       # rewrite repos case, recreate stubs
+                    if not self.handle.add_solv(nf, flags):
                         sys.exit("internal error, cannot reload solv file")
                 else:
                     # extension repodata
                     # need to extend to repo boundaries, as this is how
-                    # info.write() has written the data
-                    info.extend_to_repo()
-                    # LOCALPOOL does not help as pool already contains all ids
-                    info.add_solv(nf, Repo.REPO_EXTEND_SOLVABLES)
-                solv.xfclose(nf)
+                    # repodata.write() has written the data
+                    repodata.extend_to_repo()
+                    flags = solv.Repo.REPO_EXTEND_SOLVABLES
+                    if ext != 'DL':
+                        flags |= solv.Repo.REPO_LOCALPOOL
+                    repodata.add_solv(nf, flags)
             os.rename(tmpname, self.cachepath(ext))
-        except IOError, e:
+        except (OSError, IOError):
             if tmpname:
                 os.unlink(tmpname)
-                
+
     def updateaddedprovides(self, addedprovides):
         if 'incomplete' in self:
             return 
-        if 'handle' not in self:
+        if not hasattr(self, 'handle'):
             return 
         if self.handle.isempty():
             return
@@ -302,30 +299,55 @@ class repo_generic(dict):
             repodata.internalize()
             self.writecachedrepo(None, repodata)
 
+    def packagespath(self):
+        return ''
+
+    def add_ext_keys(self, ext, repodata, handle):
+        if ext == 'DL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
+        elif ext == 'DU':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
+        elif ext == 'FL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        else:
+            for langtag, langtagtype in [
+                (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
+            ]:
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self.handle.pool.id2langid(langtag, ext, 1))
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
+        
+
 class repo_repomd(repo_generic):
-    def load_if_changed(self):
-        print "rpmmd repo '%s':" % self.name,
+    def load(self, pool):
+        if super(repo_repomd, self).load(pool):
+            return True
+        sys.stdout.write("rpmmd repo '%s': " % self.name)
         sys.stdout.flush()
         f = self.download("repodata/repomd.xml", False, None, None)
         if not f:
-            print "no repomd.xml file, skipped"
+            print("no repomd.xml file, skipped")
             self.handle.free(True)
             del self.handle
             return False
-        self['cookie'] = calc_cookie_fp(f)
+        self['cookie'] = self.calc_cookie_fp(f)
         if self.usecachedrepo(None, True):
-            print "cached"
-            solv.xfclose(f)
+            print("cached")
             return True
         self.handle.add_repomdxml(f, 0)
-        solv.xfclose(f)
-        print "fetching"
+        print("fetching")
         (filename, filechksum) = self.find('primary')
         if filename:
             f = self.download(filename, True, filechksum, True)
             if f:
                 self.handle.add_rpmmd(f, None, 0)
-                solv.xfclose(f)
             if 'incomplete' in self:
                 return False # hopeless, need good primary
         (filename, filechksum) = self.find('updateinfo')
@@ -333,23 +355,21 @@ class repo_repomd(repo_generic):
             f = self.download(filename, True, filechksum, True)
             if f:
                 self.handle.add_updateinfoxml(f, 0)
-                solv.xfclose(f)
         self.add_exts()
-        if 'incomplete' not in self:
-            self.writecachedrepo(None)
+        self.writecachedrepo(None)
         # must be called after writing the repo
         self.handle.create_stubs()
         return True
 
     def find(self, what):
-        di = self.handle.Dataiterator(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
+        di = self.handle.Dataiterator_meta(solv.REPOSITORY_REPOMD_TYPE, what, solv.Dataiterator.SEARCH_STRING)
         di.prepend_keyname(solv.REPOSITORY_REPOMD)
         for d in di:
-            d.setpos_parent()
-            filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
-            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
+            dp = d.parentpos()
+            filename = dp.lookup_str(solv.REPOSITORY_REPOMD_LOCATION)
+            chksum = dp.lookup_checksum(solv.REPOSITORY_REPOMD_CHECKSUM)
             if filename and not chksum:
-                print "no %s file checksum!" % filename
+                print("no %s file checksum!" % filename)
                 filename = None
                 chksum = None
             if filename:
@@ -366,16 +386,12 @@ class repo_repomd(repo_generic):
         repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
         repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
         repodata.set_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
-        if ext == 'DL':
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
-        elif ext == 'FL':
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        self.add_ext_keys(ext, repodata, handle)
         repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
 
     def add_exts(self):
         repodata = self.handle.add_repodata(0)
+        repodata.extend_to_repo()
         self.add_ext(repodata, 'deltainfo', 'DL')
         self.add_ext(repodata, 'filelists', 'FL')
         repodata.internalize()
@@ -401,33 +417,32 @@ class repo_repomd(repo_generic):
         if not f:
             return False
         if ext == 'FL':
-            self.handle.add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+            self.handle.add_rpmmd(f, 'FL', solv.Repo.REPO_USE_LOADING|solv.Repo.REPO_EXTEND_SOLVABLES|solv.Repo.REPO_LOCALPOOL)
         elif ext == 'DL':
-            self.handle.add_deltainfoxml(f, Repo.REPO_USE_LOADING)
-        solv.xfclose(f)
+            self.handle.add_deltainfoxml(f, solv.Repo.REPO_USE_LOADING)
         self.writecachedrepo(ext, repodata)
         return True
 
 class repo_susetags(repo_generic):
-    def load_if_changed(self):
-        print "susetags repo '%s':" % self.name,
+    def load(self, pool):
+        if super(repo_susetags, self).load(pool):
+            return True
+        sys.stdout.write("susetags repo '%s': " % self.name)
         sys.stdout.flush()
         f = self.download("content", False, None, None)
         if not f:
-            print "no content file, skipped"
+            print("no content file, skipped")
             self.handle.free(True)
             del self.handle
             return False
-        self['cookie'] = calc_cookie_fp(f)
+        self['cookie'] = self.calc_cookie_fp(f)
         if self.usecachedrepo(None, True):
-            print "cached"
-            solv.xfclose(f)
+            print("cached")
             return True
         self.handle.add_content(f, 0)
-        solv.xfclose(f)
-        print "fetching"
-        defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
-        descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        print("fetching")
+        defvendorid = self.handle.meta.lookup_id(solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self.handle.meta.lookup_str(solv.SUSETAGS_DESCRDIR)
         if not descrdir:
             descrdir = "suse/setup/descr"
         (filename, filechksum) = self.find('packages.gz')
@@ -436,30 +451,27 @@ class repo_susetags(repo_generic):
         if filename:
             f = self.download(descrdir + '/' + filename, True, filechksum, True)
             if f:
-                self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
-                solv.xfclose(f)
+                self.handle.add_susetags(f, defvendorid, None, solv.Repo.REPO_NO_INTERNALIZE|solv.Repo.SUSETAGS_RECORD_SHARES)
                 (filename, filechksum) = self.find('packages.en.gz')
                 if not filename:
                     (filename, filechksum) = self.find('packages.en')
                 if filename:
                     f = self.download(descrdir + '/' + filename, True, filechksum, True)
                     if f:
-                        self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
-                        solv.xfclose(f)
+                        self.handle.add_susetags(f, defvendorid, None, solv.Repo.REPO_NO_INTERNALIZE|solv.Repo.REPO_REUSE_REPODATA|solv.Repo.REPO_EXTEND_SOLVABLES)
                 self.handle.internalize()
         self.add_exts()
-        if 'incomplete' not in self:
-            self.writecachedrepo(None)
+        self.writecachedrepo(None)
         # must be called after writing the repo
         self.handle.create_stubs()
         return True
 
     def find(self, what):
-        di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
+        di = self.handle.Dataiterator_meta(solv.SUSETAGS_FILE_NAME, what, solv.Dataiterator.SEARCH_STRING)
         di.prepend_keyname(solv.SUSETAGS_FILE)
         for d in di:
-            d.setpos_parent()
-            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
+            dp = d.parentpos()
+            chksum = dp.lookup_checksum(solv.SUSETAGS_FILE_CHECKSUM)
             return (what, chksum)
         return (None, None)
 
@@ -471,31 +483,15 @@ class repo_susetags(repo_generic):
         repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
         if chksum:
             repodata.set_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
-        if ext == 'DU':
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
-        elif ext == 'FL':
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
-            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
-        else:
-            for langtag, langtagtype in [
-                (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
-                (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
-                (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
-                (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
-                (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
-                (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
-            ]:
-                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self.handle.pool.id2langid(langtag, ext, 1))
-                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
+        self.add_ext_keys(ext, repodata, handle)
         repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
         
     def add_exts(self):
         repodata = self.handle.add_repodata(0)
-        di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
+        di = self.handle.Dataiterator_meta(solv.SUSETAGS_FILE_NAME, None, 0)
         di.prepend_keyname(solv.SUSETAGS_FILE)
         for d in di:
-            filename = d.str()
+            filename = d.str
             if not filename:
                 continue
             if filename[0:9] != "packages.":
@@ -521,22 +517,30 @@ class repo_susetags(repo_generic):
             return True
         sys.stdout.write("fetching]\n")
         sys.stdout.flush()
-        defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
-        descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        defvendorid = self.handle.meta.lookup_id(solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self.handle.meta.lookup_str(solv.SUSETAGS_DESCRDIR)
         if not descrdir:
             descrdir = "suse/setup/descr"
         filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
         f = self.download(descrdir + '/' + filename, True, filechksum)
         if not f:
             return False
-        self.handle.add_susetags(f, defvendorid, ext, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
-        solv.xfclose(f)
+        flags = solv.Repo.REPO_USE_LOADING|solv.Repo.REPO_EXTEND_SOLVABLES
+        if ext != 'DL':
+            flags |= solv.Repo.REPO_LOCALPOOL
+        self.handle.add_susetags(f, defvendorid, ext, flags)
         self.writecachedrepo(ext, repodata)
         return True
 
+    def packagespath(self):
+        datadir = self.handle.meta.lookup_str(solv.SUSETAGS_DATADIR)
+        if not datadir:
+            datadir = 'suse'
+        return datadir + '/'
+
 class repo_unknown(repo_generic):
     def load(self, pool):
-        print "unsupported repo '%s': skipped" % self.name
+        print("unsupported repo '%s': skipped" % self.name)
         return False
 
 class repo_system(repo_generic):
@@ -544,14 +548,16 @@ class repo_system(repo_generic):
         self.handle = pool.add_repo(self.name)
         self.handle.appdata = self
         pool.installed = self.handle
-        print "rpm database:",
-        self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
+        sys.stdout.write("rpm database: ")
+        self['cookie'] = self.calc_cookie_file("/var/lib/rpm/Packages")
         if self.usecachedrepo(None):
-            print "cached"
+            print("cached")
             return True
-        print "reading"
-        self.handle.add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
-        self.handle.add_rpmdb(None, Repo.REPO_REUSE_REPODATA)
+        print("reading")
+        if hasattr(self.handle.__class__, 'add_products'):
+            self.handle.add_products("/etc/products.d", solv.Repo.REPO_NO_INTERNALIZE)
+        f = solv.xfopen(self.cachepath())
+        self.handle.add_rpmdb_reffp(f, solv.Repo.REPO_REUSE_REPODATA)
         self.writecachedrepo(None)
         return True
 
@@ -561,141 +567,6 @@ class repo_cmdline(repo_generic):
         self.handle.appdata = self 
         return True
 
-def validarch(pool, arch):
-    if not arch:
-        return False
-    id = pool.str2id(arch, False)
-    if not id:
-        return False
-    return pool.isknownarch(id)
-
-def limitjobs(pool, jobs, flags, evrstr):
-    njobs = []
-    evr = pool.str2id(evrstr)
-    for j in jobs:
-        how = j.how
-        sel = how & Job.SOLVER_SELECTMASK
-        what = pool.rel2id(j.what, evr, flags)
-        if flags == solv.REL_ARCH:
-            how |= Job.SOLVER_SETARCH
-        elif flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
-            if evrstr.find('-') >= 0:
-                how |= Job.SOLVER_SETEVR
-            else:
-                how |= Job.SOLVER_SETEV
-        njobs.append(pool.Job(how, what))
-    return njobs
-
-def limitjobs_evrarch(pool, jobs, flags, evrstr):
-    m = re.match(r'(.+)\.(.+?)$', evrstr)
-    if m and validarch(pool, m.group(2)):
-        jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
-        evrstr = m.group(1)
-    return limitjobs(pool, jobs, flags, evrstr)
-
-def mkjobs_filelist(pool, cmd, arg):
-    if re.search(r'[[*?]', arg):
-        type = Dataiterator.SEARCH_GLOB
-    else:
-        type = Dataiterator.SEARCH_STRING
-    if cmd == 'erase':
-        di = pool.installed.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
-    else:
-        di = pool.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
-    matches = []
-    for d in di:
-        s = d.solvable
-        if s and s.installable():
-            matches.append(s.id)
-            di.skip_solvable()  # one match is enough
-    if matches:
-        print "[using file list match for '%s']" % arg
-        if len(matches) > 1:
-            return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
-        else:
-            return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
-    return []
-
-def mkjobs_rel(pool, cmd, name, rel, evr):
-    flags = 0
-    if rel.find('<') >= 0: flags |= solv.REL_LT
-    if rel.find('=') >= 0: flags |= solv.REL_EQ 
-    if rel.find('>') >= 0: flags |= solv.REL_GT
-    jobs = depglob(pool, name, True, True)
-    if jobs:
-        return limitjobs(pool, jobs, flags, evr)
-    m = re.match(r'(.+)\.(.+?)$', name)
-    if m and validarch(pool, m.group(2)):
-        jobs = depglob(pool, m.group(1), True, True)
-        if jobs:
-            jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
-            return limitjobs(pool, jobs, flags, evr)
-    return []
-
-def mkjobs_nevra(pool, cmd, arg):
-    jobs = depglob(pool, arg, True, True)
-    if jobs:
-        return jobs
-    m = re.match(r'(.+)\.(.+?)$', arg)
-    if m and validarch(pool, m.group(2)):
-        jobs = depglob(pool, m.group(1), True, True)
-        if jobs:
-            return limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
-    m = re.match(r'(.+)-(.+?)$', arg)
-    if m:
-        jobs = depglob(pool, m.group(1), True, False)
-        if jobs:
-            return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
-    m = re.match(r'(.+)-(.+?-.+?)$', arg)
-    if m:
-        jobs = depglob(pool, m.group(1), True, False)
-        if jobs:
-            return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
-    return []
-
-def mkjobs(pool, cmd, arg):
-    if len(arg) and arg[0] == '/':
-        jobs = mkjobs_filelist(pool, cmd, arg)
-        if jobs:
-            return jobs
-    m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
-    if m:
-        return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
-    else:
-        return mkjobs_nevra(pool, cmd, arg)
-            
-def depglob(pool, name, globname, globdep):
-    id = pool.str2id(name, False)
-    if id:
-        match = False
-        for s in pool.whatprovides(id):
-            if globname and s.nameid == id:
-                return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
-            match = True
-        if match:
-            if globname and globdep:
-                print "[using capability match for '%s']" % name
-            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
-    if not re.search(r'[[*?]', name):
-        return []
-    if globname:
-        # try name glob
-        idmatches = {}
-        for d in pool.Dataiterator(0, solv.SOLVABLE_NAME, name, Dataiterator.SEARCH_GLOB):
-            s = d.solvable
-            if s.installable():
-                idmatches[s.nameid] = True
-        if idmatches:
-            return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
-    if globdep:
-        # try dependency glob
-        idmatches = pool.matchprovidingids(name, Dataiterator.SEARCH_GLOB)
-        if idmatches:
-            print "[using capability match for '%s']" % name
-            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches) ]
-    return []
-    
-
 def load_stub(repodata):
     repo = repodata.repo.appdata
     if repo:
@@ -704,6 +575,9 @@ def load_stub(repodata):
 
 
 parser = OptionParser(usage="usage: solv.py [options] COMMAND")
+parser.add_option('-r', '--repo', action="append", type="string", dest="repos", help="limit to specified repositories")
+parser.add_option('--best', action="store_true", dest="best", help="force installation/update to best packages")
+parser.add_option('--clean', action="store_true", dest="clean", help="delete no longer needed packages")
 (options, args) = parser.parse_args()
 if not args:
     parser.print_help(sys.stderr)
@@ -711,21 +585,30 @@ if not args:
 
 cmd = args[0]
 args = args[1:]
-if cmd == 'li':
-    cmd = 'list'
-if cmd == 'in':
-    cmd = 'install'
-if cmd == 'rm':
-    cmd = 'erase'
-if cmd == 've':
-    cmd = 'verify'
-if cmd == 'se':
-    cmd = 'search'
 
+cmdabbrev = {'ls': 'list', 'in': 'install', 'rm': 'erase', 've': 'verify', 'se': 'search'}
+if cmd in cmdabbrev:
+    cmd = cmdabbrev[cmd]
+
+cmdactionmap = {
+  'install': solv.Job.SOLVER_INSTALL,
+  'erase':   solv.Job.SOLVER_ERASE,
+  'up':      solv.Job.SOLVER_UPDATE,
+  'dup':     solv.Job.SOLVER_DISTUPGRADE,
+  'verify':  solv.Job.SOLVER_VERIFY,
+  'list':    0,
+  'info':    0
+}
 
 # read all repo configs
 repos = []
-for reposdir in ["/etc/zypp/repos.d"]:
+reposdirs = []
+if os.path.isdir("/etc/zypp/repos.d"):
+  reposdirs = [ "/etc/zypp/repos.d" ]
+else:
+  reposdirs = [ "/etc/yum/repos.d" ]
+
+for reposdir in reposdirs:
     if not os.path.isdir(reposdir):
         continue
     for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
@@ -747,7 +630,7 @@ for reposdir in ["/etc/zypp/repos.d"]:
             repos.append(repo)
 
 pool = solv.Pool()
-pool.setarch(os.uname()[4])
+pool.setarch()
 pool.set_loadcallback(load_stub)
 
 # now load all enabled repos into the pool
@@ -757,15 +640,35 @@ for repo in repos:
     if int(repo['enabled']):
         repo.load(pool)
     
+repofilter = None
+if options.repos:
+    for reponame in options.repos:
+        mrepos = [ repo for repo in repos if repo.name == reponame ]
+        if not mrepos:
+            print("no repository matches '%s'" % reponame)
+            sys.exit(1)
+        repo = mrepos[0]
+        if hasattr(repo, 'handle'):
+            if not repofilter:
+                repofilter = pool.Selection()
+            repofilter.add(repo.handle.Selection(solv.Job.SOLVER_SETVENDOR))
+
 if cmd == 'search':
-    matches = {}
-    di = pool.Dataiterator(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
+    pool.createwhatprovides()
+    sel = pool.Selection()
+    di = pool.Dataiterator(solv.SOLVABLE_NAME, args[0], solv.Dataiterator.SEARCH_SUBSTRING|solv.Dataiterator.SEARCH_NOCASE)
     for d in di:
-        matches[d.solvid] = True
-    for solvid in sorted(matches.keys()):
-        print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
+        sel.add_raw(solv.Job.SOLVER_SOLVABLE, d.solvid)
+    if repofilter:
+       sel.filter(repofilter)
+    for s in sel.solvables():
+        print(" - %s [%s]: %s" % (s, s.repo.name, s.lookup_str(solv.SOLVABLE_SUMMARY)))
     sys.exit(0)
 
+if cmd not in cmdactionmap:
+    print("unknown command %s" % cmd)
+    sys.exit(1)
+
 cmdlinerepo = None
 if cmd == 'list' or cmd == 'info' or cmd == 'install':
     for arg in args:
@@ -774,11 +677,15 @@ if cmd == 'list' or cmd == 'info' or cmd == 'install':
                 cmdlinerepo = repo_cmdline('@commandline', 'cmdline')
                 cmdlinerepo.load(pool)
                 cmdlinerepo['packages'] = {}
-            cmdlinerepo['packages'][arg] = cmdlinerepo.handle.add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
+            s = cmdlinerepo.handle.add_rpm(arg, solv.Repo.REPO_REUSE_REPODATA|solv.Repo.REPO_NO_INTERNALIZE)
+            if not s:
+                print(pool.errstr)
+                sys.exit(1)
+            cmdlinerepo['packages'][arg] = s
     if cmdlinerepo:
         cmdlinerepo.handle.internalize()
 
-addedprovides = pool.addfileprovides_ids()
+addedprovides = pool.addfileprovides_queue()
 if addedprovides:
     sysrepo.updateaddedprovides(addedprovides)
     for repo in repos:
@@ -790,263 +697,265 @@ pool.createwhatprovides()
 jobs = []
 for arg in args:
     if cmdlinerepo and arg in cmdlinerepo['packages']:
-        jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
+        jobs.append(pool.Job(solv.Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg].id))
     else:
-        njobs = mkjobs(pool, cmd, arg)
-        if not njobs:
-            print "nothing matches '%s'" % arg
+        flags = solv.Selection.SELECTION_NAME|solv.Selection.SELECTION_PROVIDES|solv.Selection.SELECTION_GLOB
+        flags |= solv.Selection.SELECTION_CANON|solv.Selection.SELECTION_DOTARCH|solv.Selection.SELECTION_REL
+        if len(arg) and arg[0] == '/':
+            flags |= solv.Selection.SELECTION_FILELIST
+            if cmd == 'erase':
+                flags |= solv.Selection.SELECTION_INSTALLED_ONLY
+        sel = pool.select(arg, flags)
+        if repofilter:
+           sel.filter(repofilter)
+        if sel.isempty():
+            sel = pool.select(arg, flags | solv.Selection.SELECTION_NOCASE)
+            if repofilter:
+               sel.filter(repofilter)
+            if not sel.isempty():
+                print("[ignoring case for '%s']" % arg)
+        if sel.isempty():
+            print("nothing matches '%s'" % arg)
             sys.exit(1)
-        jobs += njobs
+        if sel.flags & solv.Selection.SELECTION_FILELIST:
+            print("[using file list match for '%s']" % arg)
+        if sel.flags & solv.Selection.SELECTION_PROVIDES:
+            print("[using capability match for '%s']" % arg)
+        jobs += sel.jobs(cmdactionmap[cmd])
+
+if not jobs and (cmd == 'up' or cmd == 'dup' or cmd == 'verify' or repofilter):
+    sel = pool.Selection_all()
+    if repofilter:
+       sel.filter(repofilter)
+    jobs += sel.jobs(cmdactionmap[cmd])
+
+if not jobs:
+    print("no package matched.")
+    sys.exit(1)
 
 if cmd == 'list' or cmd == 'info':
-    if not jobs:
-        print "no package matched."
-        sys.exit(1)
     for job in jobs:
         for s in job.solvables():
             if cmd == 'info':
-                print "Name:        %s" % s
-                print "Repo:        %s" % s.repo
-                print "Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+                print("Name:        %s" % s)
+                print("Repo:        %s" % s.repo)
+                print("Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY))
                 str = s.lookup_str(solv.SOLVABLE_URL)
                 if str:
-                    print "Url:         %s" % str
+                    print("Url:         %s" % str)
                 str = s.lookup_str(solv.SOLVABLE_LICENSE)
                 if str:
-                    print "License:     %s" % str
-                print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
-                print
+                    print("License:     %s" % str)
+                print("Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION))
+                print('')
             else:
-                print "  - %s [%s]" % (s, s.repo)
-                print "    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+                print("  - %s [%s]" % (s, s.repo))
+                print("    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY))
     sys.exit(0)
 
-if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == 'verify':
-    if not jobs:
-        if cmd == 'up' or cmd == 'verify' or cmd == 'dup':
-            jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
-        else:
-            print "no package matched."
+# up magic: use install instead of update if no installed package matches
+for job in jobs:
+    if cmd == 'up' and job.isemptyupdate():
+        job.how ^= solv.Job.SOLVER_UPDATE ^ solv.Job.SOLVER_INSTALL
+    if options.best:
+        job.how |= solv.Job.SOLVER_FORCEBEST
+    if options.clean:
+        job.how |= solv.Job.SOLVER_CLEANDEPS
+
+#pool.set_debuglevel(2)
+solver = pool.Solver()
+solver.set_flag(solv.Solver.SOLVER_FLAG_SPLITPROVIDES, 1);
+if cmd == 'erase':
+    solver.set_flag(solv.Solver.SOLVER_FLAG_ALLOW_UNINSTALL, 1);
+
+while True:
+    problems = solver.solve(jobs)
+    if not problems:
+        break
+    for problem in problems:
+        print("Problem %d/%d:" % (problem.id, len(problems)))
+        print(problem)
+        solutions = problem.solutions()
+        for solution in solutions:
+            print("  Solution %d:" % solution.id)
+            elements = solution.elements(True)
+            for element in elements:
+                print("  - %s" % element.str())
+            print('')
+        sol = ''
+        while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
+            sys.stdout.write("Please choose a solution: ")
+            sys.stdout.flush()
+            sol = sys.stdin.readline().strip()
+        if sol == 's':
+            continue        # skip problem
+        if sol == 'q':
             sys.exit(1)
-    for job in jobs:
-        if cmd == 'up':
-            # up magic: use install instead of update if no installed package matches
-            if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), job.solvables()):
-                job.how |= Job.SOLVER_UPDATE
+        solution = solutions[int(sol) - 1]
+        for element in solution.elements():
+            newjob = element.Job()
+            if element.type == solv.Solver.SOLVER_SOLUTION_JOB:
+                jobs[element.jobidx] = newjob
             else:
-                job.how |= Job.SOLVER_INSTALL
-        elif cmd == 'install':
-            job.how |= Job.SOLVER_INSTALL
-        elif cmd == 'erase':
-            job.how |= Job.SOLVER_ERASE
-        elif cmd == 'dup':
-            job.how |= Job.SOLVER_DISTUPGRADE
-        elif cmd == 'verify':
-            job.how |= Job.SOLVER_VERIFY
-
-    #pool.set_debuglevel(2)
-    solver = None
-    while True:
-        solver = pool.Solver()
-        solver.set_flag(Solver.SOLVER_FLAG_IGNORE_ALREADY_RECOMMENDED, 1);
-        solver.set_flag(Solver.SOLVER_FLAG_SPLITPROVIDES, 1);
-        if cmd == 'erase':
-            solver.set_flag(Solver.SOLVER_FLAG_ALLOW_UNINSTALL, 1);
-        if cmd == 'dup' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_DISTUPGRADE | Job.SOLVER_SOLVABLE_ALL):
-            solver.set_flag(Solver.SOLVER_FLAG_ALLOW_DOWNGRADE, 1);
-            solver.set_flag(Solver.SOLVER_FLAG_ALLOW_VENDORCHANGE, 1);
-            solver.set_flag(Solver.SOLVER_FLAG_ALLOW_ARCHCHANGE, 1);
-        problems = solver.solve(jobs)
-        if not problems:
-            break
-        for problem in problems:
-            print "Problem %d:" % problem.id
-            r = problem.findproblemrule()
-            ri = r.info()
-            print ri.problemstr()
-            solutions = problem.solutions()
-            for solution in solutions:
-                print "  Solution %d:" % solution.id
-                elements = solution.elements(True)
-                for element in elements:
-                    print "  - %s" % element.str
-                print
-            sol = ''
-            while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
-                sys.stdout.write("Please choose a solution: ")
-                sys.stdout.flush()
-                sol = sys.stdin.readline().strip()
-            if sol == 's':
-                continue        # skip problem
-            if sol == 'q':
-                sys.exit(1)
-            solution = solutions[int(sol) - 1]
-            for element in solution.elements():
-                newjob = element.Job()
-                if element.type == Solver.SOLVER_SOLUTION_JOB:
-                    jobs[element.jobidx] = newjob
-                else:
-                    if newjob and newjob not in jobs:
-                        jobs.append(newjob)
-                        
-    # no problems, show transaction
-    trans = solver.transaction()
-    del solver
-    if trans.isempty():
-        print "Nothing to do."
-        sys.exit(0)
-    print
-    print "Transaction summary:"
-    print
-    for cl in trans.classify():
-        if cl.type == Transaction.SOLVER_TRANSACTION_ERASE:
-            print "%d erased packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_INSTALL:
-            print "%d installed packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_REINSTALLED:
-            print "%d reinstalled packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
-            print "%d downgraded packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_CHANGED:
-            print "%d changed packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED:
-            print "%d upgraded packages:" % cl.count
-        elif cl.type == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
-            print "%d vendor changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
-        elif cl.type == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
-            print "%d arch changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
+                if newjob and newjob not in jobs:
+                    jobs.append(newjob)
+                    
+# no problems, show transaction
+trans = solver.transaction()
+del solver
+if trans.isempty():
+    print("Nothing to do.")
+    sys.exit(0)
+print('')
+print("Transaction summary:")
+print('')
+for cl in trans.classify(solv.Transaction.SOLVER_TRANSACTION_SHOW_OBSOLETES | solv.Transaction.SOLVER_TRANSACTION_OBSOLETE_IS_UPGRADE):
+    if cl.type == solv.Transaction.SOLVER_TRANSACTION_ERASE:
+        print("%d erased packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_INSTALL:
+        print("%d installed packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_REINSTALLED:
+        print("%d reinstalled packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+        print("%d downgraded packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_CHANGED:
+        print("%d changed packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_UPGRADED:
+        print("%d upgraded packages:" % cl.count)
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
+        print("%d vendor changes from '%s' to '%s':" % (cl.count, cl.fromstr, cl.tostr))
+    elif cl.type == solv.Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
+        print("%d arch changes from '%s' to '%s':" % (cl.count, cl.fromstr, cl.tostr))
+    else:
+        continue
+    for p in cl.solvables():
+        if cl.type == solv.Transaction.SOLVER_TRANSACTION_UPGRADED or cl.type == solv.Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+            op = trans.othersolvable(p)
+            print("  - %s -> %s" % (p, op))
         else:
+            print("  - %s" % p)
+    print('')
+print("install size change: %d K" % trans.calc_installsizechange())
+print('')
+
+while True:
+    sys.stdout.write("OK to continue (y/n)? ")
+    sys.stdout.flush()
+    yn = sys.stdin.readline().strip()
+    if yn == 'y': break
+    if yn == 'n' or yn == 'q': sys.exit(1)
+newpkgs = trans.newsolvables()
+newpkgsfp = {}
+if newpkgs:
+    downloadsize = 0
+    for p in newpkgs:
+        downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
+    print("Downloading %d packages, %d K" % (len(newpkgs), downloadsize / 1024))
+    for p in newpkgs:
+        repo = p.repo.appdata
+        location, medianr = p.lookup_location()
+        if not location:
             continue
-        for p in cl.solvables():
-            if cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED or cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
-                op = trans.othersolvable(p)
-                print "  - %s -> %s" % (p, op)
-            else:
-                print "  - %s" % p
-        print
-    print "install size change: %d K" % trans.calc_installsizechange()
-    print
-    
-    while True:
-        sys.stdout.write("OK to continue (y/n)? ")
-        sys.stdout.flush()
-        yn = sys.stdin.readline().strip()
-        if yn == 'y': break
-        if yn == 'n': sys.exit(1)
-    newpkgs = trans.newpackages()
-    newpkgsfp = {}
-    if newpkgs:
-        downloadsize = 0
-        for p in newpkgs:
-            downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
-        print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
-        for p in newpkgs:
-            repo = p.repo.appdata
-            location, medianr = p.lookup_location()
-            if not location:
-                continue
-            if repo.type == 'commandline':
-                f = solv.xfopen(location)
-                if not f:
-                    sys.exit("\n%s: %s not found" % location)
-                newpkgsfp[p.id] = f
-                continue
-            if not sysrepo.handle.isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
-                pname = p.name
-                di = p.repo.Dataiterator(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
-                di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
-                for d in di:
-                    d.setpos_parent()
-                    if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
-                        continue
-                    baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
-                    candidate = None
-                    for installedp in pool.whatprovides(p.nameid):
-                        if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
-                            candidate = installedp
-                    if not candidate:
-                        continue
-                    seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
-                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
-                    if st:
-                        continue
-                    chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
-                    if not chksum:
-                        continue
-                    dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
-                    f = repo.download(dloc, False, chksum)
-                    if not f:
-                        continue
-                    nf = tempfile.TemporaryFile()
-                    nf = os.dup(nf.fileno())
-                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
-                    solv.xfclose(f)
-                    os.lseek(nf, 0, os.SEEK_SET)
-                    newpkgsfp[p.id] = solv.xfopen_fd("", nf)
-                    break
-                if p.id in newpkgsfp:
-                    sys.stdout.write("d")
-                    sys.stdout.flush()
-                    continue
-                        
-            if repo.type == 'susetags':
-                datadir = repo.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
-                if not datadir:
-                    datadir = 'suse'
-                location = datadir + '/' + location
-            chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
-            f = repo.download(location, False, chksum)
+        if repo.type == 'commandline':
+            f = solv.xfopen(location)
             if not f:
-                sys.exit("\n%s: %s not found in repository" % (repo.name, location))
+                sys.exit("\n%s: %s not found" % (repo.name, location))
             newpkgsfp[p.id] = f
-            sys.stdout.write(".")
-            sys.stdout.flush()
-        print
-    print "Committing transaction:"
-    print
-    ts = rpm.TransactionSet('/')
-    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
-    erasenamehelper = {}
-    for p in trans.steps():
-        type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
-        if type == Transaction.SOLVER_TRANSACTION_ERASE:
-            rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
-            erasenamehelper[p.name] = p
-            if not rpmdbid:
-                sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p)
-            ts.addErase(rpmdbid)
-        elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
-            f = newpkgsfp[p.id]
-            h = ts.hdrFromFdno(solv.xfileno(f))
-            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
-            ts.addInstall(h, p, 'u')
-        elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
-            f = newpkgsfp[p.id]
-            h = ts.hdrFromFdno(solv.xfileno(f))
-            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
-            ts.addInstall(h, p, 'i')
-    checkproblems = ts.check()
-    if checkproblems:
-        print checkproblems
-        sys.exit("Sorry.")
-    ts.order()
-    def runCallback(reason, amount, total, p, d):
-        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
-            return solv.xfileno(newpkgsfp[p.id])
-        if reason == rpm.RPMCALLBACK_INST_START:
-            print "install", p
-        if reason == rpm.RPMCALLBACK_UNINST_START:
-            # argh, p is just the name of the package
-            if p in erasenamehelper:
-                p = erasenamehelper[p]
-                print "erase", p
-    runproblems = ts.run(runCallback, '')
-    if runproblems:
-        print runproblems
-        sys.exit(1)
-    sys.exit(0)
-
-print "unknown command", cmd
-sys.exit(1)
+            continue
+        if not sysrepo.handle.isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
+            pname = p.name
+            di = p.repo.Dataiterator_meta(solv.DELTA_PACKAGE_NAME, pname, solv.Dataiterator.SEARCH_STRING)
+            di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
+            for d in di:
+                dp = d.parentpos()
+                if dp.lookup_id(solv.DELTA_PACKAGE_EVR) != p.evrid or dp.lookup_id(solv.DELTA_PACKAGE_ARCH) != p.archid:
+                    continue
+                baseevrid = dp.lookup_id(solv.DELTA_BASE_EVR)
+                candidate = None
+                for installedp in pool.whatprovides(p.nameid):
+                    if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
+                        candidate = installedp
+                if not candidate:
+                    continue
+                seq = dp.lookup_deltaseq()
+                st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
+                if st:
+                    continue
+                chksum = dp.lookup_checksum(solv.DELTA_CHECKSUM)
+                if not chksum:
+                    continue
+                dloc, dmedianr = dp.lookup_deltalocation()
+                dloc = repo.packagespath() + dloc
+                f = repo.download(dloc, False, chksum)
+                if not f:
+                    continue
+                nf = tempfile.TemporaryFile()
+                nf = os.dup(nf.fileno())   # get rid of CLOEXEC
+                f.cloexec(0)
+                st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % f.fileno(), "/dev/fd/%d" % nf])
+                if st:
+                    os.close(nf)
+                    continue
+                os.lseek(nf, 0, os.SEEK_SET)
+                newpkgsfp[p.id] = solv.xfopen_fd("", nf)
+                os.close(nf)
+                break
+            if p.id in newpkgsfp:
+                sys.stdout.write("d")
+                sys.stdout.flush()
+                continue
+                    
+        chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
+        location = repo.packagespath() + location
+        f = repo.download(location, False, chksum)
+        if not f:
+            sys.exit("\n%s: %s not found in repository" % (repo.name, location))
+        newpkgsfp[p.id] = f
+        sys.stdout.write(".")
+        sys.stdout.flush()
+    print('')
+print("Committing transaction:")
+print('')
+ts = rpm.TransactionSet('/')
+ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
+erasenamehelper = {}
+for p in trans.steps():
+    type = trans.steptype(p, solv.Transaction.SOLVER_TRANSACTION_RPM_ONLY)
+    if type == solv.Transaction.SOLVER_TRANSACTION_ERASE:
+        rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
+        erasenamehelper[p.name] = p
+        if not rpmdbid:
+            sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p)
+        ts.addErase(rpmdbid)
+    elif type == solv.Transaction.SOLVER_TRANSACTION_INSTALL:
+        f = newpkgsfp[p.id]
+        h = ts.hdrFromFdno(f.fileno())
+        os.lseek(f.fileno(), 0, os.SEEK_SET)
+        ts.addInstall(h, p, 'u')
+    elif type == solv.Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
+        f = newpkgsfp[p.id]
+        h = ts.hdrFromFdno(f.fileno())
+        os.lseek(f.fileno(), 0, os.SEEK_SET)
+        ts.addInstall(h, p, 'i')
+checkproblems = ts.check()
+if checkproblems:
+    print(checkproblems)
+    sys.exit("Sorry.")
+ts.order()
+def runCallback(reason, amount, total, p, d):
+    if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+        return newpkgsfp[p.id].fileno()
+    if reason == rpm.RPMCALLBACK_INST_START:
+        print("install %s" % p)
+    if reason == rpm.RPMCALLBACK_UNINST_START:
+        # argh, p is just the name of the package
+        if p in erasenamehelper:
+            p = erasenamehelper[p]
+            print("erase %s" % p)
+runproblems = ts.run(runCallback, '')
+if runproblems:
+    print(runproblems)
+    sys.exit(1)
+sys.exit(0)
 
 # vim: sw=4 et