- add ver files, fix build
author Michael Schroeder <mls@suse.de>
Tue, 24 May 2011 18:40:00 +0000 (20:40 +0200)
committer Michael Schroeder <mls@suse.de>
Tue, 24 May 2011 18:40:00 +0000 (20:40 +0200)
examples/CMakeLists.txt
examples/pysolv [changed mode: 0755->0644]
ext/CMakeLists.txt
ext/libsolvext.ver [new file with mode: 0644]
package/libsolv.spec.in
src/CMakeLists.txt
src/libsolv.ver [new file with mode: 0644]
tools/CMakeLists.txt
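
Note: the two new .ver files (src/libsolv.ver and ext/libsolvext.ver) are GNU ld symbol version scripts; their contents and the matching src/ and ext/ CMakeLists.txt hookup are not included in the extract below. As a rough sketch only -- the target name and path here are illustrative assumptions, not the commit's actual code -- such a script is typically handed to the linker from CMake along these lines:

    # hypothetical sketch: export only the symbols listed in the version
    # script from the shared library and hide everything else
    SET_TARGET_PROPERTIES(libsolv PROPERTIES
        LINK_FLAGS "-Wl,--version-script=${CMAKE_CURRENT_SOURCE_DIR}/libsolv.ver")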

index 1a29116..c3264a9 100644
@@ -1,10 +1,10 @@
-ADD_EXECUTABLE(solv solv.c)
-IF ( DEBIAN )
-TARGET_LINK_LIBRARIES(solv libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
-ELSE ( DEBIAN )
-TARGET_LINK_LIBRARIES(solv libsolvext libsolv ${RPMDB_LIBRARY} ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
-ENDIF ( DEBIAN )
+ADD_EXECUTABLE (solv solv.c)
+IF (DEBIAN)
+TARGET_LINK_LIBRARIES (solv libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
+ELSE (DEBIAN)
+TARGET_LINK_LIBRARIES (solv libsolvext libsolv ${RPMDB_LIBRARY} ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
+ENDIF (DEBIAN)
 
-install(TARGETS
+INSTALL(TARGETS
     solv
     DESTINATION ${BIN_INSTALL_DIR})
old mode 100755
new mode 100644
index 045cf35..f129dce
@@ -58,562 +58,562 @@ def calc_cookie_fp(fp):
 
 class repo_generic(dict):
     def __init__(self, name, type, attribs = {}):
-       for k in attribs:
-           self[k] = attribs[k]
-       self.name = name
-       self.type = type
+        for k in attribs:
+            self[k] = attribs[k]
+        self.name = name
+        self.type = type
 
     def cachepath(self, ext = None):
-       path = re.sub(r'^\.', '_', self.name)
-       if ext:
-           path += "_" + ext + ".solvx"
-       else:
-           path += ".solv"
-       return "/var/cache/solv/" + re.sub(r'[/]', '_', path)
-       
+        path = re.sub(r'^\.', '_', self.name)
+        if ext:
+            path += "_" + ext + ".solvx"
+        else:
+            path += ".solv"
+        return "/var/cache/solv/" + re.sub(r'[/]', '_', path)
+        
     def load(self, pool):
-       self.handle = pool.add_repo(self.name)
-       self.handle.appdata = self
-       self.handle.priority = 99 - self['priority']
-       if self['autorefresh']:
-           dorefresh = True
-       if dorefresh:
-           try:
-               st = os.stat(self.cachepath())
-               if time.time() - st[ST_MTIME] < self['metadata_expire']:
-                   dorefresh = False
-           except OSError, e:
-               pass
-       self['cookie'] = ''
-       if not dorefresh and self.usecachedrepo(None):
-           print "repo: '%s': cached" % self.name
-           return True
-       return self.load_if_changed()
+        self.handle = pool.add_repo(self.name)
+        self.handle.appdata = self
+        self.handle.priority = 99 - self['priority']
+        if self['autorefresh']:
+            dorefresh = True
+        if dorefresh:
+            try:
+                st = os.stat(self.cachepath())
+                if time.time() - st[ST_MTIME] < self['metadata_expire']:
+                    dorefresh = False
+            except OSError, e:
+                pass
+        self['cookie'] = ''
+        if not dorefresh and self.usecachedrepo(None):
+            print "repo: '%s': cached" % self.name
+            return True
+        return self.load_if_changed()
 
     def load_if_changed(self):
-       return False
+        return False
 
     def load_ext(repodata):
-       return False
+        return False
 
     def setfromurls(self, urls):
-       if not urls:
-           return
-       url = urls[0]
-       print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
-       self['baseurl'] = url
+        if not urls:
+            return
+        url = urls[0]
+        print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
+        self['baseurl'] = url
 
     def setfrommetalink(self, metalink):
-       nf = self.download(metalink, False, None)
-       if not nf:
-           return None
-       f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
-       solv.xfclose(nf)
-       urls = []
-       chksum = None
-       for l in f.readlines():
-           l = l.strip()
-           m = re.match(r'^<hash type="sha256">([0-9a-fA-F]{64})</hash>', l)
-           if m:
-               chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256, m.group(1))
-           m = re.match(r'^<url.*>(https?://.+)repodata/repomd.xml</url>', l)
-           if m:
-               urls.append(m.group(1))
-       if not urls:
-           chksum = None       # in case the metalink is about a different file
-       f.close()
-       self.setfromurls(urls)
+        nf = self.download(metalink, False, None)
+        if not nf:
+            return None
+        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
+        solv.xfclose(nf)
+        urls = []
+        chksum = None
+        for l in f.readlines():
+            l = l.strip()
+            m = re.match(r'^<hash type="sha256">([0-9a-fA-F]{64})</hash>', l)
+            if m:
+                chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256, m.group(1))
+            m = re.match(r'^<url.*>(https?://.+)repodata/repomd.xml</url>', l)
+            if m:
+                urls.append(m.group(1))
+        if not urls:
+            chksum = None       # in case the metalink is about a different file
+        f.close()
+        self.setfromurls(urls)
         return chksum
-       
+        
     def setfrommirrorlist(self, mirrorlist):
-       nf = self.download(mirrorlist, False, None)
-       if not nf:
-           return
-       f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
-       solv.xfclose(nf)
-       urls = []
-       for l in f.readline():
-           l = l.strip()
-           if l[0:6] == 'http://' or l[0:7] == 'https://':
-               urls.append(l)
-       self.setfromurls(urls)
-       f.close()
-       
+        nf = self.download(mirrorlist, False, None)
+        if not nf:
+            return
+        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
+        solv.xfclose(nf)
+        urls = []
+        for l in f.readline():
+            l = l.strip()
+            if l[0:6] == 'http://' or l[0:7] == 'https://':
+                urls.append(l)
+        self.setfromurls(urls)
+        f.close()
+        
     def download(self, file, uncompress, chksum, markincomplete=False):
-       url = None
+        url = None
         if 'baseurl' not in self:
-           if 'metalink' in self:
-               if file != self['metalink']:
-                   metalinkchksum = self.setfrommetalink(self['metalink'])
-                   if file == 'repodata/repomd.xml' and metalinkchksum and not chksum:
-                       chksum = metalinkchksum
-               else:
-                   url = file
-           elif 'mirrorlist' in self:
-               if file != self['mirrorlist']:
-                   self.setfrommirrorlist(self['mirrorlist'])
-               else:
-                   url = file
-       if not url:
-           if 'baseurl' not in self:
-               print "%s: no baseurl" % self.name
-               return None
-           url = re.sub(r'/$', '', self['baseurl']) + '/' + file
-       f = tempfile.TemporaryFile()
-       st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
-       if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
-           return None
-       os.lseek(f.fileno(), 0, os.SEEK_SET)
-       if st:
-           print "%s: download error %d" % (file, st)
-           if markincomplete:
-               self['incomplete'] = True
-           return None
-       if chksum:
-           fchksum = solv.Chksum(chksum.type)
-           if not fchksum:
-               print "%s: unknown checksum type" % file
-               if markincomplete:
-                   self['incomplete'] = True
-               return None
-           fchksum.add_fd(f.fileno())
-           if fchksum != chksum:
-               print "%s: checksum mismatch" % file
-               if markincomplete:
-                   self['incomplete'] = True
-               return None
-       if uncompress:
-           return solv.xfopen_fd(file, os.dup(f.fileno()))
-       return solv.xfopen_fd(None, os.dup(f.fileno()))
+            if 'metalink' in self:
+                if file != self['metalink']:
+                    metalinkchksum = self.setfrommetalink(self['metalink'])
+                    if file == 'repodata/repomd.xml' and metalinkchksum and not chksum:
+                        chksum = metalinkchksum
+                else:
+                    url = file
+            elif 'mirrorlist' in self:
+                if file != self['mirrorlist']:
+                    self.setfrommirrorlist(self['mirrorlist'])
+                else:
+                    url = file
+        if not url:
+            if 'baseurl' not in self:
+                print "%s: no baseurl" % self.name
+                return None
+            url = re.sub(r'/$', '', self['baseurl']) + '/' + file
+        f = tempfile.TemporaryFile()
+        st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
+        if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
+            return None
+        os.lseek(f.fileno(), 0, os.SEEK_SET)
+        if st:
+            print "%s: download error %d" % (file, st)
+            if markincomplete:
+                self['incomplete'] = True
+            return None
+        if chksum:
+            fchksum = solv.Chksum(chksum.type)
+            if not fchksum:
+                print "%s: unknown checksum type" % file
+                if markincomplete:
+                    self['incomplete'] = True
+                return None
+            fchksum.add_fd(f.fileno())
+            if fchksum != chksum:
+                print "%s: checksum mismatch" % file
+                if markincomplete:
+                    self['incomplete'] = True
+                return None
+        if uncompress:
+            return solv.xfopen_fd(file, os.dup(f.fileno()))
+        return solv.xfopen_fd(None, os.dup(f.fileno()))
 
     def usecachedrepo(self, ext, mark=False):
-       if not ext:
-           cookie = self['cookie']
-       else:
-           cookie = self['extcookie']
-       try: 
-           repopath = self.cachepath(ext)
-           f = open(repopath, 'r')
-           f.seek(-32, os.SEEK_END)
-           fcookie = f.read(32)
-           if len(fcookie) != 32:
-               return False
-           if cookie and fcookie != cookie:
-               return False
-           if self.type != 'system' and not ext:
-               f.seek(-32 * 2, os.SEEK_END)
-               fextcookie = f.read(32)
-               if len(fextcookie) != 32:
-                   return False
-           f.seek(0)
-           flags = 0
-           if ext:
-               flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
-               if ext != 'DL':
-                   flags |= Repo.REPO_LOCALPOOL
-           if not self.handle.add_solv(f, flags):
-               return False
-           if self.type != 'system' and not ext:
-               self['cookie'] = fcookie
-               self['extcookie'] = fextcookie
-           if mark:
-               # no futimes in python?
-               try:
-                   os.utime(repopath, None)
-               except Exception, e:
-                   pass
-       except IOError, e:
-           return False
-       return True
+        if not ext:
+            cookie = self['cookie']
+        else:
+            cookie = self['extcookie']
+        try: 
+            repopath = self.cachepath(ext)
+            f = open(repopath, 'r')
+            f.seek(-32, os.SEEK_END)
+            fcookie = f.read(32)
+            if len(fcookie) != 32:
+                return False
+            if cookie and fcookie != cookie:
+                return False
+            if self.type != 'system' and not ext:
+                f.seek(-32 * 2, os.SEEK_END)
+                fextcookie = f.read(32)
+                if len(fextcookie) != 32:
+                    return False
+            f.seek(0)
+            flags = 0
+            if ext:
+                flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
+                if ext != 'DL':
+                    flags |= Repo.REPO_LOCALPOOL
+            if not self.handle.add_solv(f, flags):
+                return False
+            if self.type != 'system' and not ext:
+                self['cookie'] = fcookie
+                self['extcookie'] = fextcookie
+            if mark:
+                # no futimes in python?
+                try:
+                    os.utime(repopath, None)
+                except Exception, e:
+                    pass
+        except IOError, e:
+            return False
+        return True
 
     def genextcookie(self, f):
-       chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
-       chksum.add(self['cookie'])
-       if f:
-           stat = os.fstat(f.fileno())
-           chksum.add(str(stat[ST_DEV]))
-           chksum.add(str(stat[ST_INO]))
-           chksum.add(str(stat[ST_SIZE]))
-           chksum.add(str(stat[ST_MTIME]))
-       extcookie = chksum.raw()
-       # compatibility to c code
-       if ord(extcookie[0]) == 0:
-           extcookie[0] = chr(1)
-       self['extcookie'] = extcookie
-       
+        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
+        chksum.add(self['cookie'])
+        if f:
+            stat = os.fstat(f.fileno())
+            chksum.add(str(stat[ST_DEV]))
+            chksum.add(str(stat[ST_INO]))
+            chksum.add(str(stat[ST_SIZE]))
+            chksum.add(str(stat[ST_MTIME]))
+        extcookie = chksum.raw()
+        # compatibility to c code
+        if ord(extcookie[0]) == 0:
+            extcookie[0] = chr(1)
+        self['extcookie'] = extcookie
+        
     def writecachedrepo(self, ext, info=None):
-       try:
-           if not os.path.isdir("/var/cache/solv"):
-               os.mkdir("/var/cache/solv", 0755)
-           (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
-           os.fchmod(fd, 0444)
-           f = os.fdopen(fd, 'w+')
-           if not info:
-               self.handle.write(f)
-           elif ext:
-               info.write(f)
-           else:       # rewrite_repos case
-               self.handle.write_first_repodata(f)
-           if self.type != 'system' and not ext:
-               if 'extcookie' not in self:
-                   self.genextcookie(f)
-               f.write(self['extcookie'])
-           if not ext:
-               f.write(self['cookie'])
-           else:
-               f.write(self['extcookie'])
-           f.close()
-           if self.handle.iscontiguous():
-               # switch to saved repo to activate paging and save memory
-               nf = solv.xfopen(tmpname)
-               if not ext:
-                   # main repo
-                   self.handle.empty()
-                   if not self.handle.add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
-                       sys.exit("internal error, cannot reload solv file")
-               else:
-                   # extension repodata
-                   # need to extend to repo boundaries, as this is how
-                   # info.write() has written the data
-                   info.extend_to_repo()
-                   # LOCALPOOL does not help as pool already contains all ids
-                   info.add_solv(nf, Repo.REPO_EXTEND_SOLVABLES)
-               solv.xfclose(nf)
-           os.rename(tmpname, self.cachepath(ext))
-       except IOError, e:
-           if tmpname:
-               os.unlink(tmpname)
-               
+        try:
+            if not os.path.isdir("/var/cache/solv"):
+                os.mkdir("/var/cache/solv", 0755)
+            (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
+            os.fchmod(fd, 0444)
+            f = os.fdopen(fd, 'w+')
+            if not info:
+                self.handle.write(f)
+            elif ext:
+                info.write(f)
+            else:       # rewrite_repos case
+                self.handle.write_first_repodata(f)
+            if self.type != 'system' and not ext:
+                if 'extcookie' not in self:
+                    self.genextcookie(f)
+                f.write(self['extcookie'])
+            if not ext:
+                f.write(self['cookie'])
+            else:
+                f.write(self['extcookie'])
+            f.close()
+            if self.handle.iscontiguous():
+                # switch to saved repo to activate paging and save memory
+                nf = solv.xfopen(tmpname)
+                if not ext:
+                    # main repo
+                    self.handle.empty()
+                    if not self.handle.add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
+                        sys.exit("internal error, cannot reload solv file")
+                else:
+                    # extension repodata
+                    # need to extend to repo boundaries, as this is how
+                    # info.write() has written the data
+                    info.extend_to_repo()
+                    # LOCALPOOL does not help as pool already contains all ids
+                    info.add_solv(nf, Repo.REPO_EXTEND_SOLVABLES)
+                solv.xfclose(nf)
+            os.rename(tmpname, self.cachepath(ext))
+        except IOError, e:
+            if tmpname:
+                os.unlink(tmpname)
+                
     def updateaddedprovides(self, addedprovides):
-       if 'incomplete' in self:
-           return 
-       if 'handle' not in self:
-           return 
-       if self.handle.isempty():
-           return
-       # make sure there's just one real repodata with extensions
-       repodata = self.handle.first_repodata()
-       if not repodata:
-           return
-       oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
-       if not set(addedprovides) <= set(oldaddedprovides):
-           for id in addedprovides:
-               repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
-           repodata.internalize()
-           self.writecachedrepo(None, repodata)
+        if 'incomplete' in self:
+            return 
+        if 'handle' not in self:
+            return 
+        if self.handle.isempty():
+            return
+        # make sure there's just one real repodata with extensions
+        repodata = self.handle.first_repodata()
+        if not repodata:
+            return
+        oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
+        if not set(addedprovides) <= set(oldaddedprovides):
+            for id in addedprovides:
+                repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
+            repodata.internalize()
+            self.writecachedrepo(None, repodata)
 
 class repo_repomd(repo_generic):
     def load_if_changed(self):
-       print "rpmmd repo '%s':" % self.name,
-       sys.stdout.flush()
-       f = self.download("repodata/repomd.xml", False, None, None)
-       if not f:
-           print "no repomd.xml file, skipped"
-           self.handle.free(True)
-           del self.handle
-           return False
-       self['cookie'] = calc_cookie_fp(f)
-       if self.usecachedrepo(None, True):
-           print "cached"
-           solv.xfclose(f)
-           return True
-       self.handle.add_repomdxml(f, 0)
-       solv.xfclose(f)
-       print "fetching"
-       (filename, filechksum) = self.find('primary')
-       if filename:
-           f = self.download(filename, True, filechksum, True)
-           if f:
-               self.handle.add_rpmmd(f, None, 0)
-               solv.xfclose(f)
-           if 'incomplete' in self:
-               return False # hopeless, need good primary
-       (filename, filechksum) = self.find('updateinfo')
-       if filename:
-           f = self.download(filename, True, filechksum, True)
-           if f:
-               self.handle.add_updateinfoxml(f, 0)
-               solv.xfclose(f)
-       self.add_exts()
-       if 'incomplete' not in self:
-           self.writecachedrepo(None)
-       # must be called after writing the repo
-       self.handle.create_stubs()
-       return True
+        print "rpmmd repo '%s':" % self.name,
+        sys.stdout.flush()
+        f = self.download("repodata/repomd.xml", False, None, None)
+        if not f:
+            print "no repomd.xml file, skipped"
+            self.handle.free(True)
+            del self.handle
+            return False
+        self['cookie'] = calc_cookie_fp(f)
+        if self.usecachedrepo(None, True):
+            print "cached"
+            solv.xfclose(f)
+            return True
+        self.handle.add_repomdxml(f, 0)
+        solv.xfclose(f)
+        print "fetching"
+        (filename, filechksum) = self.find('primary')
+        if filename:
+            f = self.download(filename, True, filechksum, True)
+            if f:
+                self.handle.add_rpmmd(f, None, 0)
+                solv.xfclose(f)
+            if 'incomplete' in self:
+                return False # hopeless, need good primary
+        (filename, filechksum) = self.find('updateinfo')
+        if filename:
+            f = self.download(filename, True, filechksum, True)
+            if f:
+                self.handle.add_updateinfoxml(f, 0)
+                solv.xfclose(f)
+        self.add_exts()
+        if 'incomplete' not in self:
+            self.writecachedrepo(None)
+        # must be called after writing the repo
+        self.handle.create_stubs()
+        return True
 
     def find(self, what):
-       di = self.handle.Dataiterator(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
-       di.prepend_keyname(solv.REPOSITORY_REPOMD)
-       for d in di:
-           d.setpos_parent()
-           filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
-           chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
-           if filename and not chksum:
-               print "no %s file checksum!" % filename
-               filename = None
-               chksum = None
-           if filename:
-               return (filename, chksum)
-       return (None, None)
-       
+        di = self.handle.Dataiterator(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
+        di.prepend_keyname(solv.REPOSITORY_REPOMD)
+        for d in di:
+            d.setpos_parent()
+            filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
+            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
+            if filename and not chksum:
+                print "no %s file checksum!" % filename
+                filename = None
+                chksum = None
+            if filename:
+                return (filename, chksum)
+        return (None, None)
+        
     def add_ext(self, repodata, what, ext):
-       filename, chksum = self.find(what)
-       if not filename and what == 'deltainfo':
-           filename, chksum = self.find('prestodelta')
-       if not filename:
-           return
-       handle = repodata.new_handle()
-       repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
-       repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
-       repodata.set_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
-       if ext == 'DL':
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
-       elif ext == 'FL':
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
-       repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
+        filename, chksum = self.find(what)
+        if not filename and what == 'deltainfo':
+            filename, chksum = self.find('prestodelta')
+        if not filename:
+            return
+        handle = repodata.new_handle()
+        repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
+        repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
+        repodata.set_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
+        if ext == 'DL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
+        elif ext == 'FL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
 
     def add_exts(self):
-       repodata = self.handle.add_repodata(0)
-       self.add_ext(repodata, 'deltainfo', 'DL')
-       self.add_ext(repodata, 'filelists', 'FL')
-       repodata.internalize()
+        repodata = self.handle.add_repodata(0)
+        self.add_ext(repodata, 'deltainfo', 'DL')
+        self.add_ext(repodata, 'filelists', 'FL')
+        repodata.internalize()
     
     def load_ext(self, repodata):
-       repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
-       if repomdtype == 'filelists':
-           ext = 'FL'
-       elif repomdtype == 'deltainfo':
-           ext = 'DL'
-       else:
-           return False
-       sys.stdout.write("[%s:%s: " % (self.name, ext))
-       if self.usecachedrepo(ext):
-           sys.stdout.write("cached]\n")
-           sys.stdout.flush()
-           return True
-       sys.stdout.write("fetching]\n")
-       sys.stdout.flush()
-       filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
-       filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
-       f = self.download(filename, True, filechksum)
-       if not f:
-           return False
-       if ext == 'FL':
-           self.handle.add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
-       elif ext == 'DL':
-           self.handle.add_deltainfoxml(f, Repo.REPO_USE_LOADING)
-       solv.xfclose(f)
-       self.writecachedrepo(ext, repodata)
-       return True
+        repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
+        if repomdtype == 'filelists':
+            ext = 'FL'
+        elif repomdtype == 'deltainfo':
+            ext = 'DL'
+        else:
+            return False
+        sys.stdout.write("[%s:%s: " % (self.name, ext))
+        if self.usecachedrepo(ext):
+            sys.stdout.write("cached]\n")
+            sys.stdout.flush()
+            return True
+        sys.stdout.write("fetching]\n")
+        sys.stdout.flush()
+        filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
+        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
+        f = self.download(filename, True, filechksum)
+        if not f:
+            return False
+        if ext == 'FL':
+            self.handle.add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+        elif ext == 'DL':
+            self.handle.add_deltainfoxml(f, Repo.REPO_USE_LOADING)
+        solv.xfclose(f)
+        self.writecachedrepo(ext, repodata)
+        return True
 
 class repo_susetags(repo_generic):
     def load_if_changed(self):
-       print "susetags repo '%s':" % self.name,
-       sys.stdout.flush()
-       f = self.download("content", False, None, None)
+        print "susetags repo '%s':" % self.name,
+        sys.stdout.flush()
+        f = self.download("content", False, None, None)
         if not f:
-           print "no content file, skipped"
-           self.handle.free(True)
-           del self.handle
-           return False
-       self['cookie'] = calc_cookie_fp(f)
-       if self.usecachedrepo(None, True):
-           print "cached"
-           solv.xfclose(f)
-           return True
-       self.handle.add_content(f, 0)
-       solv.xfclose(f)
-       print "fetching"
-       defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
-       descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
-       if not descrdir:
-           descrdir = "suse/setup/descr"
-       (filename, filechksum) = self.find('packages.gz')
-       if not filename:
-           (filename, filechksum) = self.find('packages')
-       if filename:
-           f = self.download(descrdir + '/' + filename, True, filechksum, True)
-           if f:
-               self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
-               solv.xfclose(f)
-               (filename, filechksum) = self.find('packages.en.gz')
-               if not filename:
-                   (filename, filechksum) = self.find('packages.en')
-               if filename:
-                   f = self.download(descrdir + '/' + filename, True, filechksum, True)
-                   if f:
-                       self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
-                       solv.xfclose(f)
-               self.handle.internalize()
-       self.add_exts()
-       if 'incomplete' not in self:
-           self.writecachedrepo(None)
-       # must be called after writing the repo
-       self.handle.create_stubs()
-       return True
+            print "no content file, skipped"
+            self.handle.free(True)
+            del self.handle
+            return False
+        self['cookie'] = calc_cookie_fp(f)
+        if self.usecachedrepo(None, True):
+            print "cached"
+            solv.xfclose(f)
+            return True
+        self.handle.add_content(f, 0)
+        solv.xfclose(f)
+        print "fetching"
+        defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        if not descrdir:
+            descrdir = "suse/setup/descr"
+        (filename, filechksum) = self.find('packages.gz')
+        if not filename:
+            (filename, filechksum) = self.find('packages')
+        if filename:
+            f = self.download(descrdir + '/' + filename, True, filechksum, True)
+            if f:
+                self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
+                solv.xfclose(f)
+                (filename, filechksum) = self.find('packages.en.gz')
+                if not filename:
+                    (filename, filechksum) = self.find('packages.en')
+                if filename:
+                    f = self.download(descrdir + '/' + filename, True, filechksum, True)
+                    if f:
+                        self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
+                        solv.xfclose(f)
+                self.handle.internalize()
+        self.add_exts()
+        if 'incomplete' not in self:
+            self.writecachedrepo(None)
+        # must be called after writing the repo
+        self.handle.create_stubs()
+        return True
 
     def find(self, what):
-       di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
-       di.prepend_keyname(solv.SUSETAGS_FILE)
-       for d in di:
-           d.setpos_parent()
-           chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
-           return (what, chksum)
-       return (None, None)
+        di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
+        di.prepend_keyname(solv.SUSETAGS_FILE)
+        for d in di:
+            d.setpos_parent()
+            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
+            return (what, chksum)
+        return (None, None)
 
     def add_ext(self, repodata, what, ext):
-       (filename, chksum) = self.find(what)
-       if not filename:
-           return
-       handle = repodata.new_handle()
-       repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
-       if chksum:
-           repodata.set_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
-       if ext == 'DU':
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
-       elif ext == 'FL':
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
-           repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
-       else:
-           for langtag, langtagtype in [
-               (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
-               (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
-               (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
-               (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
-               (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
-               (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
-           ]:
-               repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self.handle.pool.id2langid(langtag, ext, 1))
-               repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
-       repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
-       
+        (filename, chksum) = self.find(what)
+        if not filename:
+            return
+        handle = repodata.new_handle()
+        repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
+        if chksum:
+            repodata.set_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
+        if ext == 'DU':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
+        elif ext == 'FL':
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
+            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
+        else:
+            for langtag, langtagtype in [
+                (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
+                (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
+            ]:
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self.handle.pool.id2langid(langtag, ext, 1))
+                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
+        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
+        
     def add_exts(self):
-       repodata = self.handle.add_repodata(0)
-       di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
-       di.prepend_keyname(solv.SUSETAGS_FILE)
-       for d in di:
-           filename = d.str()
-           if not filename:
-               continue
-           if filename[0:9] != "packages.":
-               continue
-           if len(filename) == 11 and filename != "packages.gz":
-               ext = filename[9:11]
-           elif filename[11:12] == ".":
-               ext = filename[9:11]
-           else:
-               continue
-           if ext == "en":
-               continue
-           self.add_ext(repodata, filename, ext)
-       repodata.internalize()
+        repodata = self.handle.add_repodata(0)
+        di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
+        di.prepend_keyname(solv.SUSETAGS_FILE)
+        for d in di:
+            filename = d.str()
+            if not filename:
+                continue
+            if filename[0:9] != "packages.":
+                continue
+            if len(filename) == 11 and filename != "packages.gz":
+                ext = filename[9:11]
+            elif filename[11:12] == ".":
+                ext = filename[9:11]
+            else:
+                continue
+            if ext == "en":
+                continue
+            self.add_ext(repodata, filename, ext)
+        repodata.internalize()
 
     def load_ext(self, repodata):
-       filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
-       ext = filename[9:11]
-       sys.stdout.write("[%s:%s: " % (self.name, ext))
-       if self.usecachedrepo(ext):
-           sys.stdout.write("cached]\n")
-           sys.stdout.flush()
-           return True
-       sys.stdout.write("fetching]\n")
-       sys.stdout.flush()
-       defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
-       descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
-       if not descrdir:
-           descrdir = "suse/setup/descr"
-       filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
-       f = self.download(descrdir + '/' + filename, True, filechksum)
-       if not f:
-           return False
-       self.handle.add_susetags(f, defvendorid, ext, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
-       solv.xfclose(f)
-       self.writecachedrepo(ext, repodata)
-       return True
+        filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
+        ext = filename[9:11]
+        sys.stdout.write("[%s:%s: " % (self.name, ext))
+        if self.usecachedrepo(ext):
+            sys.stdout.write("cached]\n")
+            sys.stdout.flush()
+            return True
+        sys.stdout.write("fetching]\n")
+        sys.stdout.flush()
+        defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
+        descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
+        if not descrdir:
+            descrdir = "suse/setup/descr"
+        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
+        f = self.download(descrdir + '/' + filename, True, filechksum)
+        if not f:
+            return False
+        self.handle.add_susetags(f, defvendorid, ext, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
+        solv.xfclose(f)
+        self.writecachedrepo(ext, repodata)
+        return True
 
 class repo_unknown(repo_generic):
     def load(self, pool):
-       print "unsupported repo '%s': skipped" % self.name
-       return False
+        print "unsupported repo '%s': skipped" % self.name
+        return False
 
 class repo_system(repo_generic):
     def load(self, pool):
-       self.handle = pool.add_repo(self.name)
-       self.handle.appdata = self
-       pool.installed = self.handle
-       print "rpm database:",
-       self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
-       if self.usecachedrepo(None):
-           print "cached"
-           return True
-       print "reading"
-       self.handle.add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
-       self.handle.add_rpmdb(None, Repo.REPO_REUSE_REPODATA)
-       self.writecachedrepo(None)
-       return True
+        self.handle = pool.add_repo(self.name)
+        self.handle.appdata = self
+        pool.installed = self.handle
+        print "rpm database:",
+        self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
+        if self.usecachedrepo(None):
+            print "cached"
+            return True
+        print "reading"
+        self.handle.add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
+        self.handle.add_rpmdb(None, Repo.REPO_REUSE_REPODATA)
+        self.writecachedrepo(None)
+        return True
 
 class repo_cmdline(repo_generic):
     def load(self, pool):
-       self.handle = pool.add_repo(self.name)
-       self.handle.appdata = self 
-       return True
+        self.handle = pool.add_repo(self.name)
+        self.handle.appdata = self 
+        return True
 
 def validarch(pool, arch):
     if not arch:
-       return False
+        return False
     id = pool.str2id(arch, False)
     if not id:
-       return False
+        return False
     return pool.isknownarch(id)
 
 def limitjobs(pool, jobs, flags, evrstr):
     njobs = []
     evr = pool.str2id(evrstr)
     for j in jobs:
-       how = j.how
-       sel = how & Job.SOLVER_SELECTMASK
-       what = pool.rel2id(j.what, evr, flags)
+        how = j.how
+        sel = how & Job.SOLVER_SELECTMASK
+        what = pool.rel2id(j.what, evr, flags)
         if flags == solv.REL_ARCH:
-           how |= Job.SOLVER_SETARCH
-       elif flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
-           if evrstr.find('-') >= 0:
-               how |= Job.SOLVER_SETEVR
-           else:
-               how |= Job.SOLVER_SETEV
-       njobs.append(pool.Job(how, what))
+            how |= Job.SOLVER_SETARCH
+        elif flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
+            if evrstr.find('-') >= 0:
+                how |= Job.SOLVER_SETEVR
+            else:
+                how |= Job.SOLVER_SETEV
+        njobs.append(pool.Job(how, what))
     return njobs
 
 def limitjobs_evrarch(pool, jobs, flags, evrstr):
     m = re.match(r'(.+)\.(.+?)$', evrstr)
     if m and validarch(pool, m.group(2)):
-       jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
+        jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
         evrstr = m.group(1)
     return limitjobs(pool, jobs, flags, evrstr)
 
 def mkjobs_filelist(pool, cmd, arg):
     if re.search(r'[[*?]', arg):
-       type = Dataiterator.SEARCH_GLOB
+        type = Dataiterator.SEARCH_GLOB
     else:
-       type = Dataiterator.SEARCH_STRING
+        type = Dataiterator.SEARCH_STRING
     if cmd == 'erase':
-       di = pool.installed.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
+        di = pool.installed.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
     else:
-       di = pool.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
+        di = pool.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
     matches = []
     for d in di:
-       s = d.solvable
-       if s and s.installable():
-           matches.append(s.id)
-           di.skip_solvable()  # one match is enough
+        s = d.solvable
+        if s and s.installable():
+            matches.append(s.id)
+            di.skip_solvable()  # one match is enough
     if matches:
-       print "[using file list match for '%s']" % arg
-       if len(matches) > 1:
-           return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
-       else:
-           return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
+        print "[using file list match for '%s']" % arg
+        if len(matches) > 1:
+            return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
+        else:
+            return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
     return []
 
 def mkjobs_rel(pool, cmd, name, rel, evr):
@@ -623,83 +623,83 @@ def mkjobs_rel(pool, cmd, name, rel, evr):
     if rel.find('>') >= 0: flags |= solv.REL_GT
     jobs = depglob(pool, name, True, True)
     if jobs:
-       return limitjobs(pool, jobs, flags, evr)
+        return limitjobs(pool, jobs, flags, evr)
     m = re.match(r'(.+)\.(.+?)$', name)
     if m and validarch(pool, m.group(2)):
-       jobs = depglob(pool, m.group(1), True, True)
-       if jobs:
-           jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
-           return limitjobs(pool, jobs, flags, evr)
+        jobs = depglob(pool, m.group(1), True, True)
+        if jobs:
+            jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
+            return limitjobs(pool, jobs, flags, evr)
     return []
 
 def mkjobs_nevra(pool, cmd, arg):
     jobs = depglob(pool, arg, True, True)
     if jobs:
-       return jobs
+        return jobs
     m = re.match(r'(.+)\.(.+?)$', arg)
     if m and validarch(pool, m.group(2)):
-       jobs = depglob(pool, m.group(1), True, True)
-       if jobs:
-           return limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
+        jobs = depglob(pool, m.group(1), True, True)
+        if jobs:
+            return limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
     m = re.match(r'(.+)-(.+?)$', arg)
     if m:
-       jobs = depglob(pool, m.group(1), True, False)
-       if jobs:
-           return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
+        jobs = depglob(pool, m.group(1), True, False)
+        if jobs:
+            return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
     m = re.match(r'(.+)-(.+?-.+?)$', arg)
     if m:
-       jobs = depglob(pool, m.group(1), True, False)
-       if jobs:
-           return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
+        jobs = depglob(pool, m.group(1), True, False)
+        if jobs:
+            return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
     return []
 
 def mkjobs(pool, cmd, arg):
     if len(arg) and arg[0] == '/':
-       jobs = mkjobs_filelist(pool, cmd, arg)
-       if jobs:
-           return jobs
+        jobs = mkjobs_filelist(pool, cmd, arg)
+        if jobs:
+            return jobs
     m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
     if m:
-       return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
+        return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
     else:
-       return mkjobs_nevra(pool, cmd, arg)
-           
+        return mkjobs_nevra(pool, cmd, arg)
+            
 def depglob(pool, name, globname, globdep):
     id = pool.str2id(name, False)
     if id:
-       match = False
-       for s in pool.whatprovides(id):
-           if globname and s.nameid == id:
-               return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
-           match = True
-       if match:
-           if globname and globdep:
-               print "[using capability match for '%s']" % name
-           return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
+        match = False
+        for s in pool.whatprovides(id):
+            if globname and s.nameid == id:
+                return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
+            match = True
+        if match:
+            if globname and globdep:
+                print "[using capability match for '%s']" % name
+            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
     if not re.search(r'[[*?]', name):
-       return []
+        return []
     if globname:
-       # try name glob
-       idmatches = {}
-       for d in pool.Dataiterator(0, solv.SOLVABLE_NAME, name, Dataiterator.SEARCH_GLOB):
-           s = d.solvable
-           if s.installable():
-               idmatches[s.nameid] = True
-       if idmatches:
-           return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
+        # try name glob
+        idmatches = {}
+        for d in pool.Dataiterator(0, solv.SOLVABLE_NAME, name, Dataiterator.SEARCH_GLOB):
+            s = d.solvable
+            if s.installable():
+                idmatches[s.nameid] = True
+        if idmatches:
+            return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
     if globdep:
-       # try dependency glob
-       idmatches = pool.matchprovidingids(name, Dataiterator.SEARCH_GLOB)
-       if idmatches:
-           print "[using capability match for '%s']" % name
-           return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches) ]
+        # try dependency glob
+        idmatches = pool.matchprovidingids(name, Dataiterator.SEARCH_GLOB)
+        if idmatches:
+            print "[using capability match for '%s']" % name
+            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches) ]
     return []
     
 
 def load_stub(repodata):
     repo = repodata.repo.appdata
     if repo:
-       return repo.load_ext(repodata)
+        return repo.load_ext(repodata)
     return False
 
 
@@ -727,24 +727,24 @@ if cmd == 'se':
 repos = []
 for reposdir in ["/etc/zypp/repos.d"]:
     if not os.path.isdir(reposdir):
-       continue
+        continue
     for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
-       cfg = INIConfig(open(reponame))
-       for alias in cfg:
-           repoattr = {'enabled': 0, 'priority': 99, 'autorefresh': 1, 'type': 'rpm-md', 'metadata_expire': 900}
-           for k in cfg[alias]:
-               repoattr[k] = cfg[alias][k]
-           if 'mirrorlist' in repoattr and 'metalink' not in repoattr:
-               if repoattr['mirrorlist'].find('/metalink'):
-                   repoattr['metalink'] = repoattr['mirrorlist']
-                   del repoattr['mirrorlist']
-           if repoattr['type'] == 'rpm-md':
-               repo = repo_repomd(alias, 'repomd', repoattr)
-           elif repoattr['type'] == 'yast2':
-               repo = repo_susetags(alias, 'susetags', repoattr)
-           else:
-               repo = repo_unknown(alias, 'unknown', repoattr)
-           repos.append(repo)
+        cfg = INIConfig(open(reponame))
+        for alias in cfg:
+            repoattr = {'enabled': 0, 'priority': 99, 'autorefresh': 1, 'type': 'rpm-md', 'metadata_expire': 900}
+            for k in cfg[alias]:
+                repoattr[k] = cfg[alias][k]
+            if 'mirrorlist' in repoattr and 'metalink' not in repoattr:
+                if repoattr['mirrorlist'].find('/metalink'):
+                    repoattr['metalink'] = repoattr['mirrorlist']
+                    del repoattr['mirrorlist']
+            if repoattr['type'] == 'rpm-md':
+                repo = repo_repomd(alias, 'repomd', repoattr)
+            elif repoattr['type'] == 'yast2':
+                repo = repo_susetags(alias, 'susetags', repoattr)
+            else:
+                repo = repo_unknown(alias, 'unknown', repoattr)
+            repos.append(repo)
 
 pool = solv.Pool()
 pool.setarch(os.uname()[4])
@@ -755,34 +755,34 @@ sysrepo = repo_system('@System', 'system')
 sysrepo.load(pool)
 for repo in repos:
     if int(repo['enabled']):
-       repo.load(pool)
+        repo.load(pool)
     
 if cmd == 'search':
     matches = {}
     di = pool.Dataiterator(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
     for d in di:
-       matches[d.solvid] = True
+        matches[d.solvid] = True
     for solvid in sorted(matches.keys()):
-       print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
+        print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
     sys.exit(0)
 
 cmdlinerepo = None
 if cmd == 'list' or cmd == 'info' or cmd == 'install':
     for arg in args:
-       if arg.endswith(".rpm") and os.access(arg, os.R_OK):
-           if not cmdlinerepo:
-               cmdlinerepo = repo_cmdline('@commandline', 'cmdline')
-               cmdlinerepo.load(pool)
-               cmdlinerepo['packages'] = {}
-           cmdlinerepo['packages'][arg] = cmdlinerepo.handle.add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
+        if arg.endswith(".rpm") and os.access(arg, os.R_OK):
+            if not cmdlinerepo:
+                cmdlinerepo = repo_cmdline('@commandline', 'cmdline')
+                cmdlinerepo.load(pool)
+                cmdlinerepo['packages'] = {}
+            cmdlinerepo['packages'][arg] = cmdlinerepo.handle.add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
     if cmdlinerepo:
-       cmdlinerepo.handle.internalize()
+        cmdlinerepo.handle.internalize()
 
 addedprovides = pool.addfileprovides_ids()
 if addedprovides:
     sysrepo.updateaddedprovides(addedprovides)
     for repo in repos:
-       repo.updateaddedprovides(addedprovides)
+        repo.updateaddedprovides(addedprovides)
 
 pool.createwhatprovides()
 
@@ -790,139 +790,139 @@ pool.createwhatprovides()
 jobs = []
 for arg in args:
     if cmdlinerepo and arg in cmdlinerepo['packages']:
-       jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
+        jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
     else:
-       njobs = mkjobs(pool, cmd, arg)
-       if not njobs:
-           print "nothing matches '%s'" % arg
-           sys.exit(1)
-       jobs += njobs
+        njobs = mkjobs(pool, cmd, arg)
+        if not njobs:
+            print "nothing matches '%s'" % arg
+            sys.exit(1)
+        jobs += njobs
 
 if cmd == 'list' or cmd == 'info':
     if not jobs:
-       print "no package matched."
-       sys.exit(1)
+        print "no package matched."
+        sys.exit(1)
     for job in jobs:
-       for s in job.solvables():
-           if cmd == 'info':
-               print "Name:        %s" % s
-               print "Repo:        %s" % s.repo
-               print "Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
-               str = s.lookup_str(solv.SOLVABLE_URL)
-               if str:
-                   print "Url:         %s" % str
-               str = s.lookup_str(solv.SOLVABLE_LICENSE)
-               if str:
-                   print "License:     %s" % str
-               print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
-               print
-           else:
-               print "  - %s [%s]" % (s, s.repo)
-               print "    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+        for s in job.solvables():
+            if cmd == 'info':
+                print "Name:        %s" % s
+                print "Repo:        %s" % s.repo
+                print "Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
+                str = s.lookup_str(solv.SOLVABLE_URL)
+                if str:
+                    print "Url:         %s" % str
+                str = s.lookup_str(solv.SOLVABLE_LICENSE)
+                if str:
+                    print "License:     %s" % str
+                print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
+                print
+            else:
+                print "  - %s [%s]" % (s, s.repo)
+                print "    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
     sys.exit(0)
 
 if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == 'verify':
     if not jobs:
-       if cmd == 'up' or cmd == 'verify':
-           jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
-       elif cmd == 'dup':
-           pass
-       else:
-           print "no package matched."
-           sys.exit(1)
+        if cmd == 'up' or cmd == 'verify':
+            jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
+        elif cmd == 'dup':
+            pass
+        else:
+            print "no package matched."
+            sys.exit(1)
     for job in jobs:
-       if cmd == 'up':
-           # up magic: use install instead of update if no installed package matches
-           if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), job.solvables()):
-               job.how |= Job.SOLVER_UPDATE
-           else:
-               job.how |= Job.SOLVER_INSTALL
-       elif cmd == 'install':
-           job.how |= Job.SOLVER_INSTALL
-       elif cmd == 'erase':
-           job.how |= Job.SOLVER_ERASE
-       elif cmd == 'dup':
-           job.how |= Job.SOLVER_DISTUPGRADE
-       elif cmd == 'verify':
-           job.how |= Job.SOLVER_VERIFY
+        if cmd == 'up':
+            # up magic: use install instead of update if no installed package matches
+            if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), job.solvables()):
+                job.how |= Job.SOLVER_UPDATE
+            else:
+                job.how |= Job.SOLVER_INSTALL
+        elif cmd == 'install':
+            job.how |= Job.SOLVER_INSTALL
+        elif cmd == 'erase':
+            job.how |= Job.SOLVER_ERASE
+        elif cmd == 'dup':
+            job.how |= Job.SOLVER_DISTUPGRADE
+        elif cmd == 'verify':
+            job.how |= Job.SOLVER_VERIFY
 
     #pool.set_debuglevel(2)
     solver = None
     while True:
-       solver = pool.Solver()
-       solver.ignorealreadyrecommended = True
-       if cmd == 'erase':
-           solver.allowuninstall = True
-       if cmd == 'dup' and not jobs:
-           solver.distupgrade = True
-           solver.updatesystem = True
-           solver.allowdowngrade = True
-           solver.allowvendorchange = True
-           solver.allowarchchange = True
-           solver.dosplitprovides = True
-       if cmd == 'up' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_UPDATE | Job.SOLVER_SOLVABLE_ALL):
-           solver.dosplitprovides = True
-       problems = solver.solve(jobs)
-       if not problems:
-           break
-       for problem in problems:
-           print "Problem %d:" % problem.id
-           r = problem.findproblemrule()
-           ri = r.info()
-           print ri.problemstr()
-           solutions = problem.solutions()
-           for solution in solutions:
-               print "  Solution %d:" % solution.id
-               elements = solution.elements()
+        solver = pool.Solver()
+        solver.ignorealreadyrecommended = True
+        if cmd == 'erase':
+            solver.allowuninstall = True
+        if cmd == 'dup' and not jobs:
+            solver.distupgrade = True
+            solver.updatesystem = True
+            solver.allowdowngrade = True
+            solver.allowvendorchange = True
+            solver.allowarchchange = True
+            solver.dosplitprovides = True
+        if cmd == 'up' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_UPDATE | Job.SOLVER_SOLVABLE_ALL):
+            solver.dosplitprovides = True
+        problems = solver.solve(jobs)
+        if not problems:
+            break
+        for problem in problems:
+            print "Problem %d:" % problem.id
+            r = problem.findproblemrule()
+            ri = r.info()
+            print ri.problemstr()
+            solutions = problem.solutions()
+            for solution in solutions:
+                print "  Solution %d:" % solution.id
+                elements = solution.elements()
                 for element in elements:
-                   etype = element.type
-                   if etype == Solver.SOLVER_SOLUTION_JOB:
-                       print "  - do not ask to", jobs[element.jobidx]
-                   elif etype == Solver.SOLVER_SOLUTION_INFARCH:
-                       if element.solvable.isinstalled():
-                           print "  - keep %s despite the inferior architecture" % element.solvable
-                       else:
-                           print "  - install %s despite the inferior architecture" % element.solvable
-                   elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
-                       if element.solvable.isinstalled():
-                           print "  - keep obsolete %s" % element.solvable
-                       else:
-                           print "  - install %s from excluded repository" % element.solvable
-                   elif etype == Solver.SOLVER_SOLUTION_REPLACE:
-                       illegal = element.illegalreplace()
-                       if illegal & solver.POLICY_ILLEGAL_DOWNGRADE:
-                           print "  - allow downgrade of %s to %s" % (element.solvable, element.replacement)
-                       if illegal & solver.POLICY_ILLEGAL_ARCHCHANGE:
-                           print "  - allow architecture change of %s to %s" % (element.solvable, element.replacement)
-                       if illegal & solver.POLICY_ILLEGAL_VENDORCHANGE:
-                           if element.replacement.vendorid:
-                               print "  - allow vendor change from '%s' (%s) to '%s' (%s)" % (element.solvable.vendor, element.solvable, element.replacement.vendor, element.replacement)
-                           else:
-                               print "  - allow vendor change from '%s' (%s) to no vendor (%s)" % (element.solvable.vendor, element.solvable, element.replacement)
-                       if illegal == 0:
-                           print "  - allow replacement of %s with %s" % (element.solvable, element.replacement)
-                   elif etype == Solver.SOLVER_SOLUTION_ERASE:
-                       print "  - allow deinstallation of %s" % element.solvable
-               print
-           sol = ''
-           while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
-               sys.stdout.write("Please choose a solution: ")
-               sys.stdout.flush()
-               sol = sys.stdin.readline().strip()
-           if sol == 's':
-               continue        # skip problem
-           if sol == 'q':
-               sys.exit(1)
-           solution = solutions[int(sol) - 1]
-           for element in solution.elements():
-               etype = element.type
-               if etype == Solver.SOLVER_SOLUTION_JOB:
-                   jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
-               else:
-                   newjob = element.Job()
-                   if newjob and newjob not in jobs:
-                       jobs.append(newjob)
-                       
+                    etype = element.type
+                    if etype == Solver.SOLVER_SOLUTION_JOB:
+                        print "  - do not ask to", jobs[element.jobidx]
+                    elif etype == Solver.SOLVER_SOLUTION_INFARCH:
+                        if element.solvable.isinstalled():
+                            print "  - keep %s despite the inferior architecture" % element.solvable
+                        else:
+                            print "  - install %s despite the inferior architecture" % element.solvable
+                    elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
+                        if element.solvable.isinstalled():
+                            print "  - keep obsolete %s" % element.solvable
+                        else:
+                            print "  - install %s from excluded repository" % element.solvable
+                    elif etype == Solver.SOLVER_SOLUTION_REPLACE:
+                        illegal = element.illegalreplace()
+                        if illegal & solver.POLICY_ILLEGAL_DOWNGRADE:
+                            print "  - allow downgrade of %s to %s" % (element.solvable, element.replacement)
+                        if illegal & solver.POLICY_ILLEGAL_ARCHCHANGE:
+                            print "  - allow architecture change of %s to %s" % (element.solvable, element.replacement)
+                        if illegal & solver.POLICY_ILLEGAL_VENDORCHANGE:
+                            if element.replacement.vendorid:
+                                print "  - allow vendor change from '%s' (%s) to '%s' (%s)" % (element.solvable.vendor, element.solvable, element.replacement.vendor, element.replacement)
+                            else:
+                                print "  - allow vendor change from '%s' (%s) to no vendor (%s)" % (element.solvable.vendor, element.solvable, element.replacement)
+                        if illegal == 0:
+                            print "  - allow replacement of %s with %s" % (element.solvable, element.replacement)
+                    elif etype == Solver.SOLVER_SOLUTION_ERASE:
+                        print "  - allow deinstallation of %s" % element.solvable
+                print
+            sol = ''
+            while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
+                sys.stdout.write("Please choose a solution: ")
+                sys.stdout.flush()
+                sol = sys.stdin.readline().strip()
+            if sol == 's':
+                continue        # skip problem
+            if sol == 'q':
+                sys.exit(1)
+            solution = solutions[int(sol) - 1]
+            for element in solution.elements():
+                etype = element.type
+                if etype == Solver.SOLVER_SOLUTION_JOB:
+                    jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
+                else:
+                    newjob = element.Job()
+                    if newjob and newjob not in jobs:
+                        jobs.append(newjob)
+                        
     # no problems, show transaction
     trans = solver.transaction()
     del solver
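
The hunk above is a whitespace-only reindent (tabs to four spaces) of pysolv's interactive resolution loop. The control flow it preserves: solve the accumulated jobs, and while the solver reports problems, print each problem's rule and candidate solutions, let the user pick one, fold that choice back into the job list (neutralizing a job with SOLVER_NOOP or appending a new one), then solve again. A minimal sketch of that pattern, using only binding calls that already appear in this diff and assuming pysolv's own imports (import solv plus the unqualified Job/Solver names) are in scope; unlike the real script it takes the first solution instead of prompting:

    def resolve_interactively(pool, jobs):
        # Sketch only: no prompt, no validation; the first solution is always taken.
        while True:
            solver = pool.Solver()
            problems = solver.solve(jobs)
            if not problems:
                return solver                      # caller builds the transaction
            for problem in problems:
                print "Problem %d:" % problem.id
                print problem.findproblemrule().info().problemstr()
                solution = problem.solutions()[0]  # a real UI would ask the user
                for element in solution.elements():
                    if element.type == Solver.SOLVER_SOLUTION_JOB:
                        jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
                    else:
                        newjob = element.Job()
                        if newjob and newjob not in jobs:
                            jobs.append(newjob)
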
@@ -933,153 +933,154 @@ if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == '
     print "Transaction summary:"
     print
     for cl in trans.classify():
-       if cl.type == Transaction.SOLVER_TRANSACTION_ERASE:
-           print "%d erased packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_INSTALL:
-           print "%d installed packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_REINSTALLED:
-           print "%d reinstalled packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
-           print "%d downgraded packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_CHANGED:
-           print "%d changed packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED:
-           print "%d upgraded packages:" % cl.count
-       elif cl.type == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
-           print "%d vendor changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
-       elif cl.type == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
-           print "%d arch changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
-       else:
-           continue
-       for p in cl.solvables():
-           if cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED or cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
-               op = trans.othersolvable(p)
-               print "  - %s -> %s" % (p, op)
-           else:
-               print "  - %s" % p
+        if cl.type == Transaction.SOLVER_TRANSACTION_ERASE:
+            print "%d erased packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_INSTALL:
+            print "%d installed packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_REINSTALLED:
+            print "%d reinstalled packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+            print "%d downgraded packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_CHANGED:
+            print "%d changed packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED:
+            print "%d upgraded packages:" % cl.count
+        elif cl.type == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
+            print "%d vendor changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
+        elif cl.type == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
+            print "%d arch changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
+        else:
+            continue
+        for p in cl.solvables():
+            if cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED or cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
+                op = trans.othersolvable(p)
+                print "  - %s -> %s" % (p, op)
+            else:
+                print "  - %s" % p
         print
     print "install size change: %d K" % trans.calc_installsizechange()
     print
     
-# vim: sw=4 et
     while True:
-       sys.stdout.write("OK to continue (y/n)? ")
-       sys.stdout.flush()
-       yn = sys.stdin.readline().strip()
-       if yn == 'y': break
-       if yn == 'n': sys.exit(1)
+        sys.stdout.write("OK to continue (y/n)? ")
+        sys.stdout.flush()
+        yn = sys.stdin.readline().strip()
+        if yn == 'y': break
+        if yn == 'n': sys.exit(1)
     newpkgs = trans.newpackages()
     newpkgsfp = {}
     if newpkgs:
-       downloadsize = 0
-       for p in newpkgs:
-           downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
-       print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
-       for p in newpkgs:
-           repo = p.repo.appdata
-           location, medianr = p.lookup_location()
-           if not location:
-               continue
-           if repo.type == 'commandline':
-               f = solv.xfopen(location)
-               if not f:
-                   sys.exit("\n%s: %s not found" % location)
-               newpkgsfp[p.id] = f
-               continue
-           if not sysrepo.handle.isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
-               pname = p.name
-               di = p.repo.Dataiterator(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
-               di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
-               for d in di:
-                   d.setpos_parent()
-                   if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
-                       continue
-                   baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
-                   candidate = None
-                   for installedp in pool.whatprovides(p.nameid):
-                       if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
-                           candidate = installedp
-                   if not candidate:
-                       continue
-                   seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
-                   st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
-                   if st:
-                       continue
-                   chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
-                   if not chksum:
-                       continue
-                   dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
-                   f = repo.download(dloc, False, chksum)
-                   if not f:
-                       continue
-                   nf = tempfile.TemporaryFile()
-                   nf = os.dup(nf.fileno())
-                   st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
-                   solv.xfclose(f)
-                   os.lseek(nf, 0, os.SEEK_SET)
-                   newpkgsfp[p.id] = solv.xfopen_fd("", nf)
-                   break
-               if p.id in newpkgsfp:
-                   sys.stdout.write("d")
-                   sys.stdout.flush()
-                   continue
-                       
-           if repo.type == 'susetags':
-               datadir = repo.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
-               if not datadir:
-                   datadir = 'suse'
-               location = datadir + '/' + location
-           chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
-           f = repo.download(location, False, chksum)
-           if not f:
-               sys.exit("\n%s: %s not found in repository" % (repo.name, location))
-           newpkgsfp[p.id] = f
-           sys.stdout.write(".")
-           sys.stdout.flush()
-       print
+        downloadsize = 0
+        for p in newpkgs:
+            downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
+        print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
+        for p in newpkgs:
+            repo = p.repo.appdata
+            location, medianr = p.lookup_location()
+            if not location:
+                continue
+            if repo.type == 'commandline':
+                f = solv.xfopen(location)
+                if not f:
+                    sys.exit("\n%s: %s not found" % (repo.name, location))
+                newpkgsfp[p.id] = f
+                continue
+            if not sysrepo.handle.isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
+                pname = p.name
+                di = p.repo.Dataiterator(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
+                di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
+                for d in di:
+                    d.setpos_parent()
+                    if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
+                        continue
+                    baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
+                    candidate = None
+                    for installedp in pool.whatprovides(p.nameid):
+                        if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
+                            candidate = installedp
+                    if not candidate:
+                        continue
+                    seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
+                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
+                    if st:
+                        continue
+                    chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
+                    if not chksum:
+                        continue
+                    dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
+                    f = repo.download(dloc, False, chksum)
+                    if not f:
+                        continue
+                    nf = tempfile.TemporaryFile()
+                    nf = os.dup(nf.fileno())
+                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
+                    solv.xfclose(f)
+                    os.lseek(nf, 0, os.SEEK_SET)
+                    newpkgsfp[p.id] = solv.xfopen_fd("", nf)
+                    break
+                if p.id in newpkgsfp:
+                    sys.stdout.write("d")
+                    sys.stdout.flush()
+                    continue
+                        
+            if repo.type == 'susetags':
+                datadir = repo.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
+                if not datadir:
+                    datadir = 'suse'
+                location = datadir + '/' + location
+            chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
+            f = repo.download(location, False, chksum)
+            if not f:
+                sys.exit("\n%s: %s not found in repository" % (repo.name, location))
+            newpkgsfp[p.id] = f
+            sys.stdout.write(".")
+            sys.stdout.flush()
+        print
     print "Committing transaction:"
     print
     ts = rpm.TransactionSet('/')
     ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
     erasenamehelper = {}
     for p in trans.steps():
-       type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
-       if type == Transaction.SOLVER_TRANSACTION_ERASE:
-           rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
-           erasenamehelper[p.name] = p
-           if not rpmdbid:
-               sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p)
-           ts.addErase(rpmdbid)
-       elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
-           f = newpkgsfp[p.id]
-           h = ts.hdrFromFdno(solv.xfileno(f))
-           os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
-           ts.addInstall(h, p, 'u')
-       elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
-           f = newpkgsfp[p.id]
-           h = ts.hdrFromFdno(solv.xfileno(f))
-           os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
-           ts.addInstall(h, p, 'i')
+        type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
+        if type == Transaction.SOLVER_TRANSACTION_ERASE:
+            rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
+            erasenamehelper[p.name] = p
+            if not rpmdbid:
+                sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p)
+            ts.addErase(rpmdbid)
+        elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
+            f = newpkgsfp[p.id]
+            h = ts.hdrFromFdno(solv.xfileno(f))
+            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
+            ts.addInstall(h, p, 'u')
+        elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
+            f = newpkgsfp[p.id]
+            h = ts.hdrFromFdno(solv.xfileno(f))
+            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
+            ts.addInstall(h, p, 'i')
     checkproblems = ts.check()
     if checkproblems:
-       print checkproblems
-       sys.exit("Sorry.")
+        print checkproblems
+        sys.exit("Sorry.")
     ts.order()
     def runCallback(reason, amount, total, p, d):
-       if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
-           return solv.xfileno(newpkgsfp[p.id])
-       if reason == rpm.RPMCALLBACK_INST_START:
-           print "install", p
-       if reason == rpm.RPMCALLBACK_UNINST_START:
-           # argh, p is just the name of the package
-           if p in erasenamehelper:
-               p = erasenamehelper[p]
-               print "erase", p
+        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
+            return solv.xfileno(newpkgsfp[p.id])
+        if reason == rpm.RPMCALLBACK_INST_START:
+            print "install", p
+        if reason == rpm.RPMCALLBACK_UNINST_START:
+            # argh, p is just the name of the package
+            if p in erasenamehelper:
+                p = erasenamehelper[p]
+                print "erase", p
     runproblems = ts.run(runCallback, '')
     if runproblems:
-       print runproblems
-       sys.exit(1)
+        print runproblems
+        sys.exit(1)
     sys.exit(0)
 
 print "unknown command", cmd
 sys.exit(1)
+
+# vim: sw=4 et
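
The commit phase in the reindented script drives rpm through its transaction callback protocol: for RPMCALLBACK_INST_OPEN_FILE the callback must return an open file descriptor for the package being installed, and for RPMCALLBACK_UNINST_START rpm hands back only the package name, which is why the script keeps the erasenamehelper dict. A small hedged sketch of that callback shape, with a hypothetical fds dict mapping the key passed to ts.addInstall() to an already-open file descriptor:

    import rpm

    def make_callback(fds):
        def callback(reason, amount, total, key, client_data):
            if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
                return fds[key]         # rpm reads the package payload from this fd
            if reason == rpm.RPMCALLBACK_INST_START:
                print "install", key    # key is whatever was given to ts.addInstall()
            if reason == rpm.RPMCALLBACK_UNINST_START:
                print "erase", key      # here key is only the package *name*
        return callback

    # ts.run(make_callback(fds), '') would then mirror the call in pysolv.
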
index f38f530..f444667 100644 (file)
@@ -1,22 +1,24 @@
-SET(liblibsolvext_SRCS
+SET (libsolvext_SRCS
     repo_content.c repo_deltainfoxml.c repo_helix.c repo_products.c
     repo_releasefile_products.c repo_repomdxml.c repo_rpmmd.c
     repo_susetags.c repo_updateinfoxml.c repo_write.c repo_zyppdb.c
     repo_deb.c sat_xfopen.c)
-IF ( NOT DEBIAN )
-SET(liblibsolvext_SRCS
-    ${liblibsolvext_SRCS} pool_fileconflicts.c repo_rpmdb.c)
+IF (NOT DEBIAN)
+SET (libsolvext_SRCS
+    ${libsolvext_SRCS} pool_fileconflicts.c repo_rpmdb.c)
 ENDIF (NOT DEBIAN)
 
-ADD_LIBRARY(libsolvext STATIC ${liblibsolvext_SRCS})
+ADD_LIBRARY (libsolvext STATIC ${libsolvext_SRCS})
+SET_TARGET_PROPERTIES(libsolvext PROPERTIES OUTPUT_NAME "solvext")
 
-SET(liblibsolvext_HEADERS
+SET (libsolvext_HEADERS
     pool_fileconflicts.h repo_content.h repo_deltainfoxml.h repo_helix.h repo_products.h
     repo_releasefile_products.h repo_repomdxml.h repo_rpmdb.h repo_rpmmd.h
     repo_susetags.h repo_updateinfoxml.h repo_write.h repo_zyppdb.h
     tools_util.h repo_deb.h sat_xfopen.h)
 
-SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
+SET (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
+SET (CMAKE_SHARED_LINKER_FLAGS "${LINK_FLAGS} -Wl,--version-script=${CMAKE_SOURCE_DIR}/ext/libsolvext.ver")
 
-INSTALL(FILES ${liblibsolvext_HEADERS} DESTINATION "${CMAKE_INSTALL_PREFIX}/include/libsolv")
-INSTALL(TARGETS libsolvext LIBRARY DESTINATION ${LIB_INSTALL_DIR} ARCHIVE DESTINATION ${LIB_INSTALL_DIR})
+INSTALL (FILES ${libsolvext_HEADERS} DESTINATION "${CMAKE_INSTALL_PREFIX}/include/solv")
+INSTALL (TARGETS libsolvext LIBRARY DESTINATION ${LIB_INSTALL_DIR} ARCHIVE DESTINATION ${LIB_INSTALL_DIR})
diff --git a/ext/libsolvext.ver b/ext/libsolvext.ver
new file mode 100644 (file)
index 0000000..c782f0b
--- /dev/null
@@ -0,0 +1,37 @@
+SOLV_1.0 {
+       global:
+               pool_findfileconflicts;
+               repo_add_code11_products;
+               repo_add_content;
+               repo_add_deb;
+               repo_add_debdb;
+               repo_add_debpackages;
+               repo_add_debs;
+               repo_add_deltainfoxml;
+               repo_add_helix;
+               repo_add_products;
+               repo_add_pubkeys;
+               repo_add_releasefile_products;
+               repo_add_repomdxml;
+               repo_add_rpm;
+               repo_add_rpmdb;
+               repo_add_rpmdb_pubkeys;
+               repo_add_rpmmd;
+               repo_add_rpms;
+               repo_add_susetags;
+               repo_add_updateinfoxml;
+               repo_add_zyppdb_products;
+               repo_write;
+               repo_write_stdkeyfilter;
+               repodata_write;
+               rpm_byfp;
+               rpm_byrpmdbid;
+               rpm_byrpmh;
+               rpm_installedrpmdbids;
+               rpm_iterate_filelist;
+               rpm_query;
+               sat_xfopen;
+               sat_xfopen_fd;
+       local:
+               *;
+};
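
libsolvext.ver is a GNU ld version script: only the symbols listed under global: stay visible when the library is linked as a shared object, local: * hides everything else, and all of it lands on the SOLV_1.0 version node. The libraries in this tree are still built STATIC, so the script only takes effect for a shared build; assuming such a build produced a libsolvext.so, one way to check what actually gets exported is to list the defined dynamic symbols with nm. A small Python helper as a sketch (the library path below is a placeholder):

    import subprocess

    def exported_symbols(lib_path):
        # `nm -D --defined-only` lists the dynamic symbols a shared object defines.
        out = subprocess.check_output(["nm", "-D", "--defined-only", lib_path])
        return sorted(line.split()[-1] for line in out.splitlines() if line.strip())

    # Hypothetical usage:
    #   print "\n".join(exported_symbols("/usr/lib64/libsolvext.so"))
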
index e80f3cc..1bdb24f 100644 (file)
@@ -148,8 +148,7 @@ rm -rf "$RPM_BUILD_ROOT"
 %defattr(-,root,root)
 %_libdir/libsolv.a
 %_libdir/libsolvext.a
-%dir /usr/include/libsolv
-/usr/include/libsolv/*
+/usr/include/solv
 /usr/bin/deptestomatic
 /usr/bin/helix2solv
 
index 25e8e0c..2966e6a 100644 (file)
@@ -19,6 +19,7 @@ SET (libsolv_SRCS
     chksum.c md5.c sha1.c sha2.c satversion.c)
 
 ADD_LIBRARY (libsolv STATIC ${libsolv_SRCS})
+SET_TARGET_PROPERTIES(libsolv PROPERTIES OUTPUT_NAME "solv")
 
 SET (libsolv_HEADERS
     bitmap.h evr.h hash.h policy.h poolarch.h poolvendor.h pool.h
@@ -28,6 +29,7 @@ SET (libsolv_HEADERS
     chksum.h md5.h sha1.h sha2.h ${CMAKE_BINARY_DIR}/src/satversion.h)
 
 SET (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
+SET (CMAKE_SHARED_LINKER_FLAGS "${LINK_FLAGS} -Wl,--version-script=${CMAKE_SOURCE_DIR}/src/libsolv.ver")
 
-INSTALL (FILES ${libsolv_HEADERS} DESTINATION "${CMAKE_INSTALL_PREFIX}/include/libsolv")
+INSTALL (FILES ${libsolv_HEADERS} DESTINATION "${CMAKE_INSTALL_PREFIX}/include/solv")
 INSTALL (TARGETS libsolv LIBRARY DESTINATION ${LIB_INSTALL_DIR} ARCHIVE DESTINATION ${LIB_INSTALL_DIR})
diff --git a/src/libsolv.ver b/src/libsolv.ver
new file mode 100644 (file)
index 0000000..84daea6
--- /dev/null
@@ -0,0 +1,322 @@
+SOLV_1.0 {
+       global:
+               dataiterator_clonepos;
+               dataiterator_entersub;
+               dataiterator_free;
+               dataiterator_init;
+               dataiterator_init_clone;
+               dataiterator_jump_to_repo;
+               dataiterator_jump_to_solvid;
+               dataiterator_match;
+               dataiterator_prepend_keyname;
+               dataiterator_seek;
+               dataiterator_set_keyname;
+               dataiterator_set_match;
+               dataiterator_set_search;
+               dataiterator_setpos;
+               dataiterator_setpos_parent;
+               dataiterator_skip_attribute;
+               dataiterator_skip_repo;
+               dataiterator_skip_solvable;
+               dataiterator_step;
+               datamatcher_free;
+               datamatcher_init;
+               datamatcher_match;
+               dirpool_add_dir;
+               dirpool_free;
+               dirpool_init;
+               dirpool_make_dirtraverse;
+               map_free;
+               map_grow;
+               map_init;
+               map_init_clone;
+               policy_create_obsolete_index;
+               policy_filter_unwanted;
+               policy_findupdatepackages;
+               policy_illegal2str;
+               policy_illegal_archchange;
+               policy_illegal_vendorchange;
+               policy_is_illegal;
+               pool_add_fileconflicts_deps;
+               pool_add_solvable;
+               pool_add_solvable_block;
+               pool_addfileprovides;
+               pool_addfileprovides_ids;
+               pool_addrelproviders;
+               pool_alloctmpspace;
+               pool_arch2color_slow;
+               pool_bin2hex;
+               pool_calc_duchanges;
+               pool_calc_installsizechange;
+               pool_clear_pos;
+               pool_create;
+               pool_create_state_maps;
+               pool_createwhatprovides;
+               pool_debug;
+               pool_dep2str;
+               pool_evrcmp;
+               pool_evrcmp_str;
+               pool_evrmatch;
+               pool_free;
+               pool_free_solvable_block;
+               pool_freeidhashes;
+               pool_freetmpspace;
+               pool_freewhatprovides;
+               pool_id2evr;
+               pool_id2langid;
+               pool_id2rel;
+               pool_id2str;
+               pool_job2str;
+               pool_lookup_bin_checksum;
+               pool_lookup_checksum;
+               pool_lookup_id;
+               pool_lookup_num;
+               pool_lookup_str;
+               pool_lookup_void;
+               pool_match_dep;
+               pool_match_nevr_rel;
+               pool_queuetowhatprovides;
+               pool_rel2id;
+               pool_search;
+               pool_set_installed;
+               pool_set_languages;
+               pool_setarch;
+               pool_setarchpolicy;
+               pool_setdebuglevel;
+               pool_setvendorclasses;
+               pool_shrink_rels;
+               pool_shrink_strings;
+               pool_solvable2str;
+               pool_str2id;
+               pool_strn2id;
+               pool_tmpappend;
+               pool_tmpjoin;
+               pool_trivial_installable;
+               pool_trivial_installable_noobsoletesmap;
+               pool_vendor2mask;
+               queue_alloc_one;
+               queue_alloc_one_head;
+               queue_delete;
+               queue_delete2;
+               queue_deleten;
+               queue_free;
+               queue_init;
+               queue_init_buffer;
+               queue_init_clone;
+               queue_insert;
+               queue_insert2;
+               queue_insertn;
+               repo_add_poolstr_array;
+               repo_add_repodata;
+               repo_add_solv;
+               repo_add_solv_flags;
+               repo_addid;
+               repo_addid_dep;
+               repo_create;
+               repo_disable_paging;
+               repo_empty;
+               repo_fix_conflicts;
+               repo_fix_supplements;
+               repo_free;
+               repo_freeallrepos;
+               repo_free_solvable_block;
+               repo_internalize;
+               repo_last_repodata;
+               repo_lookup_bin_checksum;
+               repo_lookup_checksum;
+               repo_lookup_id;
+               repo_lookup_idarray;
+               repo_lookup_num;
+               repo_lookup_str;
+               repo_lookup_type;
+               repo_lookup_void;
+               repo_matchvalue;
+               repo_reserve_ids;
+               repo_search;
+               repo_set_id;
+               repo_set_num;
+               repo_set_poolstr;
+               repo_set_str;
+               repo_sidedata_create;
+               repo_sidedata_extend;
+               repodata_add_dirnumnum;
+               repodata_add_dirstr;
+               repodata_add_fixarray;
+               repodata_add_flexarray;
+               repodata_add_idarray;
+               repodata_add_poolstr_array;
+               repodata_chk2str;
+               repodata_create;
+               repodata_create_stubs;
+               repodata_delete;
+               repodata_delete_uninternalized;
+               repodata_dir2str;
+               repodata_disable_paging;
+               repodata_empty;
+               repodata_extend;
+               repodata_extend_block;
+               repodata_filelistfilter_matches;
+               repodata_free;
+               repodata_free_schemahash;
+               repodata_freedata;
+               repodata_globalize_id;
+               repodata_initdata;
+               repodata_internalize;
+               repodata_key2id;
+               repodata_localize_id;
+               repodata_lookup_bin_checksum;
+               repodata_lookup_id;
+               repodata_lookup_idarray;
+               repodata_lookup_num;
+               repodata_lookup_str;
+               repodata_lookup_type;
+               repodata_lookup_void;
+               repodata_merge_attrs;
+               repodata_merge_some_attrs;
+               repodata_new_handle;
+               repodata_schema2id;
+               repodata_search;
+               repodata_set_binary;
+               repodata_set_bin_checksum;
+               repodata_set_checksum;
+               repodata_set_constant;
+               repodata_set_constantid;
+               repodata_set_id;
+               repodata_set_idarray;
+               repodata_set_location;
+               repodata_set_num;
+               repodata_set_poolstr;
+               repodata_set_str;
+               repodata_set_void;
+               repodata_setpos_kv;
+               repodata_shrink;
+               repodata_str2dir;
+               repodata_stringify;
+               repopagestore_compress_page;
+               repopagestore_disable_paging;
+               repopagestore_free;
+               repopagestore_init;
+               repopagestore_load_page_range;
+               repopagestore_read_or_setup_pages;
+               sat_bin2hex;
+               sat_calloc;
+               sat_chksum_add;
+               sat_chksum_create;
+               sat_chksum_create_from_bin;
+               sat_chksum_free;
+               sat_chksum_get;
+               sat_chksum_get_type;
+               sat_chksum_isfinished;
+               sat_chksum_str2type;
+               sat_chksum_type2str;
+               sat_dupappend;
+               sat_dupjoin;
+               sat_free;
+               sat_hex2bin;
+               sat_malloc;
+               sat_malloc2;
+               sat_oom;
+               sat_realloc;
+               sat_realloc2;
+               sat_sort;
+               sat_timems;
+               sat_vercmp;
+               sat_version;
+               sat_version_major;
+               sat_version_minor;
+               sat_version_patch;
+               solvable_get_location;
+               solvable_identical;
+               solvable_lookup_bin_checksum;
+               solvable_lookup_bool;
+               solvable_lookup_checksum;
+               solvable_lookup_id;
+               solvable_lookup_idarray;
+               solvable_lookup_num;
+               solvable_lookup_str;
+               solvable_lookup_str_lang;
+               solvable_lookup_str_poollang;
+               solvable_lookup_void;
+               solvable_selfprovidedep;
+               solvable_trivial_installable_map;
+               solvable_trivial_installable_queue;
+               solvable_trivial_installable_repo;
+               solver_allruleinfos;
+               solver_calc_duchanges;
+               solver_calc_installsizechange;
+               solver_calculate_noobsmap;
+               solver_create;
+               solver_create_decisions_obsoletesmap;
+               solver_dep_installed;
+               solver_disablechoicerules;
+               solver_disablepolicyrules;
+               solver_disableproblem;
+               solver_enableproblem;
+               solver_findallproblemrules;
+               solver_findproblemrule;
+               solver_free;
+               solver_freedupmaps;
+               solver_next_problem;
+               solver_next_solution;
+               solver_next_solutionelement;
+               solver_prepare_solutions;
+               solver_printallsolutions;
+               solver_printcompleteprobleminfo;
+               solver_printdecisionq;
+               solver_printdecisions;
+               solver_printproblem;
+               solver_printprobleminfo;
+               solver_printproblemruleinfo;
+               solver_printrule;
+               solver_printruleclass;
+               solver_printruleelement;
+               solver_printsolution;
+               solver_printtransaction;
+               solver_printtrivial;
+               solver_printwatches;
+               solver_problem_count;
+               solver_problemruleinfo;
+               solver_problemruleinfo2str;
+               solver_reenablepolicyrules;
+               solver_reset;
+               solver_ruleinfo;
+               solver_run_sat;
+               solver_samerule;
+               solver_select2str;
+               solver_solution_count;
+               solver_solutionelement2str;
+               solver_solutionelement_count;
+               solver_solve;
+               solver_splitprovides;
+               solver_take_solution;
+               solver_take_solutionelement;
+               solver_trivial_installable;
+               solver_unifyrules;
+               stringpool_clone;
+               stringpool_free;
+               stringpool_freehash;
+               stringpool_init;
+               stringpool_init_empty;
+               stringpool_shrink;
+               stringpool_str2id;
+               stringpool_strn2id;
+               transaction_add_obsoleted;
+               transaction_all_obs_pkgs;
+               transaction_calc_duchanges;
+               transaction_calc_installsizechange;
+               transaction_calculate;
+               transaction_check_order;
+               transaction_classify;
+               transaction_classify_pkgs;
+               transaction_free;
+               transaction_free_orderdata;
+               transaction_init;
+               transaction_init_clone;
+               transaction_installedresult;
+               transaction_obs_pkg;
+               transaction_order;
+               transaction_order_add_choices;
+               transaction_type;
+       local:
+               *;
+};
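
Among the symbols kept global here are sat_version and sat_version_major/minor/patch, which make for a quick smoke test of a shared build. A hedged ctypes sketch, again assuming a shared libsolv.so exists (the path is a placeholder; the default build in this commit is still static):

    import ctypes

    def solv_version(lib_path="libsolv.so"):              # placeholder SONAME
        lib = ctypes.CDLL(lib_path)
        lib.sat_version.restype = ctypes.c_char_p         # const char *sat_version(void)
        return lib.sat_version(), (lib.sat_version_major(),
                                    lib.sat_version_minor(),
                                    lib.sat_version_patch())

    # Hypothetical usage:
    #   print solv_version("/usr/lib64/libsolv.so")
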
index 2c37b2d..18071a2 100644 (file)
@@ -2,51 +2,51 @@
 # CMakeLists.txt for sat-solver/tools
 #
 
-ADD_LIBRARY(toolstuff STATIC common_write.c)
+ADD_LIBRARY (toolstuff STATIC common_write.c)
 
-IF ( NOT DEBIAN )
-ADD_EXECUTABLE(rpmdb2solv rpmdb2solv.c)
-TARGET_LINK_LIBRARIES(rpmdb2solv toolstuff libsolvext libsolv ${RPMDB_LIBRARY} ${EXPAT_LIBRARY})
+IF (NOT DEBIAN)
+ADD_EXECUTABLE (rpmdb2solv rpmdb2solv.c)
+TARGET_LINK_LIBRARIES (rpmdb2solv toolstuff libsolvext libsolv ${RPMDB_LIBRARY} ${EXPAT_LIBRARY})
 
-ADD_EXECUTABLE(rpms2solv rpms2solv.c)
-TARGET_LINK_LIBRARIES(rpms2solv toolstuff libsolvext libsolv ${RPMDB_LIBRARY})
+ADD_EXECUTABLE (rpms2solv rpms2solv.c)
+TARGET_LINK_LIBRARIES (rpms2solv toolstuff libsolvext libsolv ${RPMDB_LIBRARY})
 
-ADD_EXECUTABLE(findfileconflicts findfileconflicts.c)
-TARGET_LINK_LIBRARIES(findfileconflicts libsolvext libsolv ${RPMDB_LIBRARY})
+ADD_EXECUTABLE (findfileconflicts findfileconflicts.c)
+TARGET_LINK_LIBRARIES (findfileconflicts libsolvext libsolv ${RPMDB_LIBRARY})
 
-ENDIF ( NOT DEBIAN )
+ENDIF (NOT DEBIAN)
 
-ADD_EXECUTABLE(rpmmd2solv rpmmd2solv.c)
-TARGET_LINK_LIBRARIES(rpmmd2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
+ADD_EXECUTABLE (rpmmd2solv rpmmd2solv.c)
+TARGET_LINK_LIBRARIES (rpmmd2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
 
-ADD_EXECUTABLE(helix2solv helix2solv.c)
-TARGET_LINK_LIBRARIES(helix2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
+ADD_EXECUTABLE (helix2solv helix2solv.c)
+TARGET_LINK_LIBRARIES (helix2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
 
-ADD_EXECUTABLE(susetags2solv susetags2solv.c)
-TARGET_LINK_LIBRARIES(susetags2solv toolstuff libsolvext libsolv ${ZLIB_LIBRARY})
+ADD_EXECUTABLE (susetags2solv susetags2solv.c)
+TARGET_LINK_LIBRARIES (susetags2solv toolstuff libsolvext libsolv ${ZLIB_LIBRARY})
 
-ADD_EXECUTABLE(updateinfoxml2solv updateinfoxml2solv.c)
-TARGET_LINK_LIBRARIES(updateinfoxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
+ADD_EXECUTABLE (updateinfoxml2solv updateinfoxml2solv.c)
+TARGET_LINK_LIBRARIES (updateinfoxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
 
-ADD_EXECUTABLE(deltainfoxml2solv deltainfoxml2solv.c)
-TARGET_LINK_LIBRARIES(deltainfoxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
+ADD_EXECUTABLE (deltainfoxml2solv deltainfoxml2solv.c)
+TARGET_LINK_LIBRARIES (deltainfoxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
 
-ADD_EXECUTABLE(repomdxml2solv repomdxml2solv.c)
-TARGET_LINK_LIBRARIES(repomdxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
+ADD_EXECUTABLE (repomdxml2solv repomdxml2solv.c)
+TARGET_LINK_LIBRARIES (repomdxml2solv toolstuff libsolvext libsolv ${EXPAT_LIBRARY})
 
-ADD_EXECUTABLE(installcheck installcheck.c)
-TARGET_LINK_LIBRARIES(installcheck libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
+ADD_EXECUTABLE (installcheck installcheck.c)
+TARGET_LINK_LIBRARIES (installcheck libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
 
-ADD_EXECUTABLE(patchcheck patchcheck.c)
-TARGET_LINK_LIBRARIES(patchcheck libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
+ADD_EXECUTABLE (patchcheck patchcheck.c)
+TARGET_LINK_LIBRARIES (patchcheck libsolvext libsolv ${EXPAT_LIBRARY} ${ZLIB_LIBRARY})
 
-ADD_EXECUTABLE(dumpsolv dumpsolv.c )
-TARGET_LINK_LIBRARIES(dumpsolv libsolv)
+ADD_EXECUTABLE (dumpsolv dumpsolv.c )
+TARGET_LINK_LIBRARIES (dumpsolv libsolv)
 
-ADD_EXECUTABLE(mergesolv mergesolv.c )
-TARGET_LINK_LIBRARIES(mergesolv toolstuff libsolvext libsolv)
+ADD_EXECUTABLE (mergesolv mergesolv.c )
+TARGET_LINK_LIBRARIES (mergesolv toolstuff libsolvext libsolv)
 
-install(TARGETS
+INSTALL (TARGETS
     mergesolv
     dumpsolv
     susetags2solv
@@ -58,13 +58,13 @@ install(TARGETS
     installcheck
     DESTINATION ${BIN_INSTALL_DIR})
 
-IF ( NOT DEBIAN )
-install(TARGETS
+IF (NOT DEBIAN)
+INSTALL (TARGETS
     rpmdb2solv
     rpms2solv
     DESTINATION ${BIN_INSTALL_DIR})
-ENDIF ( NOT DEBIAN )
+ENDIF (NOT DEBIAN)
 
-install(PROGRAMS
+INSTALL (PROGRAMS
    repo2solv.sh
    DESTINATION ${BIN_INSTALL_DIR})