1 #!/usr/bin/python
2
3 #
4 # Copyright (c) 2011, Novell Inc.
5 #
6 # This program is licensed under the BSD license, read LICENSE.BSD
7 # for further information
8 #
9
# pysolv - a little software installer demoing the sat solver library/bindings
11
12 # things it does:
13 # - understands globs for package names / dependencies
14 # - understands .arch suffix
15 # - repository data caching
16 # - on demand loading of secondary repository data
17 # - checksum verification
18 # - deltarpm support
19 # - installation of commandline packages
20 #
21 # things not yet ported:
22 # - gpg verification
23 # - file conflicts
24 # - fastestmirror implementation
25 #
26 # things available in the library but missing from pysolv:
27 # - vendor policy loading
28 # - soft locks file handling
29 # - multi version handling
30
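# example invocations (illustrative):
#   pysolv install screen       install a package by name
#   pysolv install 'vim*'       globs and .arch suffixes are understood
#   pysolv erase screen         remove an installed package
#   pysolv search editor        case-insensitive substring search of names
#   pysolv up                   update all installed packages
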
31 import sys
32 import os
33 import glob
34 import solv
35 import re
36 import tempfile
37 import time
38 import subprocess
39 import rpm
40 from stat import *
41 from solv import Pool, Repo, Dataiterator, Job, Solver, Transaction
42 from iniparse import INIConfig
43 from optparse import OptionParser
44
45 #import gc
46 #gc.set_debug(gc.DEBUG_LEAK)
47
48 def calc_cookie_file(filename):
49     chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
50     chksum.add("1.1")
51     chksum.add_stat(filename)
52     return chksum.raw()
53
54 def calc_cookie_fp(fp):
55     chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
56     chksum.add_fp(fp)
57     return chksum.raw()
58
59 class repo_generic(dict):
60     def __init__(self, name, type, attribs = {}):
61         for k in attribs:
62             self[k] = attribs[k]
63         self.name = name
64         self.type = type
65
66     def cachepath(self, ext = None):
67         path = re.sub(r'^\.', '_', self.name)
68         if ext:
69             path += "_" + ext + ".solvx"
70         else:
71             path += ".solv"
72         return "/var/cache/solv/" + re.sub(r'[/]', '_', path)
73         
74     def load(self, pool):
75         self.handle = pool.add_repo(self.name)
76         self.handle.appdata = self
        # values read from the .repo files are strings, so convert them here
        self.handle.priority = 99 - int(self['priority'])
        dorefresh = bool(int(self['autorefresh']))
        if dorefresh:
            try:
                st = os.stat(self.cachepath())
                if time.time() - st[ST_MTIME] < int(self['metadata_expire']):
                    dorefresh = False
85             except OSError, e:
86                 pass
87         self['cookie'] = ''
88         if not dorefresh and self.usecachedrepo(None):
89             print "repo: '%s': cached" % self.name
90             return True
91         return self.load_if_changed()
92
93     def load_if_changed(self):
94         return False
95
    def load_ext(self, repodata):
        return False
98
99     def setfromurls(self, urls):
100         if not urls:
101             return
102         url = urls[0]
103         print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
104         self['baseurl'] = url
105
106     def setfrommetalink(self, metalink):
107         nf = self.download(metalink, False, None)
108         if not nf:
109             return None
110         f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
111         solv.xfclose(nf)
112         urls = []
113         chksum = None
114         for l in f.readlines():
115             l = l.strip()
116             m = re.match(r'^<hash type="sha256">([0-9a-fA-F]{64})</hash>', l)
117             if m:
118                 chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256, m.group(1))
119             m = re.match(r'^<url.*>(https?://.+)repodata/repomd.xml</url>', l)
120             if m:
121                 urls.append(m.group(1))
122         if not urls:
123             chksum = None       # in case the metalink is about a different file
124         f.close()
125         self.setfromurls(urls)
126         return chksum
127         
128     def setfrommirrorlist(self, mirrorlist):
129         nf = self.download(mirrorlist, False, None)
130         if not nf:
131             return
132         f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
133         solv.xfclose(nf)
134         urls = []
        for l in f.readlines():
            l = l.strip()
            if l.startswith('http://') or l.startswith('https://'):
                urls.append(l)
139         self.setfromurls(urls)
140         f.close()
141         
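    # fetch a file from the repository: resolve the base url via the
    # metalink or mirrorlist if necessary, download with curl, verify the
    # checksum when one is known and return an (optionally uncompressing)
    # solv file handle, or None on failure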
142     def download(self, file, uncompress, chksum, markincomplete=False):
143         url = None
144         if 'baseurl' not in self:
145             if 'metalink' in self:
146                 if file != self['metalink']:
147                     metalinkchksum = self.setfrommetalink(self['metalink'])
148                     if file == 'repodata/repomd.xml' and metalinkchksum and not chksum:
149                         chksum = metalinkchksum
150                 else:
151                     url = file
152             elif 'mirrorlist' in self:
153                 if file != self['mirrorlist']:
154                     self.setfrommirrorlist(self['mirrorlist'])
155                 else:
156                     url = file
157         if not url:
158             if 'baseurl' not in self:
159                 print "%s: no baseurl" % self.name
160                 return None
161             url = re.sub(r'/$', '', self['baseurl']) + '/' + file
162         f = tempfile.TemporaryFile()
163         st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
164         if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
165             return None
166         os.lseek(f.fileno(), 0, os.SEEK_SET)
167         if st:
168             print "%s: download error %d" % (file, st)
169             if markincomplete:
170                 self['incomplete'] = True
171             return None
172         if chksum:
173             fchksum = solv.Chksum(chksum.type)
174             if not fchksum:
175                 print "%s: unknown checksum type" % file
176                 if markincomplete:
177                     self['incomplete'] = True
178                 return None
179             fchksum.add_fd(f.fileno())
180             if fchksum != chksum:
181                 print "%s: checksum mismatch" % file
182                 if markincomplete:
183                     self['incomplete'] = True
184                 return None
185         if uncompress:
186             return solv.xfopen_fd(file, os.dup(f.fileno()))
187         return solv.xfopen_fd(None, os.dup(f.fileno()))
188
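    # cached repos are written as .solv files with the validation cookie in
    # the last 32 bytes (preceded by the extension cookie for remote repos);
    # check those trailing cookies before trusting the cache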
189     def usecachedrepo(self, ext, mark=False):
190         if not ext:
191             cookie = self['cookie']
192         else:
193             cookie = self['extcookie']
194         try: 
195             repopath = self.cachepath(ext)
196             f = open(repopath, 'r')
197             f.seek(-32, os.SEEK_END)
198             fcookie = f.read(32)
199             if len(fcookie) != 32:
200                 return False
201             if cookie and fcookie != cookie:
202                 return False
203             if self.type != 'system' and not ext:
204                 f.seek(-32 * 2, os.SEEK_END)
205                 fextcookie = f.read(32)
206                 if len(fextcookie) != 32:
207                     return False
208             f.seek(0)
209             flags = 0
210             if ext:
211                 flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
212                 if ext != 'DL':
213                     flags |= Repo.REPO_LOCALPOOL
214             if not self.handle.add_solv(f, flags):
215                 return False
216             if self.type != 'system' and not ext:
217                 self['cookie'] = fcookie
218                 self['extcookie'] = fextcookie
219             if mark:
220                 # no futimes in python?
221                 try:
222                     os.utime(repopath, None)
223                 except Exception, e:
224                     pass
225         except IOError, e:
226             return False
227         return True
228
229     def genextcookie(self, f):
230         chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
231         chksum.add(self['cookie'])
232         if f:
233             stat = os.fstat(f.fileno())
234             chksum.add(str(stat[ST_DEV]))
235             chksum.add(str(stat[ST_INO]))
236             chksum.add(str(stat[ST_SIZE]))
237             chksum.add(str(stat[ST_MTIME]))
238         extcookie = chksum.raw()
239         # compatibility to c code
        if ord(extcookie[0]) == 0:
            extcookie = chr(1) + extcookie[1:]
242         self['extcookie'] = extcookie
243         
    def writecachedrepo(self, ext, info=None):
        tmpname = None
        try:
            if not os.path.isdir("/var/cache/solv"):
                os.mkdir("/var/cache/solv", 0755)
            (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
249             os.fchmod(fd, 0444)
250             f = os.fdopen(fd, 'w+')
251             if not info:
252                 self.handle.write(f)
253             elif ext:
254                 info.write(f)
255             else:       # rewrite_repos case
256                 self.handle.write_first_repodata(f)
257             if self.type != 'system' and not ext:
258                 if 'extcookie' not in self:
259                     self.genextcookie(f)
260                 f.write(self['extcookie'])
261             if not ext:
262                 f.write(self['cookie'])
263             else:
264                 f.write(self['extcookie'])
265             f.close()
266             if self.handle.iscontiguous():
267                 # switch to saved repo to activate paging and save memory
268                 nf = solv.xfopen(tmpname)
269                 if not ext:
270                     # main repo
271                     self.handle.empty()
272                     if not self.handle.add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
273                         sys.exit("internal error, cannot reload solv file")
274                 else:
275                     # extension repodata
276                     # need to extend to repo boundaries, as this is how
277                     # info.write() has written the data
278                     info.extend_to_repo()
279                     # LOCALPOOL does not help as pool already contains all ids
280                     info.add_solv(nf, Repo.REPO_EXTEND_SOLVABLES)
281                 solv.xfclose(nf)
282             os.rename(tmpname, self.cachepath(ext))
283         except IOError, e:
284             if tmpname:
285                 os.unlink(tmpname)
286                 
287     def updateaddedprovides(self, addedprovides):
288         if 'incomplete' in self:
289             return 
290         if 'handle' not in self:
291             return 
292         if self.handle.isempty():
293             return
294         # make sure there's just one real repodata with extensions
295         repodata = self.handle.first_repodata()
296         if not repodata:
297             return
298         oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
299         if not set(addedprovides) <= set(oldaddedprovides):
300             for id in addedprovides:
301                 repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
302             repodata.internalize()
303             self.writecachedrepo(None, repodata)
304
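# repositories using rpm-md metadata (repodata/repomd.xml)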
305 class repo_repomd(repo_generic):
306     def load_if_changed(self):
307         print "rpmmd repo '%s':" % self.name,
308         sys.stdout.flush()
309         f = self.download("repodata/repomd.xml", False, None, None)
310         if not f:
311             print "no repomd.xml file, skipped"
312             self.handle.free(True)
313             del self.handle
314             return False
315         self['cookie'] = calc_cookie_fp(f)
316         if self.usecachedrepo(None, True):
317             print "cached"
318             solv.xfclose(f)
319             return True
320         self.handle.add_repomdxml(f, 0)
321         solv.xfclose(f)
322         print "fetching"
323         (filename, filechksum) = self.find('primary')
324         if filename:
325             f = self.download(filename, True, filechksum, True)
326             if f:
327                 self.handle.add_rpmmd(f, None, 0)
328                 solv.xfclose(f)
329             if 'incomplete' in self:
330                 return False # hopeless, need good primary
331         (filename, filechksum) = self.find('updateinfo')
332         if filename:
333             f = self.download(filename, True, filechksum, True)
334             if f:
335                 self.handle.add_updateinfoxml(f, 0)
336                 solv.xfclose(f)
337         self.add_exts()
338         if 'incomplete' not in self:
339             self.writecachedrepo(None)
340         # must be called after writing the repo
341         self.handle.create_stubs()
342         return True
343
344     def find(self, what):
345         di = self.handle.Dataiterator(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
346         di.prepend_keyname(solv.REPOSITORY_REPOMD)
347         for d in di:
348             d.setpos_parent()
349             filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
350             chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
351             if filename and not chksum:
352                 print "no %s file checksum!" % filename
353                 filename = None
354                 chksum = None
355             if filename:
356                 return (filename, chksum)
357         return (None, None)
358         
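    # register filelists/deltainfo as external repodata stubs, so that the
    # data is only downloaded when load_stub() asks for it via load_ext()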
359     def add_ext(self, repodata, what, ext):
360         filename, chksum = self.find(what)
361         if not filename and what == 'deltainfo':
362             filename, chksum = self.find('prestodelta')
363         if not filename:
364             return
365         handle = repodata.new_handle()
366         repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
367         repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
368         repodata.set_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
369         if ext == 'DL':
370             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
371             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
372         elif ext == 'FL':
373             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
374             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
375         repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
376
377     def add_exts(self):
378         repodata = self.handle.add_repodata(0)
379         self.add_ext(repodata, 'deltainfo', 'DL')
380         self.add_ext(repodata, 'filelists', 'FL')
381         repodata.internalize()
382     
383     def load_ext(self, repodata):
384         repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
385         if repomdtype == 'filelists':
386             ext = 'FL'
387         elif repomdtype == 'deltainfo':
388             ext = 'DL'
389         else:
390             return False
391         sys.stdout.write("[%s:%s: " % (self.name, ext))
392         if self.usecachedrepo(ext):
393             sys.stdout.write("cached]\n")
394             sys.stdout.flush()
395             return True
396         sys.stdout.write("fetching]\n")
397         sys.stdout.flush()
398         filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
399         filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
400         f = self.download(filename, True, filechksum)
401         if not f:
402             return False
403         if ext == 'FL':
404             self.handle.add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
405         elif ext == 'DL':
406             self.handle.add_deltainfoxml(f, Repo.REPO_USE_LOADING)
407         solv.xfclose(f)
408         self.writecachedrepo(ext, repodata)
409         return True
410
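# repositories using the old susetags metadata (content + packages files)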
411 class repo_susetags(repo_generic):
412     def load_if_changed(self):
413         print "susetags repo '%s':" % self.name,
414         sys.stdout.flush()
415         f = self.download("content", False, None, None)
416         if not f:
417             print "no content file, skipped"
418             self.handle.free(True)
419             del self.handle
420             return False
421         self['cookie'] = calc_cookie_fp(f)
422         if self.usecachedrepo(None, True):
423             print "cached"
424             solv.xfclose(f)
425             return True
426         self.handle.add_content(f, 0)
427         solv.xfclose(f)
428         print "fetching"
429         defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
430         descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
431         if not descrdir:
432             descrdir = "suse/setup/descr"
433         (filename, filechksum) = self.find('packages.gz')
434         if not filename:
435             (filename, filechksum) = self.find('packages')
436         if filename:
437             f = self.download(descrdir + '/' + filename, True, filechksum, True)
438             if f:
439                 self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
440                 solv.xfclose(f)
441                 (filename, filechksum) = self.find('packages.en.gz')
442                 if not filename:
443                     (filename, filechksum) = self.find('packages.en')
444                 if filename:
445                     f = self.download(descrdir + '/' + filename, True, filechksum, True)
446                     if f:
447                         self.handle.add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
448                         solv.xfclose(f)
449                 self.handle.internalize()
450         self.add_exts()
451         if 'incomplete' not in self:
452             self.writecachedrepo(None)
453         # must be called after writing the repo
454         self.handle.create_stubs()
455         return True
456
457     def find(self, what):
458         di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
459         di.prepend_keyname(solv.SUSETAGS_FILE)
460         for d in di:
461             d.setpos_parent()
462             chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
463             return (what, chksum)
464         return (None, None)
465
466     def add_ext(self, repodata, what, ext):
467         (filename, chksum) = self.find(what)
468         if not filename:
469             return
470         handle = repodata.new_handle()
471         repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
472         if chksum:
473             repodata.set_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
474         if ext == 'DU':
475             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
476             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
477         elif ext == 'FL':
478             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
479             repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
480         else:
481             for langtag, langtagtype in [
482                 (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
483                 (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
484                 (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
485                 (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
486                 (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
487                 (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
488             ]:
489                 repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self.handle.pool.id2langid(langtag, ext, 1))
490                 repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
491         repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)
492         
493     def add_exts(self):
494         repodata = self.handle.add_repodata(0)
495         di = self.handle.Dataiterator(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
496         di.prepend_keyname(solv.SUSETAGS_FILE)
497         for d in di:
498             filename = d.str()
499             if not filename:
500                 continue
501             if filename[0:9] != "packages.":
502                 continue
503             if len(filename) == 11 and filename != "packages.gz":
504                 ext = filename[9:11]
505             elif filename[11:12] == ".":
506                 ext = filename[9:11]
507             else:
508                 continue
509             if ext == "en":
510                 continue
511             self.add_ext(repodata, filename, ext)
512         repodata.internalize()
513
514     def load_ext(self, repodata):
515         filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
516         ext = filename[9:11]
517         sys.stdout.write("[%s:%s: " % (self.name, ext))
518         if self.usecachedrepo(ext):
519             sys.stdout.write("cached]\n")
520             sys.stdout.flush()
521             return True
522         sys.stdout.write("fetching]\n")
523         sys.stdout.flush()
524         defvendorid = self.handle.lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
525         descrdir = self.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
526         if not descrdir:
527             descrdir = "suse/setup/descr"
528         filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
529         f = self.download(descrdir + '/' + filename, True, filechksum)
530         if not f:
531             return False
532         self.handle.add_susetags(f, defvendorid, ext, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
533         solv.xfclose(f)
534         self.writecachedrepo(ext, repodata)
535         return True
536
537 class repo_unknown(repo_generic):
538     def load(self, pool):
539         print "unsupported repo '%s': skipped" % self.name
540         return False
541
542 class repo_system(repo_generic):
543     def load(self, pool):
544         self.handle = pool.add_repo(self.name)
545         self.handle.appdata = self
546         pool.installed = self.handle
547         print "rpm database:",
548         self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
549         if self.usecachedrepo(None):
550             print "cached"
551             return True
552         print "reading"
553         self.handle.add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
554         self.handle.add_rpmdb(None, Repo.REPO_REUSE_REPODATA)
555         self.writecachedrepo(None)
556         return True
557
558 class repo_cmdline(repo_generic):
559     def load(self, pool):
560         self.handle = pool.add_repo(self.name)
561         self.handle.appdata = self 
562         return True
563
564 def validarch(pool, arch):
565     if not arch:
566         return False
567     id = pool.str2id(arch, False)
568     if not id:
569         return False
570     return pool.isknownarch(id)
571
572 def limitjobs(pool, jobs, flags, evrstr):
573     njobs = []
574     evr = pool.str2id(evrstr)
575     for j in jobs:
576         how = j.how
577         sel = how & Job.SOLVER_SELECTMASK
578         what = pool.rel2id(j.what, evr, flags)
579         if flags == solv.REL_ARCH:
580             how |= Job.SOLVER_SETARCH
581         elif flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
582             if evrstr.find('-') >= 0:
583                 how |= Job.SOLVER_SETEVR
584             else:
585                 how |= Job.SOLVER_SETEV
586         njobs.append(pool.Job(how, what))
587     return njobs
588
589 def limitjobs_evrarch(pool, jobs, flags, evrstr):
590     m = re.match(r'(.+)\.(.+?)$', evrstr)
591     if m and validarch(pool, m.group(2)):
592         jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
593         evrstr = m.group(1)
594     return limitjobs(pool, jobs, flags, evrstr)
595
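# helpers that translate a command line argument into solver jobs: file
# name matches, name/dependency globs, name-version(-release) forms and
# .arch suffixes are tried in turn (see mkjobs() below)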
596 def mkjobs_filelist(pool, cmd, arg):
597     if re.search(r'[[*?]', arg):
598         type = Dataiterator.SEARCH_GLOB
599     else:
600         type = Dataiterator.SEARCH_STRING
601     if cmd == 'erase':
602         di = pool.installed.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
603     else:
604         di = pool.Dataiterator(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
605     matches = []
606     for d in di:
607         s = d.solvable
608         if s and s.installable():
609             matches.append(s.id)
610             di.skip_solvable()  # one match is enough
611     if matches:
612         print "[using file list match for '%s']" % arg
613         if len(matches) > 1:
614             return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
615         else:
616             return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
617     return []
618
619 def mkjobs_rel(pool, cmd, name, rel, evr):
620     flags = 0
621     if rel.find('<') >= 0: flags |= solv.REL_LT
622     if rel.find('=') >= 0: flags |= solv.REL_EQ 
623     if rel.find('>') >= 0: flags |= solv.REL_GT
624     jobs = depglob(pool, name, True, True)
625     if jobs:
626         return limitjobs(pool, jobs, flags, evr)
627     m = re.match(r'(.+)\.(.+?)$', name)
628     if m and validarch(pool, m.group(2)):
629         jobs = depglob(pool, m.group(1), True, True)
630         if jobs:
631             jobs = limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
632             return limitjobs(pool, jobs, flags, evr)
633     return []
634
635 def mkjobs_nevra(pool, cmd, arg):
636     jobs = depglob(pool, arg, True, True)
637     if jobs:
638         return jobs
639     m = re.match(r'(.+)\.(.+?)$', arg)
640     if m and validarch(pool, m.group(2)):
641         jobs = depglob(pool, m.group(1), True, True)
642         if jobs:
643             return limitjobs(pool, jobs, solv.REL_ARCH, m.group(2))
644     m = re.match(r'(.+)-(.+?)$', arg)
645     if m:
646         jobs = depglob(pool, m.group(1), True, False)
647         if jobs:
648             return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
649     m = re.match(r'(.+)-(.+?-.+?)$', arg)
650     if m:
651         jobs = depglob(pool, m.group(1), True, False)
652         if jobs:
653             return limitjobs_evrarch(pool, jobs, solv.REL_EQ, m.group(2))
654     return []
655
656 def mkjobs(pool, cmd, arg):
657     if len(arg) and arg[0] == '/':
658         jobs = mkjobs_filelist(pool, cmd, arg)
659         if jobs:
660             return jobs
661     m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
662     if m:
663         return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
664     else:
665         return mkjobs_nevra(pool, cmd, arg)
666             
667 def depglob(pool, name, globname, globdep):
668     id = pool.str2id(name, False)
669     if id:
670         match = False
671         for s in pool.whatprovides(id):
672             if globname and s.nameid == id:
673                 return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
674             match = True
675         if match:
676             if globname and globdep:
677                 print "[using capability match for '%s']" % name
678             return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
679     if not re.search(r'[[*?]', name):
680         return []
681     if globname:
682         # try name glob
683         idmatches = {}
684         for d in pool.Dataiterator(0, solv.SOLVABLE_NAME, name, Dataiterator.SEARCH_GLOB):
685             s = d.solvable
686             if s.installable():
687                 idmatches[s.nameid] = True
688         if idmatches:
689             return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
690     if globdep:
691         # try dependency glob
692         idmatches = pool.matchprovidingids(name, Dataiterator.SEARCH_GLOB)
693         if idmatches:
694             print "[using capability match for '%s']" % name
695             return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches) ]
696     return []
697     
698
699 def load_stub(repodata):
700     repo = repodata.repo.appdata
701     if repo:
702         return repo.load_ext(repodata)
703     return False
704
705
parser = OptionParser(usage="usage: pysolv [options] COMMAND")
707 (options, args) = parser.parse_args()
708 if not args:
709     parser.print_help(sys.stderr)
710     sys.exit(1)
711
712 cmd = args[0]
713 args = args[1:]
714 if cmd == 'li':
715     cmd = 'list'
716 if cmd == 'in':
717     cmd = 'install'
718 if cmd == 'rm':
719     cmd = 'erase'
720 if cmd == 've':
721     cmd = 'verify'
722 if cmd == 'se':
723     cmd = 'search'
724
725
726 # read all repo configs
727 repos = []
728 for reposdir in ["/etc/zypp/repos.d"]:
729     if not os.path.isdir(reposdir):
730         continue
731     for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
732         cfg = INIConfig(open(reponame))
733         for alias in cfg:
734             repoattr = {'enabled': 0, 'priority': 99, 'autorefresh': 1, 'type': 'rpm-md', 'metadata_expire': 900}
735             for k in cfg[alias]:
736                 repoattr[k] = cfg[alias][k]
            if 'mirrorlist' in repoattr and 'metalink' not in repoattr:
                if repoattr['mirrorlist'].find('/metalink') >= 0:
                    repoattr['metalink'] = repoattr['mirrorlist']
                    del repoattr['mirrorlist']
741             if repoattr['type'] == 'rpm-md':
742                 repo = repo_repomd(alias, 'repomd', repoattr)
743             elif repoattr['type'] == 'yast2':
744                 repo = repo_susetags(alias, 'susetags', repoattr)
745             else:
746                 repo = repo_unknown(alias, 'unknown', repoattr)
747             repos.append(repo)
748
749 pool = solv.Pool()
750 pool.setarch(os.uname()[4])
751 pool.set_loadcallback(load_stub)
752
753 # now load all enabled repos into the pool
754 sysrepo = repo_system('@System', 'system')
755 sysrepo.load(pool)
756 for repo in repos:
757     if int(repo['enabled']):
758         repo.load(pool)
759     
760 if cmd == 'search':
761     matches = {}
762     di = pool.Dataiterator(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
763     for d in di:
764         matches[d.solvid] = True
765     for solvid in sorted(matches.keys()):
766         print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
767     sys.exit(0)
768
769 cmdlinerepo = None
770 if cmd == 'list' or cmd == 'info' or cmd == 'install':
771     for arg in args:
772         if arg.endswith(".rpm") and os.access(arg, os.R_OK):
773             if not cmdlinerepo:
774                 cmdlinerepo = repo_cmdline('@commandline', 'cmdline')
775                 cmdlinerepo.load(pool)
776                 cmdlinerepo['packages'] = {}
777             cmdlinerepo['packages'][arg] = cmdlinerepo.handle.add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
778     if cmdlinerepo:
779         cmdlinerepo.handle.internalize()
780
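# tell the pool about the file provides actually used in dependencies so
# that file requirements can be resolved without complete filelists, and
# store them in the cached repos as well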
781 addedprovides = pool.addfileprovides_ids()
782 if addedprovides:
783     sysrepo.updateaddedprovides(addedprovides)
784     for repo in repos:
785         repo.updateaddedprovides(addedprovides)
786
787 pool.createwhatprovides()
788
789 # convert arguments into jobs
790 jobs = []
791 for arg in args:
792     if cmdlinerepo and arg in cmdlinerepo['packages']:
793         jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
794     else:
795         njobs = mkjobs(pool, cmd, arg)
796         if not njobs:
797             print "nothing matches '%s'" % arg
798             sys.exit(1)
799         jobs += njobs
800
801 if cmd == 'list' or cmd == 'info':
802     if not jobs:
803         print "no package matched."
804         sys.exit(1)
805     for job in jobs:
806         for s in job.solvables():
807             if cmd == 'info':
808                 print "Name:        %s" % s
809                 print "Repo:        %s" % s.repo
810                 print "Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
811                 str = s.lookup_str(solv.SOLVABLE_URL)
812                 if str:
813                     print "Url:         %s" % str
814                 str = s.lookup_str(solv.SOLVABLE_LICENSE)
815                 if str:
816                     print "License:     %s" % str
817                 print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
818                 print
819             else:
820                 print "  - %s [%s]" % (s, s.repo)
821                 print "    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
822     sys.exit(0)
823
824 if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == 'verify':
825     if not jobs:
826         if cmd == 'up' or cmd == 'verify':
827             jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
828         elif cmd == 'dup':
829             pass
830         else:
831             print "no package matched."
832             sys.exit(1)
833     for job in jobs:
834         if cmd == 'up':
835             # up magic: use install instead of update if no installed package matches
836             if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), job.solvables()):
837                 job.how |= Job.SOLVER_UPDATE
838             else:
839                 job.how |= Job.SOLVER_INSTALL
840         elif cmd == 'install':
841             job.how |= Job.SOLVER_INSTALL
842         elif cmd == 'erase':
843             job.how |= Job.SOLVER_ERASE
844         elif cmd == 'dup':
845             job.how |= Job.SOLVER_DISTUPGRADE
846         elif cmd == 'verify':
847             job.how |= Job.SOLVER_VERIFY
848
849     #pool.set_debuglevel(2)
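    # solve the jobs; when the solver reports problems, present the possible
    # solutions, let the user pick one, adjust the job list and retry until
    # the solve succeeds or the user quits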
850     solver = None
851     while True:
852         solver = pool.Solver()
853         solver.ignorealreadyrecommended = True
854         if cmd == 'erase':
855             solver.allowuninstall = True
856         if cmd == 'dup' and not jobs:
857             solver.distupgrade = True
858             solver.updatesystem = True
859             solver.allowdowngrade = True
860             solver.allowvendorchange = True
861             solver.allowarchchange = True
862             solver.dosplitprovides = True
863         if cmd == 'up' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_UPDATE | Job.SOLVER_SOLVABLE_ALL):
864             solver.dosplitprovides = True
865         problems = solver.solve(jobs)
866         if not problems:
867             break
868         for problem in problems:
869             print "Problem %d:" % problem.id
870             r = problem.findproblemrule()
871             ri = r.info()
872             print ri.problemstr()
873             solutions = problem.solutions()
874             for solution in solutions:
875                 print "  Solution %d:" % solution.id
876                 elements = solution.elements()
877                 for element in elements:
878                     etype = element.type
879                     if etype == Solver.SOLVER_SOLUTION_JOB:
880                         print "  - do not ask to", jobs[element.jobidx]
881                     elif etype == Solver.SOLVER_SOLUTION_INFARCH:
882                         if element.solvable.isinstalled():
883                             print "  - keep %s despite the inferior architecture" % element.solvable
884                         else:
885                             print "  - install %s despite the inferior architecture" % element.solvable
886                     elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
887                         if element.solvable.isinstalled():
888                             print "  - keep obsolete %s" % element.solvable
889                         else:
890                             print "  - install %s from excluded repository" % element.solvable
891                     elif etype == Solver.SOLVER_SOLUTION_REPLACE:
892                         illegal = element.illegalreplace()
893                         if illegal & solver.POLICY_ILLEGAL_DOWNGRADE:
894                             print "  - allow downgrade of %s to %s" % (element.solvable, element.replacement)
895                         if illegal & solver.POLICY_ILLEGAL_ARCHCHANGE:
896                             print "  - allow architecture change of %s to %s" % (element.solvable, element.replacement)
897                         if illegal & solver.POLICY_ILLEGAL_VENDORCHANGE:
898                             if element.replacement.vendorid:
899                                 print "  - allow vendor change from '%s' (%s) to '%s' (%s)" % (element.solvable.vendor, element.solvable, element.replacement.vendor, element.replacement)
900                             else:
901                                 print "  - allow vendor change from '%s' (%s) to no vendor (%s)" % (element.solvable.vendor, element.solvable, element.replacement)
902                         if illegal == 0:
903                             print "  - allow replacement of %s with %s" % (element.solvable, element.replacement)
904                     elif etype == Solver.SOLVER_SOLUTION_ERASE:
905                         print "  - allow deinstallation of %s" % element.solvable
906                 print
907             sol = ''
908             while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
909                 sys.stdout.write("Please choose a solution: ")
910                 sys.stdout.flush()
911                 sol = sys.stdin.readline().strip()
912             if sol == 's':
913                 continue        # skip problem
914             if sol == 'q':
915                 sys.exit(1)
916             solution = solutions[int(sol) - 1]
917             for element in solution.elements():
918                 etype = element.type
919                 if etype == Solver.SOLVER_SOLUTION_JOB:
920                     jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
921                 else:
922                     newjob = element.Job()
923                     if newjob and newjob not in jobs:
924                         jobs.append(newjob)
925                         
926     # no problems, show transaction
927     trans = solver.transaction()
928     del solver
929     if trans.isempty():
930         print "Nothing to do."
931         sys.exit(0)
932     print
933     print "Transaction summary:"
934     print
935     for cl in trans.classify():
936         if cl.type == Transaction.SOLVER_TRANSACTION_ERASE:
937             print "%d erased packages:" % cl.count
938         elif cl.type == Transaction.SOLVER_TRANSACTION_INSTALL:
939             print "%d installed packages:" % cl.count
940         elif cl.type == Transaction.SOLVER_TRANSACTION_REINSTALLED:
941             print "%d reinstalled packages:" % cl.count
942         elif cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
943             print "%d downgraded packages:" % cl.count
944         elif cl.type == Transaction.SOLVER_TRANSACTION_CHANGED:
945             print "%d changed packages:" % cl.count
946         elif cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED:
947             print "%d upgraded packages:" % cl.count
948         elif cl.type == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
949             print "%d vendor changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
950         elif cl.type == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
951             print "%d arch changes from '%s' to '%s':" % (cl.count, pool.id2str(cl.fromid), pool.id2str(cl.toid))
952         else:
953             continue
954         for p in cl.solvables():
955             if cl.type == Transaction.SOLVER_TRANSACTION_UPGRADED or cl.type == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
956                 op = trans.othersolvable(p)
957                 print "  - %s -> %s" % (p, op)
958             else:
959                 print "  - %s" % p
960         print
961     print "install size change: %d K" % trans.calc_installsizechange()
    print

965     while True:
966         sys.stdout.write("OK to continue (y/n)? ")
967         sys.stdout.flush()
968         yn = sys.stdin.readline().strip()
969         if yn == 'y': break
970         if yn == 'n': sys.exit(1)
971     newpkgs = trans.newpackages()
972     newpkgsfp = {}
973     if newpkgs:
974         downloadsize = 0
975         for p in newpkgs:
976             downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
977         print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
978         for p in newpkgs:
979             repo = p.repo.appdata
980             location, medianr = p.lookup_location()
981             if not location:
982                 continue
983             if repo.type == 'commandline':
984                 f = solv.xfopen(location)
985                 if not f:
                    sys.exit("\n%s: %s not found" % (repo.name, location))
987                 newpkgsfp[p.id] = f
988                 continue
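            # try a deltarpm first: look for a delta matching this package in
            # the repository's deltainfo data, make sure the delta base is
            # installed and applydeltarpm can reconstruct the rpm from it,
            # and only fall back to downloading the full package otherwise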
989             if not sysrepo.handle.isempty() and os.access('/usr/bin/applydeltarpm', os.X_OK):
990                 pname = p.name
991                 di = p.repo.Dataiterator(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
992                 di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
993                 for d in di:
994                     d.setpos_parent()
995                     if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
996                         continue
997                     baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
998                     candidate = None
999                     for installedp in pool.whatprovides(p.nameid):
1000                         if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
1001                             candidate = installedp
1002                     if not candidate:
1003                         continue
1004                     seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
1005                     st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
1006                     if st:
1007                         continue
1008                     chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
1009                     if not chksum:
1010                         continue
1011                     dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
1012                     f = repo.download(dloc, False, chksum)
1013                     if not f:
1014                         continue
1015                     nf = tempfile.TemporaryFile()
1016                     nf = os.dup(nf.fileno())
1017                     st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
1018                     solv.xfclose(f)
1019                     os.lseek(nf, 0, os.SEEK_SET)
1020                     newpkgsfp[p.id] = solv.xfopen_fd("", nf)
1021                     break
1022                 if p.id in newpkgsfp:
1023                     sys.stdout.write("d")
1024                     sys.stdout.flush()
1025                     continue
1026                         
1027             if repo.type == 'susetags':
1028                 datadir = repo.handle.lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
1029                 if not datadir:
1030                     datadir = 'suse'
1031                 location = datadir + '/' + location
1032             chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
1033             f = repo.download(location, False, chksum)
1034             if not f:
1035                 sys.exit("\n%s: %s not found in repository" % (repo.name, location))
1036             newpkgsfp[p.id] = f
1037             sys.stdout.write(".")
1038             sys.stdout.flush()
1039         print
1040     print "Committing transaction:"
1041     print
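    # hand the result over to rpm: erase steps are added by rpmdb id,
    # install steps by the header read from the downloaded package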
1042     ts = rpm.TransactionSet('/')
1043     ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
1044     erasenamehelper = {}
1045     for p in trans.steps():
1046         type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
1047         if type == Transaction.SOLVER_TRANSACTION_ERASE:
1048             rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
1049             erasenamehelper[p.name] = p
1050             if not rpmdbid:
1051                 sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p)
1052             ts.addErase(rpmdbid)
1053         elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
1054             f = newpkgsfp[p.id]
1055             h = ts.hdrFromFdno(solv.xfileno(f))
1056             os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
1057             ts.addInstall(h, p, 'u')
1058         elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
1059             f = newpkgsfp[p.id]
1060             h = ts.hdrFromFdno(solv.xfileno(f))
1061             os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
1062             ts.addInstall(h, p, 'i')
1063     checkproblems = ts.check()
1064     if checkproblems:
1065         print checkproblems
1066         sys.exit("Sorry.")
1067     ts.order()
1068     def runCallback(reason, amount, total, p, d):
1069         if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
1070             return solv.xfileno(newpkgsfp[p.id])
1071         if reason == rpm.RPMCALLBACK_INST_START:
1072             print "install", p
1073         if reason == rpm.RPMCALLBACK_UNINST_START:
1074             # argh, p is just the name of the package
1075             if p in erasenamehelper:
1076                 p = erasenamehelper[p]
1077                 print "erase", p
1078     runproblems = ts.run(runCallback, '')
1079     if runproblems:
1080         print runproblems
1081         sys.exit(1)
1082     sys.exit(0)
1083
1084 print "unknown command", cmd
1085 sys.exit(1)