#!/usr/bin/python

#
# Copyright (c) 2011, Novell Inc.
#
# This program is licensed under the BSD license, read LICENSE.BSD
# for further information
#

# pysolv - a little software installer demoing the sat solver library/bindings

# things it does:
# - understands globs for package names / dependencies
# - understands .arch suffix
# - repository data caching
# - on demand loading of secondary repository data
# - checksum verification
# - deltarpm support
# - installation of commandline packages
#
# things not yet ported:
# - gpg verification
# - file conflicts
# - fastestmirror implementation
#
# things available in the library but missing from pysolv:
# - vendor policy loading
# - soft locks file handling
# - multi version handling

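# Example invocations (illustrative only; the package names are placeholders
# and the script expects readable repo configs in /etc/zypp/repos.d plus a
# writable /var/cache/solv):
#   pysolv search editor
#   pysolv install 'vim*'          # glob in the package name
#   pysolv install zsh.x86_64      # name with .arch suffix
#   pysolv erase emacs
#   pysolv up
#   pysolv dup
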
import sys
import os
import glob
import solv
import re
import tempfile
import time
import subprocess
import fnmatch
import rpm
from stat import *
from solv import Pool, Repo, Dataiterator, Job, Solver, Transaction
from iniparse import INIConfig
from optparse import OptionParser

#import gc
#gc.set_debug(gc.DEBUG_LEAK)

def calc_cookie_file(filename):
    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
    chksum.add("1.1")
    chksum.add_stat(filename)
    return chksum.raw()

def calc_cookie_fp(fp):
    chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
    chksum.add_fp(fp)
    return chksum.raw()

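# Note: the cookies computed above are appended to the cached .solv files;
# usecachedrepo() below compares the last 32 bytes of a cache file against
# the cookie of the current metadata, so stale caches are detected and the
# metadata is refetched.
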
class generic_repo(dict):
    def __init__(self, attribs):
        for k in attribs:
            self[k] = attribs[k]

    def cachepath(self, ext = None):
        path = re.sub(r'^\.', '_', self['alias'])
        if ext:
            path += "_" + ext + ".solvx"
        else:
            path += ".solv"
        return "/var/cache/solv/" + re.sub(r'[/]', '_', path)

    def load(self, pool):
        self['handle'] = pool.add_repo(self['alias'])
        self['handle'].appdata = self
        self['handle'].priority = 99 - int(self['priority'])
        dorefresh = False
        if self['autorefresh']:
            dorefresh = True
        if dorefresh:
            try:
                st = os.stat(self.cachepath())
                if time.time() - st[ST_MTIME] < int(self['metadata_expire']):
                    dorefresh = False
            except OSError, e:
                pass
        self['cookie'] = ''
        if not dorefresh and self.usecachedrepo(None):
            print "repo: '%s': cached" % self['alias']
            return True
        return self.load_if_changed()

    def load_ext(self, repodata):
        return False

    def setfromurls(self, urls):
        if not urls:
            return
        url = urls[0]
        print "[using mirror %s]" % re.sub(r'^(.*?/...*?)/.*$', r'\1', url)
        self['baseurl'] = url

    def setfrommetalink(self, metalink):
        nf = self.download(metalink, False, None)
        if not nf:
            return None
        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
        solv.xfclose(nf)
        urls = []
        chksum = None
        for l in f.readlines():
            l = l.strip()
            m = re.match(r'^<hash type="sha256">([0-9a-fA-F]{64})</hash>', l)
            if m:
                chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256, m.group(1))
            m = re.match(r'^<url.*>(https?://.+)repodata/repomd.xml</url>', l)
            if m:
                urls.append(m.group(1))
        if not urls:
            chksum = None       # in case the metalink is about a different file
        f.close()
        self.setfromurls(urls)
        return chksum

    def setfrommirrorlist(self, mirrorlist):
        nf = self.download(mirrorlist, False, None)
        if not nf:
            return
        f = os.fdopen(os.dup(solv.xfileno(nf)), 'r')
        solv.xfclose(nf)
        urls = []
        for l in f.readlines():
            l = l.strip()
            if l.startswith('http://') or l.startswith('https://'):
                urls.append(l)
        self.setfromurls(urls)
        f.close()

    def download(self, file, uncompress, chksum, markincomplete=False):
        url = None
        if 'baseurl' not in self:
            if 'metalink' in self:
                if file != self['metalink']:
                    metalinkchksum = self.setfrommetalink(self['metalink'])
                    if file == 'repodata/repomd.xml' and metalinkchksum and not chksum:
                        chksum = metalinkchksum
                else:
                    url = file
            elif 'mirrorlist' in self:
                if file != self['mirrorlist']:
                    self.setfrommirrorlist(self['mirrorlist'])
                else:
                    url = file
        if not url:
            if 'baseurl' not in self:
                print "%s: no baseurl" % self['alias']
                return None
            url = re.sub(r'/$', '', self['baseurl']) + '/' + file
        f = tempfile.TemporaryFile()
        st = subprocess.call(['curl', '-f', '-s', '-L', url], stdout=f.fileno())
        if os.lseek(f.fileno(), 0, os.SEEK_CUR) == 0 and (st == 0 or not chksum):
            return None
        os.lseek(f.fileno(), 0, os.SEEK_SET)
        if st:
            print "%s: download error %d" % (file, st)
            if markincomplete:
                self['incomplete'] = True
            return None
        if chksum:
            fchksum = solv.Chksum(chksum.type)
            if not fchksum:
                print "%s: unknown checksum type" % file
                if markincomplete:
                    self['incomplete'] = True
                return None
            fchksum.add_fd(f.fileno())
            if not fchksum.matches(chksum):
                print "%s: checksum mismatch" % file
                if markincomplete:
                    self['incomplete'] = True
                return None
        if uncompress:
            return solv.xfopen_fd(file, os.dup(f.fileno()))
        return solv.xfopen_fd("", os.dup(f.fileno()))

    def usecachedrepo(self, ext, mark=False):
        if not ext:
            cookie = self['cookie']
        else:
            cookie = self['extcookie']
        handle = self['handle']
        try:
            repopath = self.cachepath(ext)
            f = open(repopath, 'r')
            f.seek(-32, os.SEEK_END)
            fcookie = f.read(32)
            if len(fcookie) != 32:
                return False
            if cookie and fcookie != cookie:
                return False
            if self['alias'] != '@System' and not ext:
                f.seek(-32 * 2, os.SEEK_END)
                fextcookie = f.read(32)
                if len(fextcookie) != 32:
                    return False
            f.seek(0)
            flags = 0
            if ext:
                flags = Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES
                if ext != 'DL':
                    flags |= Repo.REPO_LOCALPOOL
            if not self['handle'].add_solv(f, flags):
                return False
            if self['alias'] != '@System' and not ext:
                self['cookie'] = fcookie
                self['extcookie'] = fextcookie
            if mark:
                # no futimes in python?
                try:
                    os.utime(repopath, None)
                except Exception, e:
                    pass
        except IOError, e:
            return False
        return True

    def genextcookie(self, f):
        chksum = solv.Chksum(solv.REPOKEY_TYPE_SHA256)
        chksum.add(self['cookie'])
        if f:
            stat = os.fstat(f.fileno())
            chksum.add(str(stat[ST_DEV]))
            chksum.add(str(stat[ST_INO]))
            chksum.add(str(stat[ST_SIZE]))
            chksum.add(str(stat[ST_MTIME]))
        extcookie = chksum.raw()
        # compatibility to c code: never use a cookie starting with a zero byte
        if ord(extcookie[0]) == 0:
            extcookie = chr(1) + extcookie[1:]
        self['extcookie'] = extcookie

    def writecachedrepo(self, ext, info=None):
        tmpname = None
        try:
            if not os.path.isdir("/var/cache/solv"):
                os.mkdir("/var/cache/solv", 0755)
            (fd, tmpname) = tempfile.mkstemp(prefix='.newsolv-', dir='/var/cache/solv')
            os.fchmod(fd, 0444)
            f = os.fdopen(fd, 'w+')
            if not info:
                self['handle'].write(f)
            elif ext:
                info.write(f)
            else:       # updateaddedprovides() case
                self['handle'].write_first_repodata(f)
            if self['alias'] != '@System' and not ext:
                if 'extcookie' not in self:
                    self.genextcookie(f)
                f.write(self['extcookie'])
            if not ext:
                f.write(self['cookie'])
            else:
                f.write(self['extcookie'])
            f.close()
            if self['handle'].iscontiguous():
                # switch to saved repo to activate paging and save memory
                nf = solv.xfopen(tmpname)
                if not ext:
                    # main repo
                    self['handle'].empty()
                    if not self['handle'].add_solv(nf, Repo.SOLV_ADD_NO_STUBS):
                        sys.exit("internal error, cannot reload solv file")
                else:
                    # extension repodata
                    # need to extend to repo boundaries, as this is how
                    # info.write() has written the data
                    info.extend_to_repo()
                    # LOCALPOOL does not help as pool already contains all ids
                    info.read_solv_flags(nf, Repo.REPO_EXTEND_SOLVABLES)
                solv.xfclose(nf)
            os.rename(tmpname, self.cachepath(ext))
        except IOError, e:
            if tmpname:
                os.unlink(tmpname)

    def updateaddedprovides(self, addedprovides):
        if 'incomplete' in self:
            return
        if 'handle' not in self:
            return
        if not self['handle'].nsolvables:
            return
        # make sure there's just one real repodata with extensions
        repodata = self['handle'].first_repodata()
        if not repodata:
            return
        oldaddedprovides = repodata.lookup_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES)
        if not set(addedprovides) <= set(oldaddedprovides):
            for id in addedprovides:
                repodata.add_idarray(solv.SOLVID_META, solv.REPOSITORY_ADDEDFILEPROVIDES, id)
            repodata.internalize()
            self.writecachedrepo(None, repodata)

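# The concrete repo classes below implement the per-format pieces on top of
# generic_repo: load_if_changed() fetches and parses the primary metadata,
# find() locates a file entry plus its checksum in the parsed metadata, and
# add_exts()/load_ext() register and lazily load extension data (filelists,
# deltainfo, ...) that is cached in separate .solvx files.
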
class repomd_repo(generic_repo):
    def load_if_changed(self):
        print "rpmmd repo '%s':" % self['alias'],
        sys.stdout.flush()
        f = self.download("repodata/repomd.xml", False, None, None)
        if not f:
            print "no repomd.xml file, skipped"
            self['handle'].free(True)
            del self['handle']
            return False
        self['cookie'] = calc_cookie_fp(f)
        if self.usecachedrepo(None, True):
            print "cached"
            solv.xfclose(f)
            return True
        self['handle'].add_repomdxml(f, 0)
        solv.xfclose(f)
        print "fetching"
        (filename, filechksum) = self.find('primary')
        if filename:
            f = self.download(filename, True, filechksum, True)
            if f:
                self['handle'].add_rpmmd(f, None, 0)
                solv.xfclose(f)
            if 'incomplete' in self:
                return False # hopeless, need good primary
        (filename, filechksum) = self.find('updateinfo')
        if filename:
            f = self.download(filename, True, filechksum, True)
            if f:
                self['handle'].add_updateinfoxml(f, 0)
                solv.xfclose(f)
        self.add_exts()
        if 'incomplete' not in self:
            self.writecachedrepo(None)
        # must be called after writing the repo
        self['handle'].create_stubs()
        return True

    def find(self, what):
        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE, what, Dataiterator.SEARCH_STRING)
        di.prepend_keyname(solv.REPOSITORY_REPOMD)
        for d in di:
            d.setpos_parent()
            filename = d.pool.lookup_str(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_LOCATION)
            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.REPOSITORY_REPOMD_CHECKSUM)
            if filename and not chksum:
                print "no %s file checksum!" % filename
                filename = None
                chksum = None
            if filename:
                return (filename, chksum)
        return (None, None)

    def add_ext(self, repodata, what, ext):
        filename, chksum = self.find(what)
        if not filename and what == 'deltainfo':
            filename, chksum = self.find('prestodelta')
        if not filename:
            return
        handle = repodata.new_handle()
        repodata.set_poolstr(handle, solv.REPOSITORY_REPOMD_TYPE, what)
        repodata.set_str(handle, solv.REPOSITORY_REPOMD_LOCATION, filename)
        repodata.set_bin_checksum(handle, solv.REPOSITORY_REPOMD_CHECKSUM, chksum)
        if what == 'deltainfo':
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOSITORY_DELTAINFO)
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_FLEXARRAY)
        elif what == 'filelists':
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)

    def add_exts(self):
        repodata = self['handle'].add_repodata(0)
        self.add_ext(repodata, 'deltainfo', 'DL')
        self.add_ext(repodata, 'filelists', 'FL')
        repodata.internalize()

    def load_ext(self, repodata):
        repomdtype = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_TYPE)
        if repomdtype == 'filelists':
            ext = 'FL'
        elif repomdtype == 'deltainfo':
            ext = 'DL'
        else:
            return False
        sys.stdout.write("[%s:%s" % (self['alias'], ext))
        if self.usecachedrepo(ext):
            sys.stdout.write(" cached]\n")
            sys.stdout.flush()
            return True
        sys.stdout.write(" fetching]\n")
        sys.stdout.flush()
        filename = repodata.lookup_str(solv.SOLVID_META, solv.REPOSITORY_REPOMD_LOCATION)
        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.REPOSITORY_REPOMD_CHECKSUM)
        f = self.download(filename, True, filechksum)
        if not f:
            return False
        if ext == 'FL':
            self['handle'].add_rpmmd(f, 'FL', Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
        elif ext == 'DL':
            self['handle'].add_deltainfoxml(f, Repo.REPO_USE_LOADING)
        solv.xfclose(f)
        self.writecachedrepo(ext, repodata)
        return True

class susetags_repo(generic_repo):
    def load_if_changed(self):
        print "susetags repo '%s':" % self['alias'],
        sys.stdout.flush()
        f = self.download("content", False, None, None)
        if not f:
            print "no content file, skipped"
            self['handle'].free(True)
            del self['handle']
            return False
        self['cookie'] = calc_cookie_fp(f)
        if self.usecachedrepo(None, True):
            print "cached"
            solv.xfclose(f)
            return True
        self['handle'].add_content(f, 0)
        solv.xfclose(f)
        print "fetching"
        defvendorid = self['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
        descrdir = self['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
        if not descrdir:
            descrdir = "suse/setup/descr"
        (filename, filechksum) = self.find('packages.gz')
        if not filename:
            (filename, filechksum) = self.find('packages')
        if filename:
            f = self.download(descrdir + '/' + filename, True, filechksum, True)
            if f:
                self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.SUSETAGS_RECORD_SHARES)
                solv.xfclose(f)
                (filename, filechksum) = self.find('packages.en.gz')
                if not filename:
                    (filename, filechksum) = self.find('packages.en')
                if filename:
                    f = self.download(descrdir + '/' + filename, True, filechksum, True)
                    if f:
                        self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_NO_INTERNALIZE|Repo.REPO_REUSE_REPODATA|Repo.REPO_EXTEND_SOLVABLES)
                        solv.xfclose(f)
                self['handle'].internalize()
        self.add_exts()
        if 'incomplete' not in self:
            self.writecachedrepo(None)
        # must be called after writing the repo
        self['handle'].create_stubs()
        return True

    def find(self, what):
        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, what, Dataiterator.SEARCH_STRING)
        di.prepend_keyname(solv.SUSETAGS_FILE)
        for d in di:
            d.setpos_parent()
            chksum = d.pool.lookup_checksum(solv.SOLVID_POS, solv.SUSETAGS_FILE_CHECKSUM)
            return (what, chksum)
        return (None, None)

    def add_ext(self, repodata, what, ext):
        (filename, chksum) = self.find(what)
        if not filename:
            return
        handle = repodata.new_handle()
        repodata.set_str(handle, solv.SUSETAGS_FILE_NAME, filename)
        if chksum:
            repodata.set_bin_checksum(handle, solv.SUSETAGS_FILE_CHECKSUM, chksum)
        if ext == 'DU':
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_DISKUSAGE)
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRNUMNUMARRAY)
        elif ext == 'FL':
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.SOLVABLE_FILELIST)
            repodata.add_idarray(handle, solv.REPOSITORY_KEYS, solv.REPOKEY_TYPE_DIRSTRARRAY)
        else:
            for langtag, langtagtype in [
                (solv.SOLVABLE_SUMMARY, solv.REPOKEY_TYPE_STR),
                (solv.SOLVABLE_DESCRIPTION, solv.REPOKEY_TYPE_STR),
                (solv.SOLVABLE_EULA, solv.REPOKEY_TYPE_STR),
                (solv.SOLVABLE_MESSAGEINS, solv.REPOKEY_TYPE_STR),
                (solv.SOLVABLE_MESSAGEDEL, solv.REPOKEY_TYPE_STR),
                (solv.SOLVABLE_CATEGORY, solv.REPOKEY_TYPE_ID)
            ]:
                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, self['handle'].pool.id2langid(langtag, ext, 1))
                repodata.add_idarray(handle, solv.REPOSITORY_KEYS, langtagtype)
        repodata.add_flexarray(solv.SOLVID_META, solv.REPOSITORY_EXTERNAL, handle)

    def add_exts(self):
        repodata = self['handle'].add_repodata(0)
        di = self['handle'].dataiterator_new(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME, None, 0)
        di.prepend_keyname(solv.SUSETAGS_FILE)
        for d in di:
            filename = d.match_str()
            if not filename:
                continue
            if filename[0:9] != "packages.":
                continue
            if len(filename) == 11 and filename != "packages.gz":
                ext = filename[9:11]
            elif filename[11:12] == ".":
                ext = filename[9:11]
            else:
                continue
            if ext == "en":
                continue
            self.add_ext(repodata, filename, ext)
        repodata.internalize()

    def load_ext(self, repodata):
        filename = repodata.lookup_str(solv.SOLVID_META, solv.SUSETAGS_FILE_NAME)
        ext = filename[9:11]
        sys.stdout.write("[%s:%s" % (self['alias'], ext))
        if self.usecachedrepo(ext):
            sys.stdout.write(" cached]\n")
            sys.stdout.flush()
            return True
        sys.stdout.write(" fetching]\n")
        sys.stdout.flush()
        defvendorid = self['handle'].lookup_id(solv.SOLVID_META, solv.SUSETAGS_DEFAULTVENDOR)
        descrdir = self['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DESCRDIR)
        if not descrdir:
            descrdir = "suse/setup/descr"
        filechksum = repodata.lookup_checksum(solv.SOLVID_META, solv.SUSETAGS_FILE_CHECKSUM)
        f = self.download(descrdir + '/' + filename, True, filechksum)
        if not f:
            return False
        self['handle'].add_susetags(f, defvendorid, None, Repo.REPO_USE_LOADING|Repo.REPO_EXTEND_SOLVABLES)
        solv.xfclose(f)
        self.writecachedrepo(ext, repodata)
        return True

class unknown_repo(generic_repo):
    def load(self, pool):
        print "unsupported repo '%s': skipped" % self['alias']
        return False

class system_repo(generic_repo):
    def load(self, pool):
        self['handle'] = pool.add_repo(self['alias'])
        self['handle'].appdata = self
        pool.installed = self['handle']
        print "rpm database:",
        self['cookie'] = calc_cookie_file("/var/lib/rpm/Packages")
        if self.usecachedrepo(None):
            print "cached"
            return True
        print "reading"
        self['handle'].add_products("/etc/products.d", Repo.REPO_NO_INTERNALIZE)
        self['handle'].add_rpmdb(None)
        self.writecachedrepo(None)
        return True

class cmdline_repo(generic_repo):
    def load(self, pool):
        self['handle'] = pool.add_repo(self['alias'])
        self['handle'].appdata = self
        return True

def validarch(pool, arch):
    if not arch:
        return False
    id = pool.str2id(arch, False)
    if not id:
        return False
    return pool.isknownarch(id)

def limitjobs(pool, jobs, flags, evr):
    njobs = []
    for j in jobs:
        how = j.how
        sel = how & Job.SOLVER_SELECTMASK
        what = pool.rel2id(j.what, evr, flags)
        if flags == solv.REL_ARCH:
            how |= Job.SOLVER_SETARCH
        elif flags == solv.REL_EQ and sel == Job.SOLVER_SOLVABLE_NAME:
            if pool.id2str(evr).find('-') >= 0:
                how |= Job.SOLVER_SETEVR
            else:
                how |= Job.SOLVER_SETEV
        njobs.append(pool.Job(how, what))
    return njobs

def limitjobs_arch(pool, jobs, flags, evr):
    m = re.match(r'(.+)\.(.+?)$', evr)
    if m and validarch(pool, m.group(2)):
        jobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
        return limitjobs(pool, jobs, flags, pool.str2id(m.group(1)))
    else:
        return limitjobs(pool, jobs, flags, pool.str2id(evr))

def mkjobs_filelist(pool, cmd, arg):
    if re.search(r'[[*?]', arg):
        type = Dataiterator.SEARCH_GLOB
    else:
        type = Dataiterator.SEARCH_STRING
    if cmd == 'erase':
        di = pool.installed.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
    else:
        di = pool.dataiterator_new(0, solv.SOLVABLE_FILELIST, arg, type | Dataiterator.SEARCH_FILES|Dataiterator.SEARCH_COMPLETE_FILELIST)
    matches = []
    for d in di:
        s = d.solvable
        if s and s.installable():
            matches.append(s.id)
            di.skip_solvable()  # one match is enough
    if matches:
        print "[using file list match for '%s']" % arg
        if len(matches) > 1:
            return [ pool.Job(Job.SOLVER_SOLVABLE_ONE_OF, pool.towhatprovides(matches)) ]
        else:
            return [ pool.Job(Job.SOLVER_SOLVABLE | Job.SOLVER_NOAUTOSET, matches[0]) ]
    return []

def mkjobs_rel(pool, cmd, name, rel, evr):
    flags = 0
    if rel.find('<') >= 0: flags |= solv.REL_LT
    if rel.find('=') >= 0: flags |= solv.REL_EQ
    if rel.find('>') >= 0: flags |= solv.REL_GT
    jobs = depglob(pool, name, True, True)
    if jobs:
        return limitjobs(pool, jobs, flags, pool.str2id(evr))
    m = re.match(r'(.+)\.(.+?)$', name)
    if m and validarch(pool, m.group(2)):
        jobs = depglob(pool, m.group(1), True, True)
        if jobs:
            jobs = limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
            return limitjobs(pool, jobs, flags, pool.str2id(evr))
    return []

def mkjobs_nevra(pool, cmd, arg):
    jobs = depglob(pool, arg, True, True)
    if jobs:
        return jobs
    m = re.match(r'(.+)\.(.+?)$', arg)
    if m and validarch(pool, m.group(2)):
        jobs = depglob(pool, m.group(1), True, True)
        if jobs:
            return limitjobs(pool, jobs, solv.REL_ARCH, pool.str2id(m.group(2)))
    m = re.match(r'(.+)-(.+?)$', arg)
    if m:
        jobs = depglob(pool, m.group(1), True, False)
        if jobs:
            return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
    m = re.match(r'(.+)-(.+?-.+?)$', arg)
    if m:
        jobs = depglob(pool, m.group(1), True, False)
        if jobs:
            return limitjobs_arch(pool, jobs, solv.REL_EQ, m.group(2))
    return []

def mkjobs(pool, cmd, arg):
    if len(arg) and arg[0] == '/':
        jobs = mkjobs_filelist(pool, cmd, arg)
        if jobs:
            return jobs
    m = re.match(r'(.+?)\s*([<=>]+)\s*(.+?)$', arg)
    if m:
        return mkjobs_rel(pool, cmd, m.group(1), m.group(2), m.group(3))
    else:
        return mkjobs_nevra(pool, cmd, arg)

def depglob(pool, name, globname, globdep):
    id = pool.str2id(name, False)
    if id:
        match = False
        for s in pool.providers(id):
            if globname and s.nameid == id:
                return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) ]
            match = True
        if match:
            if globname and globdep:
                print "[using capability match for '%s']" % name
            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) ]
    if not re.search(r'[[*?]', name):
        return []
    if globname:
        # try name glob
        idmatches = {}
        for d in pool.dataiterator_new(0, solv.SOLVABLE_NAME, name, Dataiterator.SEARCH_GLOB):
            s = d.solvable
            if s.installable():
                idmatches[s.nameid] = True
        if idmatches:
            return [ pool.Job(Job.SOLVER_SOLVABLE_NAME, id) for id in sorted(idmatches.keys()) ]
    if globdep:
        # try dependency glob
        idmatches = pool.matchprovidingids(name, Dataiterator.SEARCH_GLOB)
        if idmatches:
            print "[using capability match for '%s']" % name
            return [ pool.Job(Job.SOLVER_SOLVABLE_PROVIDES, id) for id in sorted(idmatches) ]
    return []

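# Rough argument-to-job mapping implemented above (illustrative examples,
# package names are placeholders):
#   "/usr/bin/vim"      -> file list match via mkjobs_filelist()
#   "vim > 7.0"         -> name/provides match plus version limit via mkjobs_rel()
#   "vim-7.3-1.x86_64"  -> name[-version[-release]][.arch] parsing in mkjobs_nevra()
#   "vim*"              -> name or dependency glob in depglob()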

def load_stub(repodata):
    repo = repodata.repo.appdata
    if repo:
        return repo.load_ext(repodata)
    return False


parser = OptionParser(usage="usage: pysolv [options] COMMAND")
(options, args) = parser.parse_args()
if not args:
    parser.print_help(sys.stderr)
    sys.exit(1)

cmd = args[0]
args = args[1:]
if cmd == 'li':
    cmd = 'list'
if cmd == 'in':
    cmd = 'install'
if cmd == 'rm':
    cmd = 'erase'
if cmd == 've':
    cmd = 'verify'
if cmd == 'se':
    cmd = 'search'


pool = solv.Pool()
pool.setarch(os.uname()[4])
pool.set_loadcallback(load_stub)

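# The load callback is invoked by libsolv whenever data from a stub repodata
# (registered via add_exts()) is needed; it dispatches back to the owning
# repo object, which re-reads the cached .solvx extension or downloads it.
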
# read all repo configs
repos = []
for reposdir in ["/etc/zypp/repos.d"]:
    if not os.path.isdir(reposdir):
        continue
    for reponame in sorted(glob.glob('%s/*.repo' % reposdir)):
        cfg = INIConfig(open(reponame))
        for alias in cfg:
            repoattr = {'alias': alias, 'enabled': 0, 'priority': 99, 'autorefresh': 1, 'type': 'rpm-md', 'metadata_expire': 900}
            for k in cfg[alias]:
                repoattr[k] = cfg[alias][k]
            if 'mirrorlist' in repoattr and 'metalink' not in repoattr:
                if repoattr['mirrorlist'].find('/metalink') >= 0:
                    repoattr['metalink'] = repoattr['mirrorlist']
                    del repoattr['mirrorlist']
            if repoattr['type'] == 'rpm-md':
                repo = repomd_repo(repoattr)
            elif repoattr['type'] == 'yast2':
                repo = susetags_repo(repoattr)
            else:
                repo = unknown_repo(repoattr)
            repos.append(repo)

# now load all enabled repos into the pool
sysrepo = system_repo({ 'alias': '@System', 'type': 'system' })
sysrepo.load(pool)
for repo in repos:
    if int(repo['enabled']):
        repo.load(pool)

if cmd == 'search':
    matches = {}
    di = pool.dataiterator_new(0, solv.SOLVABLE_NAME, args[0], Dataiterator.SEARCH_SUBSTRING|Dataiterator.SEARCH_NOCASE)
    for d in di:
        matches[d.solvid] = True
    for solvid in sorted(matches.keys()):
        print " - %s [%s]: %s" % (pool.solvid2str(solvid), pool.solvables[solvid].repo.name, pool.lookup_str(solvid, solv.SOLVABLE_SUMMARY))
    sys.exit(0)

cmdlinerepo = None
if cmd == 'list' or cmd == 'info' or cmd == 'install':
    for arg in args:
        if arg.endswith(".rpm") and os.access(arg, os.R_OK):
            if not cmdlinerepo:
                cmdlinerepo = cmdline_repo({ 'alias': '@commandline', 'type': 'commandline' })
                cmdlinerepo.load(pool)
                cmdlinerepo['packages'] = {}
            cmdlinerepo['packages'][arg] = cmdlinerepo['handle'].add_rpm(arg, Repo.REPO_REUSE_REPODATA|Repo.REPO_NO_INTERNALIZE)
    if cmdlinerepo:
        cmdlinerepo['handle'].internalize()

addedprovides = pool.addfileprovides_ids()
if addedprovides:
    sysrepo.updateaddedprovides(addedprovides)
    for repo in repos:
        repo.updateaddedprovides(addedprovides)

pool.createwhatprovides()

# convert arguments into jobs
jobs = []
for arg in args:
    if cmdlinerepo and arg in cmdlinerepo['packages']:
        jobs.append(pool.Job(Job.SOLVER_SOLVABLE, cmdlinerepo['packages'][arg]))
    else:
        njobs = mkjobs(pool, cmd, arg)
        if not njobs:
            print "nothing matches '%s'" % arg
            sys.exit(1)
        jobs += njobs

if cmd == 'list' or cmd == 'info':
    if not jobs:
        print "no package matched."
        sys.exit(1)
    for job in jobs:
        for s in pool.jobsolvables(job):
            if cmd == 'info':
                print "Name:        %s" % s.str()
                print "Repo:        %s" % s.repo.name
                print "Summary:     %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
                str = s.lookup_str(solv.SOLVABLE_URL)
                if str:
                    print "Url:         %s" % str
                str = s.lookup_str(solv.SOLVABLE_LICENSE)
                if str:
                    print "License:     %s" % str
                print "Description:\n%s" % s.lookup_str(solv.SOLVABLE_DESCRIPTION)
                print
            else:
                print "  - %s [%s]" % (s.str(), s.repo.name)
                print "    %s" % s.lookup_str(solv.SOLVABLE_SUMMARY)
    sys.exit(0)

if cmd == 'install' or cmd == 'erase' or cmd == 'up' or cmd == 'dup' or cmd == 'verify':
    if not jobs:
        if cmd == 'up' or cmd == 'verify':
            jobs = [ pool.Job(Job.SOLVER_SOLVABLE_ALL, 0) ]
        elif cmd == 'dup':
            pass
        else:
            print "no package matched."
            sys.exit(1)
    for job in jobs:
        if cmd == 'up':
            # up magic: use install instead of update if no installed package matches
            if job.how == Job.SOLVER_SOLVABLE_ALL or filter(lambda s: s.isinstalled(), pool.jobsolvables(job)):
                job.how |= Job.SOLVER_UPDATE
            else:
                job.how |= Job.SOLVER_INSTALL
        elif cmd == 'install':
            job.how |= Job.SOLVER_INSTALL
        elif cmd == 'erase':
            job.how |= Job.SOLVER_ERASE
        elif cmd == 'dup':
            job.how |= Job.SOLVER_DISTUPGRADE
        elif cmd == 'verify':
            job.how |= Job.SOLVER_VERIFY

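    # Each job now combines a selection (what to operate on, e.g.
    # SOLVER_SOLVABLE_NAME or SOLVER_SOLVABLE_PROVIDES from mkjobs()) with
    # the action flag OR'ed in above (SOLVER_INSTALL, SOLVER_ERASE, ...);
    # the solver below consumes this job list as-is.
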
    #pool.set_debuglevel(2)
    solver = None
    while True:
        solver = pool.create_solver()
        solver.ignorealreadyrecommended = True
        if cmd == 'erase':
            solver.allowuninstall = True
        if cmd == 'dup' and not jobs:
            solver.distupgrade = True
            solver.updatesystem = True
            solver.allowdowngrade = True
            solver.allowvendorchange = True
            solver.allowarchchange = True
            solver.dosplitprovides = True
        if cmd == 'up' and len(jobs) == 1 and jobs[0].how == (Job.SOLVER_UPDATE | Job.SOLVER_SOLVABLE_ALL):
            solver.dosplitprovides = True
        problems = solver.solve(jobs)
        if not problems:
            break
        for problem in problems:
            print "Problem %d:" % problem.id
            r = problem.findproblemrule()
            ri = r.info()
            print ri.problemstr()
            solutions = problem.solutions()
            for solution in solutions:
                print "  Solution %d:" % solution.id
                elements = solution.elements()
                for element in elements:
                    etype = element.type
                    if etype == Solver.SOLVER_SOLUTION_JOB:
                        print "  - do not ask to", jobs[element.jobidx].str()
                    elif etype == Solver.SOLVER_SOLUTION_INFARCH:
                        if element.solvable.isinstalled():
                            print "  - keep %s despite the inferior architecture" % element.solvable.str()
                        else:
                            print "  - install %s despite the inferior architecture" % element.solvable.str()
                    elif etype == Solver.SOLVER_SOLUTION_DISTUPGRADE:
                        if element.solvable.isinstalled():
                            print "  - keep obsolete %s" % element.solvable.str()
                        else:
                            print "  - install %s from excluded repository" % element.solvable.str()
                    elif etype == Solver.SOLVER_SOLUTION_REPLACE:
                        illegal = element.illegalreplace()
                        if illegal & solver.POLICY_ILLEGAL_DOWNGRADE:
                            print "  - allow downgrade of %s to %s" % (element.solvable.str(), element.replacement.str())
                        if illegal & solver.POLICY_ILLEGAL_ARCHCHANGE:
                            print "  - allow architecture change of %s to %s" % (element.solvable.str(), element.replacement.str())
                        if illegal & solver.POLICY_ILLEGAL_VENDORCHANGE:
                            if element.replacement.vendorid:
                                print "  - allow vendor change from '%s' (%s) to '%s' (%s)" % (element.solvable.vendor, element.solvable.str(), element.replacement.vendor, element.replacement.str())
                            else:
                                print "  - allow vendor change from '%s' (%s) to no vendor (%s)" % (element.solvable.vendor, element.solvable.str(), element.replacement.str())
                        if illegal == 0:
                            print "  - allow replacement of %s with %s" % (element.solvable.str(), element.replacement.str())
                    elif etype == Solver.SOLVER_SOLUTION_ERASE:
                        print "  - allow deinstallation of %s" % element.solvable.str()
            sol = ''
            while not (sol == 's' or sol == 'q' or (sol.isdigit() and int(sol) >= 1 and int(sol) <= len(solutions))):
                sys.stdout.write("Please choose a solution: ")
                sys.stdout.flush()
                sol = sys.stdin.readline().strip()
            if sol == 's':
                continue        # skip problem
            if sol == 'q':
                sys.exit(1)
            solution = solutions[int(sol) - 1]
            for element in solution.elements():
                etype = element.type
                if etype == Solver.SOLVER_SOLUTION_JOB:
                    jobs[element.jobidx] = pool.Job(Job.SOLVER_NOOP, 0)
                else:
                    newjob = element.Job()
                    if newjob:
                        for job in jobs:
                            if job.how == newjob.how and job.what == newjob.what:
                                newjob = None
                                break
                        if newjob:
                            jobs.append(newjob)
    # no problems, show transaction
    trans = solver.transaction()
    del solver
    if trans.isempty():
        print "Nothing to do."
        sys.exit(0)
    print
    print "Transaction summary:"
    print
    for ctype, pkgs, fromid, toid in trans.classify():
        if ctype == Transaction.SOLVER_TRANSACTION_ERASE:
            print "%d erased packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_INSTALL:
            print "%d installed packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_REINSTALLED:
            print "%d reinstalled packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
            print "%d downgraded packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_CHANGED:
            print "%d changed packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_UPGRADED:
            print "%d upgraded packages:" % len(pkgs)
        elif ctype == Transaction.SOLVER_TRANSACTION_VENDORCHANGE:
            print "%d vendor changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
        elif ctype == Transaction.SOLVER_TRANSACTION_ARCHCHANGE:
            print "%d arch changes from '%s' to '%s':" % (len(pkgs), pool.id2str(fromid), pool.id2str(toid))
        else:
            continue
        for p in pkgs:
            if ctype == Transaction.SOLVER_TRANSACTION_UPGRADED or ctype == Transaction.SOLVER_TRANSACTION_DOWNGRADED:
                op = trans.othersolvable(p)
                print "  - %s -> %s" % (p.str(), op.str())
            else:
                print "  - %s" % p.str()
        print
    print "install size change: %d K" % trans.calc_installsizechange()
    print

    while True:
        sys.stdout.write("OK to continue (y/n)? ")
        sys.stdout.flush()
        yn = sys.stdin.readline().strip()
        if yn == 'y': break
        if yn == 'n': sys.exit(1)
    newpkgs = trans.newpackages()
    newpkgsfp = {}
    if newpkgs:
        downloadsize = 0
        for p in newpkgs:
            downloadsize += p.lookup_num(solv.SOLVABLE_DOWNLOADSIZE)
        print "Downloading %d packages, %d K" % (len(newpkgs), downloadsize)
        for p in newpkgs:
            repo = p.repo.appdata
            location, medianr = p.lookup_location()
            if not location:
                continue
            if repo['type'] == 'commandline':
                f = solv.xfopen(location)
                if not f:
                    sys.exit("\n%s: %s not found" % (repo['alias'], location))
                newpkgsfp[p.id] = f
                continue
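            # Delta rpm path: if applydeltarpm is available, look for a delta
            # whose base version is installed, check that it is applicable
            # ('-c -s' sequence check), then download the delta and rebuild
            # the full rpm into an anonymous temp file instead of fetching
            # the whole package.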
            if sysrepo['handle'].nsolvables and os.access('/usr/bin/applydeltarpm', os.X_OK):
                pname = p.name
                di = p.repo.dataiterator_new(solv.SOLVID_META, solv.DELTA_PACKAGE_NAME, pname, Dataiterator.SEARCH_STRING)
                di.prepend_keyname(solv.REPOSITORY_DELTAINFO)
                for d in di:
                    d.setpos_parent()
                    if pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_EVR) != p.evrid or pool.lookup_id(solv.SOLVID_POS, solv.DELTA_PACKAGE_ARCH) != p.archid:
                        continue
                    baseevrid = pool.lookup_id(solv.SOLVID_POS, solv.DELTA_BASE_EVR)
                    candidate = None
                    for installedp in pool.providers(p.nameid):
                        if installedp.isinstalled() and installedp.nameid == p.nameid and installedp.archid == p.archid and installedp.evrid == baseevrid:
                            candidate = installedp
                    if not candidate:
                        continue
                    seq = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_EVR) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_SEQ_NUM)
                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, '-c', '-s', seq])
                    if st:
                        continue
                    chksum = pool.lookup_checksum(solv.SOLVID_POS, solv.DELTA_CHECKSUM)
                    if not chksum:
                        continue
                    dloc = pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_DIR) + '/' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_NAME) + '-' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_EVR) + '.' + pool.lookup_str(solv.SOLVID_POS, solv.DELTA_LOCATION_SUFFIX)
                    f = repo.download(dloc, False, chksum)
                    if not f:
                        continue
                    nf = tempfile.TemporaryFile()
                    nf = os.dup(nf.fileno())
                    st = subprocess.call(['/usr/bin/applydeltarpm', '-a', p.arch, "/dev/fd/%d" % solv.xfileno(f), "/dev/fd/%d" % nf])
                    solv.xfclose(f)
                    os.lseek(nf, 0, os.SEEK_SET)
                    newpkgsfp[p.id] = solv.xfopen_fd("", nf)
                    break
                if p.id in newpkgsfp:
                    sys.stdout.write("d")
                    sys.stdout.flush()
                    continue

            if repo['type'] == 'yast2':
                datadir = repo['handle'].lookup_str(solv.SOLVID_META, solv.SUSETAGS_DATADIR)
                if not datadir:
                    datadir = 'suse'
                location = datadir + '/' + location
            chksum = p.lookup_checksum(solv.SOLVABLE_CHECKSUM)
            f = repo.download(location, False, chksum)
            if not f:
                sys.exit("\n%s: %s not found in repository" % (repo['alias'], location))
            newpkgsfp[p.id] = f
            sys.stdout.write(".")
            sys.stdout.flush()
        print
    print "Committing transaction:"
    print
    ts = rpm.TransactionSet('/')
    ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
    erasenamehelper = {}
    for p in trans.steps():
        type = trans.steptype(p, Transaction.SOLVER_TRANSACTION_RPM_ONLY)
        if type == Transaction.SOLVER_TRANSACTION_ERASE:
            rpmdbid = p.lookup_num(solv.RPM_RPMDBID)
            erasenamehelper[p.name] = p
            if not rpmdbid:
                sys.exit("\ninternal error: installed package %s has no rpmdbid\n" % p.str())
            ts.addErase(rpmdbid)
        elif type == Transaction.SOLVER_TRANSACTION_INSTALL:
            f = newpkgsfp[p.id]
            h = ts.hdrFromFdno(solv.xfileno(f))
            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
            ts.addInstall(h, p, 'u')
        elif type == Transaction.SOLVER_TRANSACTION_MULTIINSTALL:
            f = newpkgsfp[p.id]
            h = ts.hdrFromFdno(solv.xfileno(f))
            os.lseek(solv.xfileno(f), 0, os.SEEK_SET)
            ts.addInstall(h, p, 'i')
    checkproblems = ts.check()
    if checkproblems:
        print checkproblems
        sys.exit("Sorry.")
    ts.order()
    def runCallback(reason, amount, total, p, d):
        if reason == rpm.RPMCALLBACK_INST_OPEN_FILE:
            return solv.xfileno(newpkgsfp[p.id])
        if reason == rpm.RPMCALLBACK_INST_START:
            print "install", p.str()
        if reason == rpm.RPMCALLBACK_UNINST_START:
            # argh, p is just the name of the package
            if p in erasenamehelper:
                p = erasenamehelper[p]
                print "erase", p.str()
    runproblems = ts.run(runCallback, '')
    if runproblems:
        print runproblems
        sys.exit(1)
    sys.exit(0)

print "unknown command", cmd
sys.exit(1)

# vim: sw=4 et