#!/usr/bin/python
# Copyright (c) 2000 - 2016 Samsung Electronics Co., Ltd. All rights reserved.
#
# Contact:
# @author Chulwoo Shin <cw1.shin@samsung.com>

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# - S-Core Co., Ltd
import logging
import os
import base64
import hashlib
import collections
from lxml import etree
from tic.utils import file
from tic.utils import process
from tic.utils.error import TICError
from tic.utils.grabber import myurlgrab


def _get_uncompressed_data_from_url(url, filename, proxies):
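    """Download url to filename and return the path of the uncompressed file.

    Files ending in .gz are unpacked with tic.utils.file.decompress_gzip();
    .bz2 files are unpacked with the external bunzip2 tool.
    """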
    # download the metadata file
    filename = myurlgrab(url, filename, proxies)
    # check whether the downloaded file is compressed and unpack it if so
    if filename.endswith(".gz"):
        decompress_filename = os.path.splitext(filename)[0]
        filename = file.decompress_gzip(filename, decompress_filename)
    elif filename.endswith(".bz2"):
        process.run(['bunzip2', "-f", filename])
        filename = os.path.splitext(filename)[0]

    return filename


def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
                            sumtype=None, checksum=None):
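    """Fetch one repository metadata file, reusing the local cache when possible.

    If a copy already exists under cachedir and its sumtype checksum matches
    the checksum advertised in repomd.xml, the cached path is returned;
    otherwise the file is downloaded again and decompressed.
    """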
    logger = logging.getLogger(__name__)
    url = os.path.join(baseurl, filename)
    filename_tmp = str("%s/%s" % (cachedir, os.path.basename(filename)))
    # the cached copy is stored uncompressed, so drop a .gz/.bz2 suffix
    if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
        filename = os.path.splitext(filename_tmp)[0]
    else:
        filename = filename_tmp

    # reuse the cached file if its checksum still matches repomd.xml
    if sumtype and checksum and os.path.exists(filename):
        if sumtype == 'sha256':
            with open(filename, 'rb') as meta_file:
                file_checksum = hashlib.sha256(meta_file.read()).hexdigest()
        elif sumtype == 'md5':
            with open(filename, 'rb') as meta_file:
                file_checksum = hashlib.md5(meta_file.read()).hexdigest()
        else:
            # fall back to an external checksum tool, e.g. "sha1sum"
            sumcmd = "%ssum" % sumtype
            result = process.run([sumcmd, filename])[1].strip()
            file_checksum = result.split()[0]

        if file_checksum and file_checksum == checksum:
            logger.info('using cached file - ' + str(url))
            return filename

    return _get_uncompressed_data_from_url(url, filename_tmp, proxies)


def get_repodata_from_repos(repos, cachedir):
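    """Download and cache the repodata of every repository in repos.

    Each repo is expected to be dict-like with 'name' and 'url' entries.
    repomd.xml is fetched into a per-repo cache directory and parsed to
    locate the primary, patterns and group (comps) metadata, which are then
    retrieved through _get_metadata_from_repo(). Returns a list of dicts
    with the keys name, baseurl, repomd, primary, cachedir, proxies,
    patterns and comps.
    """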
    my_repodata = []
    for repo in repos:
        reponame = repo.get('name')
        baseurl = repo.get('url')
        # one cache directory per repo, keyed by the url-safe base64 of its URL
        cache_dir = os.path.join(cachedir, base64.urlsafe_b64encode(baseurl))
        cache_file = os.path.join(cache_dir, 'repomd.xml')

        # make directory for caching
        file.make_dirs(cache_dir)

        # TODO: support proxy
        url = os.path.join(baseurl, 'repodata/repomd.xml')
        repomd = myurlgrab(url, cache_file, None)

        try:
            tree = etree.parse(repomd)
            root = tree.getroot()
        except etree.XMLSyntaxError:
            raise TICError("repomd.xml syntax error.")

        # extract the XML namespace prefix, e.g. "{http://linux.duke.edu/metadata/repo}"
        ns = root.tag
        ns = ns[0:ns.rindex("}")+1]

        filepaths = {}
        checksums = {}
        sumtypes = {}
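        # Each metadata file is advertised in repomd.xml as a <data> element,
        # roughly of the form (values illustrative):
        #   <data type="primary">
        #     <location href="repodata/...-primary.xml.gz"/>
        #     <open-checksum type="sha256">...</open-checksum>
        #   </data>
        # Record the location and open-checksum of the primary, patterns and
        # group (comps) entries.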
        for elm in root.findall("%sdata" % ns):
            if elm.attrib['type'] == 'patterns':
                filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
                checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
            elif elm.attrib['type'] == 'group':
                filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
                checksums['comps'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
            elif elm.attrib['type'] == 'primary':
                filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
                checksums['primary'] = elm.find("%sopen-checksum" % ns).text
                sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']

        # download each referenced metadata file; types the repo does not
        # provide are recorded as None
        for item in ("primary", "patterns", "comps"):
            if item not in filepaths:
                filepaths[item] = None
                continue
            filepaths[item] = _get_metadata_from_repo(baseurl,
                                                      None,
                                                      cache_dir,
                                                      reponame,
                                                      filepaths[item],
                                                      sumtypes[item],
                                                      checksums[item])

        my_repodata.append({"name": reponame,
                            "baseurl": baseurl,
                            "repomd": repomd,
                            "primary": filepaths['primary'],
                            "cachedir": cache_dir,
                            "proxies": None,
                            "patterns": filepaths['patterns'],
                            "comps": filepaths['comps']})

    return my_repodata


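# Minimal (name, url) repository descriptor used by the smoke test below;
# get_repodata_from_repos() itself works with dict-like repo definitions.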
RepoType = collections.namedtuple('Repo', 'name, url')
def Repo(name, baseurl):
    return RepoType(name, baseurl)

if __name__ == '__main__':
    repo_url_1 = 'https://download.tizen.org/snapshots/tizen/base/latest/repos/arm64/packagesaaa'
    repo_url_2 = 'https://download.tizen.org/snapshots/tizen/mobile/latest/repos/arm64-wayland/packages'
    repos = []
    # get_repodata_from_repos() looks repos up with .get(), so convert the
    # namedtuples to (ordered) dicts first
    repos.append(Repo('repo_1', repo_url_1)._asdict())
    repos.append(Repo('repo_2', repo_url_2)._asdict())
    cachedir = '/var/tmp/tic-core/cached'
    repodata = get_repodata_from_repos(repos, cachedir)
    print(repodata)