# Copyright (c) 2000 - 2016 Samsung Electronics Co., Ltd. All rights reserved.
#
# @author Chulwoo Shin <cw1.shin@samsung.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import os
import urllib2

from urlgrabber import grabber

from tic.utils import process
from tic.utils.error import TICError, RepoError
from tic.utils.file import copyfile
def myurlgrab2(url, filename):
    """Fetch ``url`` into ``filename`` without urlgrabber.

    ``file:/`` URLs are resolved on the local filesystem: an ``.rpm`` is
    used in place (its source path is returned so local packages are not
    duplicated), any other local file is copied to ``filename``.  Remote
    URLs are downloaded with urllib2.

    @param url: source URL (``file:/...`` or a remote scheme)
    @param filename: destination path for the fetched data
    @return: path of the usable local file -- ``filename``, or the source
             path itself for a local ``.rpm``
    @raise RepoError: a ``file:/`` URL points at a non-existent path
    @raise TICError: the remote download failed
    """
    logger = logging.getLogger(__name__)
    if url.startswith("file:/"):
        filepath = "/%s" % url.replace("file:", "").lstrip('/')
        if not os.path.exists(filepath):
            raise RepoError("URLGrabber error: can't find file %s" % url)
        if url.endswith('.rpm'):
            # Leave local packages where they are; hand back the source path.
            return filepath
        copyfile(filepath, filename)
        logger.info('copy file ' + filepath)
    else:
        try:
            # contextlib.closing: urllib2 response objects are not
            # context managers of their own in Python 2.
            with contextlib.closing(urllib2.urlopen(url)) as op:
                with open(filename, 'w') as f:
                    f.write(op.read())
            logger.info('download file from ' + str(url))
        except urllib2.HTTPError as err:
            if err.code == 404:
                msg = 'The requested url was not found (%s)' % url
            else:
                # NOTE(review): non-404 wording reconstructed -- the
                # original branch is not visible in this chunk; confirm.
                msg = str(err)
            raise TICError(msg)
    return filename
def myurlgrab(url, filename, proxies, progress_obj=None):
    """Fetch ``url`` into ``filename`` using urlgrabber.

    Mirrors :func:`myurlgrab2` for the urlgrabber/pycurl backend:
    ``file:/`` URLs are resolved locally (an ``.rpm`` is used in place,
    other files are copied with ``cp -f`` so repository metadata in the
    source path stays untouched); remote URLs go through
    ``grabber.URLGrabber``.

    @param url: source URL (``file:/...`` or a remote scheme)
    @param filename: destination path for the fetched data
    @param proxies: proxy mapping passed through to urlgrabber
    @param progress_obj: optional urlgrabber progress meter
    @return: path of the usable local file -- ``filename``, or the source
             path itself for a local ``.rpm``
    @raise RepoError: a ``file:/`` URL points at a non-existent path
    @raise TICError: the remote download failed
    """
    logger = logging.getLogger(__name__)
    g = grabber.URLGrabber()
    if url.startswith("file:/"):
        filepath = "/%s" % url.replace("file:", "").lstrip('/')
        if not os.path.exists(filepath):
            raise RepoError("URLGrabber error: can't find file %s" % url)
        if url.endswith('.rpm'):
            # Leave local packages where they are; hand back the source path.
            return filepath
        # untouch repometadata in source path
        process.run(['cp', '-f', filepath, filename])
        logger.info('copy file ' + filepath)
    else:
        try:
            # cast url to str here, sometimes it can be unicode,
            # but pycurl only accept str
            filename = g.urlgrab(url=str(url),
                                 filename=filename,
                                 ssl_verify_host=False,
                                 ssl_verify_peer=False,
                                 proxies=proxies,
                                 http_headers=(('Pragma', 'no-cache'),),
                                 quote=0,
                                 progress_obj=progress_obj)
            logger.info('download file from ' + str(url))
        except grabber.URLGrabError as err:
            # NOTE(review): error branch reconstructed from the visible
            # fragment; errno 14 is urlgrabber's HTTP error -- confirm
            # both the condition and the non-404 wording.
            if err.errno == 14:
                msg = 'The requested url was not found (%s)' % url
            else:
                msg = str(err)
            raise TICError(msg)
    return filename
93 if __name__ == '__main__':