--- /dev/null
+'''
+Created on Mar 7, 2017
+
+@author: shinchulwoo
+'''
+
+import os
+import threading
+from tic.utils.file import make_dirs, decompress_gzip, copyfile_flock
+from tic.utils.grabber import myurlgrab2
+import contextlib
+
+class MyThread(threading.Thread):
+ def __init__(self, name, url):
+ threading.Thread.__init__(self)
+ self.name = name
+ self.url = url
+ self.__suspend = False
+ self.__exit = False
+ self.__path = './test/thread/'
+ self.__cachepath = './test/thread/cache'
+ def run(self):
+ print('%s Thread start !!!' % self.name)
+ path=os.path.join(self.__path, self.name)
+ make_dirs(path)
+ filename=os.path.join(path, os.path.basename(self.url))
+ myurlgrab2(self.url, filename)
+ print('%s Thread download file !!!' % self.name)
+        # Check if the file is compressed or not
+ if filename.endswith(".gz"):
+ decompress_filename = os.path.splitext(filename)[0]
+ filename = decompress_gzip(filename, decompress_filename)
+ print('%s Thread decompress file !!!' % self.name)
+
+ cachefile = os.path.join(self.__cachepath, os.path.basename(filename))
+ copyfile_flock(filename, cachefile)
+ print('%s Thread finish !!!' % self.name)
+
+ def mySuspend(self):
+ self.__suspend = True
+ def myResume(self):
+ self.__suspend = False
+ def myExit(self):
+ self.__exit = True
+
+if __name__ == '__main__':
+ url = ''
+ th1 = MyThread('th1', url)
+ th1.start()
+# th2 = MyThread('th2', url)
+# th2.start()
\ No newline at end of file
import os
import logging
-import time
from tic.dependency import get_installed_packages
from tic.parser.recipe_parser import get_default_recipe, convert_recipe_to_yaml
from tic.repo import get_repodata_from_repos
from tic.pykickstarter import KSoption, kswriter
from tic.utils import process
+from tic.utils import misc
-
-DEFAULT_CACHEDIR='/var/tmp/tic-core/cached'
+DEFAULT_CACHEDIR='/var/tmp/tic-core'
DEFAULT_KICKSTARTDIR='/var/tmp/tic-core/kickstart'
-current_milli_time = lambda: int(round(time.time() * 1000))
-
def analyze(repo_list, recipe_list=None):
logger = logging.getLogger(__name__)
-
if not repo_list and not recipe_list:
raise TICError('No repositories defined')
-
repos = []
recipe = None
#TODO Repository check
if recipe_list and recipe_list[0] == 'default':
recipe = get_default_recipe()
for repo_url in recipe.get('Repositories'):
- repos.append({'name': repo_url.get('Name'),
+ repos.append({'name': repo_url.get('Name'),
'url': repo_url.get('Url')})
else:
number=1
for repo_url in repo_list:
- repos.append({'name': 'repository_%d' % number,
+ repos.append({'name': 'repository_%d' % number,
'url': repo_url})
number = number + 1
-
- start_time = current_milli_time()
+ start_time = misc.get_timestamp()
#Download repodata from repositories (Remote/Local)
repoinfo = get_repodata_from_repos(repos, DEFAULT_CACHEDIR)
- logger.info('time to get repodata from repo: %d ms', current_milli_time() - start_time)
-
- start_time = current_milli_time()
+ logger.info('time to get repodata from repo: %d ms', misc.get_timestamp() - start_time)
+
+ start_time = misc.get_timestamp()
# Parse the xml files for the analysis of package (.rpm)
repo_parser = RepodataParser('armv7l', repoinfo)
pkg_group = repo_parser.parse()
logger.info('packages: %d, provides: %d, files: %d', len(pkg_group['pkg_dict']), len(pkg_group['provides']), len(pkg_group['files']))
- logger.info('time to parse repodata: %d ms', current_milli_time() - start_time)
-
- start_time = current_milli_time()
+ logger.info('time to parse repodata: %d ms', misc.get_timestamp() - start_time)
+
+ start_time = misc.get_timestamp()
# Make a data for TIC (Tizen image creation)
view_data = make_view_data(pkg_group)
# analyze install-dependency
inst_packages = get_installed_packages(recipe, repoinfo, pkg_group)
logger.info('installed package: %d', len(inst_packages))
- logger.info('time to analyze dependency: %d ms', current_milli_time() - start_time)
-
-
- start_time = current_milli_time()
-
+ logger.info('time to analyze dependency: %d ms', misc.get_timestamp() - start_time)
+
result = {'view': view_data,
'data': {'packages': pkg_group.get('pkg_dict'),
'provides': pkg_group.get('provides'),
'conflicts': pkg_group.get('conflicts')},
'repos': repos,
'defaultpackages': inst_packages}
-
return result
def exports(export_type, recipe, packages, outdir, filename=None):
logger = logging.getLogger(__name__)
-
#TODO validation should be checked before request
if not export_type:
export_type='ks'
logger.info('set default export format(.ks)')
-
+
if not recipe:
raise TICError('No recipe defined')
if not packages or type(packages) is not list:
raise TICError('No packages defined')
-
+
#TODO recipe parsing
# Temporary code for 1st prototype release
if recipe.get('name') == 'default':
Options='--ssl_verify=no')
],
Partitions=[
- dict(Name='mobile-mbr',
- Contents='part / --fstype="ext4" --size=3584 --ondisk=sda --active --label platform --fsoptions=defaults,noatime')
+ dict(Name='headless',
+ Contents='part / --size=2000 --ondisk mmcblk0p --fstype=ext4 --label=rootfs --extoptions=\"-J size=16\" \
+ part /opt/ --size=1000 --ondisk mmcblk0p --fstype=ext4 --label=system-data --extoptions="-m 0" \
+ part /boot/kernel/mod_tizen_tm1/lib/modules --size=12 --ondisk mmcblk0p --fstype=ext4 --label=modules')
]
)
return recipe
from tic.utils import file
from tic.utils import process
from tic.utils.error import TICError
-from tic.utils.grabber import myurlgrab
+from tic.utils.grabber import myurlgrab2
+from tic.utils import misc
-
-def _get_uncompressed_data_from_url(url, filename, proxies):
+def _get_uncompressed_data_from_url(url, filename, proxies=None):
# download file
- filename = myurlgrab(url, filename, proxies)
+ filename = myurlgrab2(url, filename)
# Check if file compressed or not
if filename.endswith(".gz"):
decompress_filename = os.path.splitext(filename)[0]
elif filename.endswith(".bz2"):
process.run(['bunzip2', "-f", filename])
filename = os.path.splitext(filename)[0]
-
return filename
-def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
+def _get_metadata_from_repo(baseurl, proxies, tempdir, cachedir, reponame, filehref,
sumtype=None, checksum=None):
logger = logging.getLogger(__name__)
- url = os.path.join(baseurl, filename)
- filename_tmp = str("%s/%s" % (cachedir, os.path.basename(filename)))
+ url = os.path.join(baseurl, filehref)
+ filename_tmp = str("%s/%s" % (cachedir, os.path.basename(filehref)))
if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
filename = os.path.splitext(filename_tmp)[0]
else:
filename = filename_tmp
-
if sumtype and checksum and os.path.exists(filename):
if sumtype == 'sha256':
file_checksum = hashlib.sha256(open(filename, 'rb').read()).hexdigest()
sumcmd = "%ssum" % sumtype
result = process.run([sumcmd, filename])[1].strip()
file_checksum = result.split()[0]
-
+            # reuse the cached file only when its checksum matches
if file_checksum and file_checksum == checksum:
- logger.info('use a cache file - ' + str(url))
+ logger.info('use a cache file - ' + str(filename))
return filename
- return _get_uncompressed_data_from_url(url, filename_tmp, proxies)
-
+ temp_file = os.path.join(tempdir, os.path.basename(filehref))
+ file_path =_get_uncompressed_data_from_url(url, temp_file, proxies)
+ return file.copyfile_flock(file_path, filename)
def get_repodata_from_repos(repos, cachedir):
my_repodata = []
+ temp_path = os.path.join(cachedir, 'temp', str(misc.get_timestamp()))
for repo in repos:
reponame = repo.get('name')
baseurl = repo.get('url')
- cache_dir = os.path.join(cachedir, base64.urlsafe_b64encode(baseurl))
- cache_file = os.path.join(cache_dir, 'repomd.xml')
-
- # make directory for caching
- file.make_dirs(cache_dir)
-
- #TODO: support proxy
+
+        # create a temporary directory for downloaded repodata
+ base64url = base64.urlsafe_b64encode(baseurl)
+ temp_dir = os.path.join(temp_path, base64url);
+ repomd_file = os.path.join(temp_dir, 'repomd.xml')
+ file.make_dirs(temp_dir);
+
+ #TODO: support proxy
url = os.path.join(baseurl, 'repodata/repomd.xml')
- repomd = myurlgrab(url, cache_file, None)
-
+ repomd = myurlgrab2(url, repomd_file)
+
try:
tree = etree.parse(repomd)
root = tree.getroot()
except etree.XMLSyntaxError:
raise TICError("repomd.xml syntax error.")
+            # create a cache directory keyed by the repomd.xml sha256 checksum
+ repo_checksum = hashlib.sha256(open(repomd_file, 'rb').read()).hexdigest();
+ cache_dir = os.path.join(cachedir, 'cached', base64url, repo_checksum)
+ file.make_dirs(cache_dir)
+
ns = root.tag
ns = ns[0:ns.rindex("}")+1]
continue
filepaths[item] = _get_metadata_from_repo(baseurl,
None,
+ temp_dir,
cache_dir,
reponame,
filepaths[item],
sumtypes[item],
checksums[item])
-
my_repodata.append({"name":reponame,
"baseurl":baseurl,
"repomd":repomd,
"proxies":None,
"patterns":filepaths['patterns'],
"comps":filepaths['comps']})
-
return my_repodata
RepoType = collections.namedtuple('Repo', 'name, url')
def Repo(name, baseurl):
- return RepoType(name, baseurl)
-
-if __name__ == '__main__':
- repo_url_1 = 'https://download.tizen.org/snapshots/tizen/base/latest/repos/arm64/packagesaaa'
- repo_url_2 = 'https://download.tizen.org/snapshots/tizen/mobile/latest/repos/arm64-wayland/packages'
- repos = []
- repos.append(Repo('repo_1', repo_url_1))
- repos.append(Repo('repo_2', repo_url_2))
- cachedir = '/var/tmp/tic-core/cached'
- repodata = get_repodata_from_repos(repos, cachedir)
- print(repodata)
-
-
-
+ return RepoType(name, baseurl)
\ No newline at end of file
import logging
from tic import command
from tic.utils import error
-#from flask_cors import CORS
app = Flask(__name__)
-#CORS(app)
@app.route('/')
def index():
import os
import shutil
-def make_dirs(dirname):
+class FileLockException(Exception):
+ pass
+class FileLock(object):
+ def __init__(self, file_name, timeout=10, delay=.05):
+ self.is_locked = False
+ self.lockfile = os.path.join(os.getcwd(), "%s.lock" % file_name)
+ self.file_name = file_name
+ self.timeout = timeout
+ self.delay = delay
+ def acquire(self):
+ self.fd = os.open(self.lockfile, os.O_CREAT|os.O_EXCL|os.O_RDWR)
+ self.is_locked = True
+ def release(self):
+ if self.is_locked:
+ os.close(self.fd)
+ os.unlink(self.lockfile)
+ self.is_locked = False
+ def __enter__(self):
+ if not self.is_locked:
+ self.acquire()
+ return self
+ def __exit__(self, type, value, traceback):
+ if self.is_locked:
+ self.release()
+ def __del__(self):
+ self.release()
+
+def make_dirs(path):
try:
- os.makedirs(dirname)
+ os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST:
raise
make_dirs(os.path.dirname(path))
with(open(path, 'w')) as f:
f.write(data)
-
+
def decompress_gzip(intput_path, output_path):
with(gzip.open(intput_path, 'rb')) as fobj:
f = open(output_path, 'wb')
f.close()
return output_path
+def copyfile_flock(src, dest):
+ ret = dest;
+ try:
+ with FileLock(dest):
+ shutil.copy(src, dest)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ print(e)
+ ret = src
+ return ret
+
def copyfile(src, dst, filename=None):
abs_dst=os.path.abspath(os.path.expanduser(dst))
make_dirs(abs_dst)
import os
import logging
+import urllib2
+import contextlib
from urlgrabber import grabber
from tic.utils.error import TICError, RepoError
from tic.utils import process
+from tic.utils.file import copyfile
+def myurlgrab2(url, filename):
+ logger = logging.getLogger(__name__)
+ if url.startswith("file:/"):
+ filepath = "/%s" % url.replace("file:", "").lstrip('/')
+ if not os.path.exists(filepath):
+ raise RepoError("URLGrabber error: can't find file %s" % url)
+ if url.endswith('.rpm'):
+ return filepath
+ else:
+ copyfile(filepath, filename)
+ logger.info('copy file ' + filepath)
+ else:
+ try:
+ with contextlib.closing(urllib2.urlopen(url)) as op:
+ with open(filename, 'w') as f:
+ f.write(op.read())
+ logger.info('download file from ' + str(url))
+ except urllib2.HTTPError as err:
+ if err.code == 404:
+ msg = 'The requested url was not found (%s)' % url
+ else:
+ msg = str(err)
+ raise TICError(msg)
+ return filename
def myurlgrab(url, filename, proxies, progress_obj = None):
logger = logging.getLogger(__name__)
g = grabber.URLGrabber()
-
if url.startswith("file:/"):
filepath = "/%s" % url.replace("file:", "").lstrip('/')
if not os.path.exists(filepath):
return filename
if __name__ == '__main__':
- # file url
- full_url = 'file://home/shinchulwoo/project/tic_view.json'
- filename = '/var/tmp/tic_view.json'
- myurlgrab(full_url, filename, None)
- # http url
- full_url = 'https://download.tizen.org/snapshots/tizen/mobile/latest/repos/arm64-wayland/packages/repodata/repomd.xml'
- filename = '/var/tmp/repomd.xml'
- myurlgrab(full_url, filename, None)
-
-
+ pass
\ No newline at end of file
--- /dev/null
+import time
+
+get_timestamp = lambda: int(round(time.time() * 1000))
\ No newline at end of file