From: wangbiao
Date: Thu, 21 Mar 2024 11:28:49 +0000 (+0900)
Subject: apply for python3.x code
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fbbfd4c79c4a56ab388328f5ab3edf950a904c20;p=tools%2Fmic.git

apply for python3.x code

Change-Id: Ib9d4a5e9bee9afeb06da3d8b9bdee5826d0c710a
Signed-off-by: wangbiao
---

fbbfd4c79c4a56ab388328f5ab3edf950a904c20
diff --cc mic/imager/loop.py
index 70932ab,ed0dced..897e211
--- a/mic/imager/loop.py
+++ b/mic/imager/loop.py
@@@ -428,7 -428,103 +428,6 @@@ class LoopImageCreator(BaseImageCreator
          return env
 
-    def _stage_final_image(self):
-
-        if self.pack_to or self.shrink_image:
-            self._resparse(0)
-        else:
-            self._resparse()
-
-        for item in self._instloops:
-            imgfile = os.path.join(self._imgdir, item['name'])
-
-            if item['aft_fstype'] in list(AFTER_MNT_FS.keys()):
-                mountpoint = misc.mkdtemp()
-                ext4img = os.path.join(self._imgdir, item['name'])
-                runner.show('mount -t ext4 %s %s' % (ext4img, mountpoint))
-                runner.show('ls -al %s' % (mountpoint))
-#                item['loop'].mount(None, 'not_create')
-#                point_mnt = os.path.join(self._instroot, item['mountpoint'].lstrip('/'))
-
-                fs_suffix = AFTER_MNT_FS[item['aft_fstype']]
-                if item['aft_fstype'] == "squashfs":
-#                    fs.mksquashfs(mountpoint, self._outdir+"/"+item['label']+fs_suffix)
-                    args = "mksquashfs " + mountpoint + " " + self._imgdir+"/"+item['label']+fs_suffix
-                    if item['squashfsopts']:
-                        squashfsopts=item['squashfsopts'].replace(',', ' ')
-                        runner.show("mksquashfs --help")
-                        runner.show("%s %s" % (args, squashfsopts))
-                    else:
-                        runner.show("%s " % args)
-
-                    if item['squashfsoptions_maxsize']:
-                        squashfsoptions_maxsize=int(item['squashfsoptions_maxsize']) * 1024 * 1024
-                        imgsize = os.stat(self._imgdir+"/"+item['label']+fs_suffix).st_size
-                        if imgsize > squashfsoptions_maxsize:
-                            msger.error("squashfs img size is too large (%d > %d)" % (imgsize, squashfsoptions_maxsize))
-                            sys.exit()
-
-                if item['aft_fstype'] == "vdfs":
-                    ##FIXME temporary code - replace this with fs.mkvdfs()
-                    if item['vdfsopts']:
-                        vdfsopts=item['vdfsopts'].replace(',', ' ')
-                    else:
-                        vdfsopts="-i -z 1024M"
-
-                    fullpathmkvdfs = "mkfs.vdfs" #find_binary_path("mkfs.vdfs")
-                    runner.show("%s --help" % fullpathmkvdfs)
-#                    fs.mkvdfs(mountpoint, self._outdir+"/"+item['label']+fs_suffix, vdfsopts)
-                    ret = runner.show('%s %s -r %s %s' % (fullpathmkvdfs, vdfsopts, mountpoint, self._imgdir+"/"+item['label']+fs_suffix))
-                    if ret != 0:
-                        runner.show("mkfs.vdfs return error")
-                        raise VdfsError("' %s' exited with error (%d)" % (fullpathmkvdfs, ret))
-
-                runner.show('umount %s' % mountpoint)
-#                os.unlink(mountpoint)
-                runner.show('mv %s %s' % (self._imgdir+"/"+item['label']+fs_suffix, self._imgdir+"/"+item['label']+".img") )
-                runner.show('ls -al %s' % self._imgdir)
-
-            if item['fstype'] == "ext4":
-                if not item['cpioopts']:
-                    runner.show('/sbin/tune2fs -O ^huge_file,extents,uninit_bg %s '
-                                % imgfile)
-                    runner.quiet(["/sbin/e2fsck", "-f", "-y", imgfile])
-            self.image_files.setdefault('partitions', {}).update(
-                    {item['mountpoint']: item['label']})
-            if self.compress_image:
-                compressing(imgfile, self.compress_image)
-                self.image_files.setdefault('image_files', []).append(
-                                '.'.join([item['name'], self.compress_image]))
-            else:
-                self.image_files.setdefault('image_files', []).append(item['name'])
-
-        for item in os.listdir(self._imgdir):
-            imgfile = os.path.join(self._imgdir, item)
-            imgsize = os.path.getsize(imgfile)
-            msger.info("filesystem size of %s : %s bytes" % (item, imgsize))
-
-        self.run_sign_scripts()
-        if not self.pack_to:
-            for item in os.listdir(self._imgdir):
-                shutil.move(os.path.join(self._imgdir, item),
-                            os.path.join(self._outdir, item))
-        else:
-            msger.info("Pack all loop images together to %s" % self.pack_to)
-            dstfile = os.path.join(self._outdir, self.pack_to)
-            packing(dstfile, self._imgdir)
-            self.image_files['image_files'] = [self.pack_to]
-
-
-        if self.pack_to:
-            mountfp_xml = os.path.splitext(self.pack_to)[0]
-            mountfp_xml = misc.strip_end(mountfp_xml, '.tar') + ".xml"
-        else:
-            mountfp_xml = self.name + ".xml"
-        # save mount points mapping file to xml
-        save_mountpoints(os.path.join(self._outdir, mountfp_xml),
-                         self._instloops,
-                         self.target_arch)
--
      def copy_attachment(self):
          if not hasattr(self, '_attachment') or not self._attachment:
              return
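
The `item['aft_fstype'] in list(AFTER_MNT_FS.keys())` test above is the Python 3 idiom this port applies throughout: dict.keys() now returns a view, so code that resizes the dict while looping must copy the keys first, while a plain membership test can use the dict directly. A minimal sketch, not part of the patch, with an illustrative mapping standing in for the real AFTER_MNT_FS:

# Stand-in for the AFTER_MNT_FS mapping used in loop.py; the values are illustrative.
AFTER_MNT_FS = {"squashfs": ".squashfs", "vdfs": ".vdfs"}

aft_fstype = "squashfs"

# Membership test: the dict itself is enough under Python 3.
if aft_fstype in AFTER_MNT_FS:
    fs_suffix = AFTER_MNT_FS[aft_fstype]

# Copying the keys only matters when the dict is resized inside the loop.
for name in list(AFTER_MNT_FS.keys()):
    if name == "vdfs":
        del AFTER_MNT_FS[name]  # safe, because we iterate over a snapshot
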
diff --cc mic/utils/misc.py
index ef3e9db,17136e7..5b17e8c
--- a/mic/utils/misc.py
+++ b/mic/utils/misc.py
@@@ -703,7 -699,85 +699,6 @@@ def get_arch(repometadata)
      return uniq_arch, archlist
 
-def get_package(pkg, repometadata, arch = None):
-    ver = ""
-    priority = 99
-    target_repo = None
-    if not arch:
-        arches = []
-    elif arch not in rpmmisc.archPolicies:
-        arches = [arch]
-    else:
-        arches = rpmmisc.archPolicies[arch].split(':')
-    arches.append('noarch')
-
-    for repo in repometadata:
-        if repo["primary"].endswith(".xml"):
-            root = xmlparse(repo["primary"])
-            ns = root.getroot().tag
-            ns = ns[0:ns.rindex("}")+1]
-            for elm in root.iter("%spackage" % ns):
-                if elm.find("%sname" % ns).text == pkg and elm.find("%sarch" % ns).text in arches:
-                    if repo["priority"] != None:
-                        tmpprior = int(repo["priority"])
-                        if tmpprior < priority:
-                            priority = tmpprior
-                            location = elm.find("%slocation" % ns)
-                            pkgpath = "%s" % location.attrib['href']
-                            target_repo = repo
-                            break
-                        elif tmpprior > priority:
-                            break
-                    version = elm.find("%sversion" % ns)
-                    tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
-                    if tmpver > ver:
-                        ver = tmpver
-                        location = elm.find("%slocation" % ns)
-                        pkgpath = "%s" % location.attrib['href']
-                        target_repo = repo
-                    break
-        if repo["primary"].endswith(".sqlite"):
-            con = sqlite.connect(repo["primary"])
-            if arch:
-                sql = 'select version, release, location_href from packages ' \
-                      'where name = "%s" and arch IN ("%s")' % \
-                      (pkg, '","'.join(arches))
-                for row in con.execute(sql):
-                    tmpver = "%s-%s" % (row[0], row[1])
-                    if tmpver > ver:
-                        ver = tmpver
-                        pkgpath = "%s" % row[2]
-                        target_repo = repo
-                    break
-            else:
-                sql = 'select version, release, location_href from packages ' \
-                      'where name = "%s"' % pkg
-                for row in con.execute(sql):
-                    tmpver = "%s-%s" % (row[0], row[1])
-                    if tmpver > ver:
-                        ver = tmpver
-                        pkgpath = "%s" % row[2]
-                        target_repo = repo
-                    break
-            con.close()
-    if target_repo:
-        makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
-        url = target_repo["baseurl"].join(pkgpath)
-        filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
-        if os.path.exists(filename):
-            ret = rpmmisc.checkRpmIntegrity('rpm', filename)
-            if ret == 0:
-                return filename
-
-            msger.warning("package %s is damaged: %s" %
-                          (os.path.basename(filename), filename))
-            os.unlink(filename)
-
-        pkg = myurlgrab(url.full, filename, target_repo["proxies"])
-        return pkg
-    else:
-        return None
--
 def get_source_name(pkg, repometadata):
 
     def get_bin_name(pkg):
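
The deleted get_package() helper interpolated the package name straight into its SQL strings. Purely as a reference for the sqlite branch above, the same lookup can be written with sqlite3 placeholders under Python 3; find_package() and its arguments are hypothetical names, and only the table and column names come from the removed code:

import sqlite3

def find_package(primary_db, pkg, arches):
    # Same query as the removed code, but with "?" placeholders instead of
    # string interpolation, so quoting inside pkg or arches cannot break the SQL.
    con = sqlite3.connect(primary_db)
    try:
        marks = ",".join("?" for _ in arches)
        sql = ("select version, release, location_href from packages "
               "where name = ? and arch IN (%s)" % marks)
        return con.execute(sql, [pkg] + list(arches)).fetchall()
    finally:
        con.close()
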
"""Load it if dm_snapshot isn't loaded. """ - load_module("dm_snapshot") - - for dev in list(self.disks.keys()): - d = self.disks[dev] - if d['mapped']: - continue - - msger.debug("Running kpartx on %s" % d['disk'].device ) - rc, kpartx_output = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device]) - kpartx_output = kpartx_output.splitlines() - - if rc != 0: - raise MountError("Failed to query partition mapping for '%s'" % - d['disk'].device) - - # Strip trailing blank and mask verbose output - i = 0 - while i < len(kpartx_output) and kpartx_output[i][0:4] != "loop": - i = i + 1 - kpartx_output = kpartx_output[i:] - - # Make sure kpartx reported the right count of partitions - if len(kpartx_output) != d['numpart']: - # If this disk has more than 3 partitions, then in case of MBR - # paritions there is an extended parition. Different versions - # of kpartx behave differently WRT the extended partition - - # some map it, some ignore it. This is why we do the below hack - # - if kpartx reported one more partition and the partition - # table type is "msdos" and the amount of partitions is more - # than 3, we just assume kpartx mapped the extended parition - # and we remove it. - if len(kpartx_output) == d['numpart'] + 1 \ - and d['ptable_format'] == 'msdos' and len(kpartx_output) > 3: - kpartx_output.pop(3) - else: - raise MountError("Unexpected number of partitions from " \ - "kpartx: %d != %d" % \ - (len(kpartx_output), d['numpart'])) - - for i in range(len(kpartx_output)): - line = kpartx_output[i] - newdev = line.split()[0] - mapperdev = "/dev/mapper/" + newdev - loopdev = d['disk'].device + newdev[-1] - - msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev)) - pnum = d['partitions'][i] - self.partitions[pnum]['device'] = loopdev - self.partitions[pnum]['mapper_device'] = mapperdev - - # grub's install wants partitions to be named - # to match their parent device + partition num - # kpartx doesn't work like this, so we add compat - # symlinks to point to /dev/mapper - if os.path.lexists(loopdev): - os.unlink(loopdev) - os.symlink(mapperdev, loopdev) - - msger.debug("Adding partx mapping for %s" % d['disk'].device) - rc = runner.show([self.kpartx, "-v", "-sa", d['disk'].device]) - - if rc != 0: - # Make sure that the device maps are also removed on error case. - # The d['mapped'] isn't set to True if the kpartx fails so - # failed mapping will not be cleaned on cleanup either. 
diff --cc mic/utils/proxy.py
index 27773ae,af6ab3e..baaab01
--- a/mic/utils/proxy.py
+++ b/mic/utils/proxy.py
@@@ -98,8 -98,72 +98,8 @@@ def _isip(host)
          return True
      return False
 
-def _set_noproxy_list():
-    global _my_noproxy, _my_noproxy_list
-    _my_noproxy_list = []
-    if not _my_noproxy:
-        return
-
-    #solve in /etc/enviroment contains command like `echo 165.xxx.xxx.{1..255} | sed 's/ /,/g'``
-    _my_noproxy_bak = _my_noproxy
-    start = _my_noproxy.find("`")
-    while(start < len(_my_noproxy) and start != -1):
-        start = _my_noproxy.find("`",start)
-        end = _my_noproxy.find("`",start+1)
-        cmd = _my_noproxy[start+1:end]
-        pstr = _my_noproxy[start:end+1]
-        start = end + 1
-
-        _my_noproxy=_my_noproxy.replace(pstr,len(pstr)*" ")
-        try:
-            c_result = os.popen(cmd).readlines()
-            if len(c_result) == 0:
-                continue
-        except Exception as e:
-            msger.warning(str(e))
-            continue
-        to_list = c_result[0].strip("\n").split(",")
-        _my_noproxy_list.extend(to_list)
-
-    for item in _my_noproxy.split(","):
-        item = item.strip()
-        if not item:
-            continue
-
-        if item[0] != '.' and item.find("/") == -1:
-            # Need to match it
-            _my_noproxy_list.append({"match":0, "needle":item})
-
-        elif item[0] == '.':
-            # Need to match at tail
-            _my_noproxy_list.append({"match":1, "needle":item})
-
-        elif item.find("/") > 3:
-            # IP/MASK, need to match at head
-            needle = item[0:item.find("/")].strip()
-            ip = _ip_to_int(needle)
-            netmask = 0
-            mask = item[item.find("/")+1:].strip()
-
-            if mask.isdigit():
-                netmask = int(mask)
-                netmask = ~((1<<(32-netmask)) - 1)
-                ip &= netmask
-            else:
-                shift = 24
-                netmask = 0
-                for dec in mask.split("."):
-                    if not dec.isdigit():
-                        continue
-                    netmask |= int(dec) << shift
-                    shift -= 8
-                ip &= netmask
-
-            _my_noproxy_list.append({"match":2, "needle":ip, "netmask":netmask})
-    _my_noproxy = _my_noproxy_bak
-
 def _isnoproxy(url):
-     host = urlparse.urlparse(url)[1]
+     host = urllib.parse.urlparse(url)[1]
 
      # urlparse.urlparse(url) returns (scheme, host, path, parm, query, frag)
      if '@' in host:
and item.find("/") == -1: - # Need to match it - _my_noproxy_list.append({"match":0, "needle":item}) - - elif item[0] == '.': - # Need to match at tail - _my_noproxy_list.append({"match":1, "needle":item}) - - elif item.find("/") > 3: - # IP/MASK, need to match at head - needle = item[0:item.find("/")].strip() - ip = _ip_to_int(needle) - netmask = 0 - mask = item[item.find("/")+1:].strip() - - if mask.isdigit(): - netmask = int(mask) - netmask = ~((1<<(32-netmask)) - 1) - ip &= netmask - else: - shift = 24 - netmask = 0 - for dec in mask.split("."): - if not dec.isdigit(): - continue - netmask |= int(dec) << shift - shift -= 8 - ip &= netmask - - _my_noproxy_list.append({"match":2, "needle":ip, "netmask":netmask}) - _my_noproxy = _my_noproxy_bak - def _isnoproxy(url): - host = urlparse.urlparse(url)[1] + host = urllib.parse.urlparse(url)[1] # urlparse.urlparse(url) returns (scheme, host, path, parm, query, frag) if '@' in host: diff --cc plugins/imager/fs_plugin.py index 371fddc,d08e3ff..95a9fdc mode 100755,100644..100644 --- a/plugins/imager/fs_plugin.py +++ b/plugins/imager/fs_plugin.py diff --cc plugins/imager/loop_plugin.py index 6796b03,dc0bfd9..2b38c70 mode 100755,100644..100644 --- a/plugins/imager/loop_plugin.py +++ b/plugins/imager/loop_plugin.py diff --cc plugins/imager/qcow_plugin.py index 399abf7,2329958..399abf7 mode 100755,100644..100644 --- a/plugins/imager/qcow_plugin.py +++ b/plugins/imager/qcow_plugin.py diff --cc plugins/imager/raw_plugin.py index 524c378,e5b35dc..d54961c mode 100755,100644..100644 --- a/plugins/imager/raw_plugin.py +++ b/plugins/imager/raw_plugin.py