apply python3.x code changes
author    wangbiao <biao716.wang@samsung.com>
Thu, 21 Mar 2024 11:28:49 +0000 (20:28 +0900)
committer wangbiao <biao716.wang@samsung.com>
Thu, 21 Mar 2024 11:28:49 +0000 (20:28 +0900)
Change-Id: Ib9d4a5e9bee9afeb06da3d8b9bdee5826d0c710a
Signed-off-by: wangbiao <biao716.wang@samsung.com>
13 files changed:
mic/3rdparty/pykickstart/parser.py
mic/imager/loop.py
mic/utils/fs_related.py
mic/utils/misc.py
mic/utils/partitionedfs.py
mic/utils/proxy.py
mic/utils/rpmmisc.py
plugins/backend/yumpkgmgr.py
plugins/backend/zypppkgmgr.py
plugins/imager/fs_plugin.py
plugins/imager/loop_plugin.py
plugins/imager/qcow_plugin.py
plugins/imager/raw_plugin.py
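
The per-file hunks below apply a small set of recurring Python 3 conversions: dict views wrapped in list() before they are mutated or indexed, and urlparse moved under urllib.parse. A minimal, self-contained sketch of those idioms (illustrative names only, not taken from mic):

    from urllib.parse import urlparse   # py2 spelling: import urlparse

    def hosts_from(urls, cache):
        # py3 dict views are lazy; materialize with list() before mutating,
        # mirroring the list(self.disks.keys()) change in partitionedfs.py below
        for key in list(cache.keys()):
            if cache[key] is None:
                del cache[key]
        return [urlparse(u).netloc for u in urls]

    print(hosts_from(["http://download.tizen.org/releases"], {"stale": None}))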

Simple merge
index 70932ab13c36615d11c7f34f3f837632d1f3e3bc,ed0dcedb415d59eced078b001ddf43f08298b528..897e211ad7c823e2962f375dca26c9a220484f5d
@@@ -428,7 -428,103 +428,6 @@@ class LoopImageCreator(BaseImageCreator
  
          return env
  
 -    def _stage_final_image(self):
 -
 -        if self.pack_to or self.shrink_image:
 -            self._resparse(0)
 -        else:
 -            self._resparse()
 -
 -        for item in self._instloops:
 -            imgfile = os.path.join(self._imgdir, item['name'])
 -
 -            if item['aft_fstype'] in list(AFTER_MNT_FS.keys()):
 -                mountpoint = misc.mkdtemp()
 -                ext4img = os.path.join(self._imgdir, item['name'])
 -                runner.show('mount -t ext4 %s %s' % (ext4img, mountpoint))
 -                runner.show('ls -al %s' % (mountpoint))
 -#                item['loop'].mount(None, 'not_create')
 -#                point_mnt = os.path.join(self._instroot, item['mountpoint'].lstrip('/'))
 -
 -                fs_suffix = AFTER_MNT_FS[item['aft_fstype']]
 -                if item['aft_fstype'] == "squashfs":
 -#                    fs.mksquashfs(mountpoint, self._outdir+"/"+item['label']+fs_suffix)
 -                    args = "mksquashfs " + mountpoint + " " + self._imgdir+"/"+item['label']+fs_suffix
 -                    if item['squashfsopts']:
 -                        squashfsopts=item['squashfsopts'].replace(',', ' ')
 -                        runner.show("mksquashfs --help")
 -                        runner.show("%s %s" % (args, squashfsopts))
 -                    else:
 -                        runner.show("%s " % args)
 -
 -                    if item['squashfsoptions_maxsize']:
 -                        squashfsoptions_maxsize=int(item['squashfsoptions_maxsize']) * 1024 * 1024
 -                        imgsize = os.stat(self._imgdir+"/"+item['label']+fs_suffix).st_size
 -                        if imgsize > squashfsoptions_maxsize:
 -                            msger.error("squashfs img size is too large (%d > %d)" % (imgsize, squashfsoptions_maxsize))
 -                            sys.exit()
 -
 -                if item['aft_fstype'] == "vdfs":
 -                    ##FIXME temporary code - replace this with fs.mkvdfs()
 -                    if item['vdfsopts']:
 -                        vdfsopts=item['vdfsopts'].replace(',', ' ')
 -                    else:
 -                        vdfsopts="-i -z 1024M"
 -
 -                    fullpathmkvdfs = "mkfs.vdfs" #find_binary_path("mkfs.vdfs")
 -                    runner.show("%s --help" % fullpathmkvdfs)
 -#                    fs.mkvdfs(mountpoint, self._outdir+"/"+item['label']+fs_suffix, vdfsopts)
 -                    ret = runner.show('%s %s -r %s %s' % (fullpathmkvdfs, vdfsopts, mountpoint, self._imgdir+"/"+item['label']+fs_suffix))
 -                    if ret != 0:
 -                        runner.show("mkfs.vdfs return error")
 -                        raise VdfsError("' %s' exited with error (%d)" % (fullpathmkvdfs, ret))
 -
 -                runner.show('umount %s' % mountpoint)
 -#               os.unlink(mountpoint)
 -                runner.show('mv %s %s' % (self._imgdir+"/"+item['label']+fs_suffix, self._imgdir+"/"+item['label']+".img") )
 -                runner.show('ls -al %s' % self._imgdir)
 -
 -            if item['fstype'] == "ext4":
 -                if not item['cpioopts']:
 -                    runner.show('/sbin/tune2fs -O ^huge_file,extents,uninit_bg %s '
 -                            % imgfile)
 -                    runner.quiet(["/sbin/e2fsck", "-f", "-y", imgfile])
 -            self.image_files.setdefault('partitions', {}).update(
 -                    {item['mountpoint']: item['label']})
 -            if self.compress_image:
 -                compressing(imgfile, self.compress_image)
 -                self.image_files.setdefault('image_files', []).append(
 -                                '.'.join([item['name'], self.compress_image]))
 -            else:
 -                self.image_files.setdefault('image_files', []).append(item['name'])
 -
 -        for item in os.listdir(self._imgdir):
 -            imgfile = os.path.join(self._imgdir, item)
 -            imgsize = os.path.getsize(imgfile)
 -            msger.info("filesystem size of %s : %s bytes" % (item, imgsize))
 -
 -        self.run_sign_scripts()
 -        if not self.pack_to:
 -            for item in os.listdir(self._imgdir):
 -                shutil.move(os.path.join(self._imgdir, item),
 -                            os.path.join(self._outdir, item))
 -        else:
 -            msger.info("Pack all loop images together to %s" % self.pack_to)
 -            dstfile = os.path.join(self._outdir, self.pack_to)
 -            packing(dstfile, self._imgdir)
 -            self.image_files['image_files'] = [self.pack_to]
 -
 -
 -        if self.pack_to:
 -            mountfp_xml = os.path.splitext(self.pack_to)[0]
 -            mountfp_xml = misc.strip_end(mountfp_xml, '.tar') + ".xml"
 -        else:
 -            mountfp_xml = self.name + ".xml"
 -        # save mount points mapping file to xml
 -        save_mountpoints(os.path.join(self._outdir, mountfp_xml),
 -                         self._instloops,
 -                         self.target_arch)
--
      def copy_attachment(self):
          if not hasattr(self, '_attachment') or not self._attachment:
              return
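
The removed _stage_final_image above shelled out to mksquashfs/mkfs.vdfs through runner.show() and enforced a size cap taken from squashfsoptions_maxsize. A hedged sketch of that size check using plain stdlib calls (the helper name is assumed, not mic's API):

    import os
    import subprocess

    def make_squashfs(srcdir, imgfile, opts="", max_mib=None):
        # assumed helper; the real code drives mksquashfs via runner.show()
        cmd = ["mksquashfs", srcdir, imgfile] + opts.replace(",", " ").split()
        subprocess.run(cmd, check=True)          # raises CalledProcessError on failure
        if max_mib is not None:
            limit = int(max_mib) * 1024 * 1024
            size = os.stat(imgfile).st_size
            if size > limit:
                raise RuntimeError("squashfs img size is too large (%d > %d)"
                                   % (size, limit))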
Simple merge
index ef3e9db5f317d2880a0b5ed80d9f9342a3e5e7b5,17136e71880a18da5cd17c1a8dfcc26c41fe08c4..5b17e8c3e3fd367cd3930ef3a5e708241ef75e19
@@@ -703,7 -699,85 +699,6 @@@ def get_arch(repometadata)
  
      return uniq_arch, archlist
  
 -def get_package(pkg, repometadata, arch = None):
 -    ver = ""
 -    priority = 99
 -    target_repo = None
 -    if not arch:
 -        arches = []
 -    elif arch not in rpmmisc.archPolicies:
 -        arches = [arch]
 -    else:
 -        arches = rpmmisc.archPolicies[arch].split(':')
 -        arches.append('noarch')
 -
 -    for repo in repometadata:
 -        if repo["primary"].endswith(".xml"):
 -            root = xmlparse(repo["primary"])
 -            ns = root.getroot().tag
 -            ns = ns[0:ns.rindex("}")+1]
 -            for elm in root.iter("%spackage" % ns):
 -                if elm.find("%sname" % ns).text == pkg and elm.find("%sarch" % ns).text in arches:
 -                    if repo["priority"] != None:
 -                        tmpprior = int(repo["priority"])
 -                        if tmpprior < priority:
 -                            priority = tmpprior
 -                            location = elm.find("%slocation" % ns)
 -                            pkgpath = "%s" % location.attrib['href']
 -                            target_repo = repo
 -                            break
 -                        elif tmpprior > priority:
 -                            break
 -                    version = elm.find("%sversion" % ns)
 -                    tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
 -                    if tmpver > ver:
 -                        ver = tmpver
 -                        location = elm.find("%slocation" % ns)
 -                        pkgpath = "%s" % location.attrib['href']
 -                        target_repo = repo
 -                    break
 -        if repo["primary"].endswith(".sqlite"):
 -            con = sqlite.connect(repo["primary"])
 -            if arch:
 -                sql = 'select version, release, location_href from packages ' \
 -                      'where name = "%s" and arch IN ("%s")' % \
 -                      (pkg, '","'.join(arches))
 -                for row in con.execute(sql):
 -                    tmpver = "%s-%s" % (row[0], row[1])
 -                    if tmpver > ver:
 -                        ver = tmpver
 -                        pkgpath = "%s" % row[2]
 -                        target_repo = repo
 -                    break
 -            else:
 -                sql = 'select version, release, location_href from packages ' \
 -                      'where name = "%s"' % pkg
 -                for row in con.execute(sql):
 -                    tmpver = "%s-%s" % (row[0], row[1])
 -                    if tmpver > ver:
 -                        ver = tmpver
 -                        pkgpath = "%s" % row[2]
 -                        target_repo = repo
 -                    break
 -            con.close()
 -    if target_repo:
 -        makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
 -        url = target_repo["baseurl"].join(pkgpath)
 -        filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
 -        if os.path.exists(filename):
 -            ret = rpmmisc.checkRpmIntegrity('rpm', filename)
 -            if ret == 0:
 -                return filename
 -
 -            msger.warning("package %s is damaged: %s" %
 -                          (os.path.basename(filename), filename))
 -            os.unlink(filename)
 -
 -        pkg = myurlgrab(url.full, filename, target_repo["proxies"])
 -        return pkg
 -    else:
 -        return None
--
  def get_source_name(pkg, repometadata):
  
      def get_bin_name(pkg):
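
The removed get_package above walked either primary.xml or primary.sqlite and kept the highest "version-release" string per package before downloading it. A compressed sketch of the sqlite branch (hypothetical helper; column names as in the removed hunk):

    import sqlite3

    def newest_location(primary_db, pkg, arches=None):
        # hypothetical helper mirroring the sqlite branch of the removed get_package
        con = sqlite3.connect(primary_db)
        try:
            if arches:
                marks = ",".join("?" * len(arches))
                rows = con.execute("select version, release, location_href "
                                   "from packages where name = ? and arch in (%s)" % marks,
                                   [pkg] + list(arches))
            else:
                rows = con.execute("select version, release, location_href "
                                   "from packages where name = ?", (pkg,))
            best = None
            for ver, rel, href in rows:
                key = "%s-%s" % (ver, rel)   # plain string compare, as in the original
                if best is None or key > best[0]:
                    best = (key, href)
            return best[1] if best else None
        finally:
            con.close()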
index 4ef8d9f4c73257a9509a5785744a6e77a0e5a9b8,ddd29ee378e1b976ffb130a30ea43c80b6554b6c..bb2dc0706c4ce88c691dae1e4f0331f1cf91665a
@@@ -230,9 -399,100 +230,9 @@@ class PartitionedMount(Mount)
  
              del gpt_parser
  
 -    def __map_partitions(self):
 -        """Load it if dm_snapshot isn't loaded. """
 -        load_module("dm_snapshot")
 -
 -        for dev in list(self.disks.keys()):
 -            d = self.disks[dev]
 -            if d['mapped']:
 -                continue
 -
 -            msger.debug("Running kpartx on %s" % d['disk'].device )
 -            rc, kpartx_output = runner.runtool([self.kpartx, "-l", "-v", d['disk'].device])
 -            kpartx_output = kpartx_output.splitlines()
 -
 -            if rc != 0:
 -                raise MountError("Failed to query partition mapping for '%s'" %
 -                                 d['disk'].device)
 -
 -            # Strip trailing blank and mask verbose output
 -            i = 0
 -            while i < len(kpartx_output) and kpartx_output[i][0:4] != "loop":
 -                i = i + 1
 -            kpartx_output = kpartx_output[i:]
 -
 -            # Make sure kpartx reported the right count of partitions
 -            if len(kpartx_output) != d['numpart']:
 -                # If this disk has more than 3 partitions, then in case of MBR
 -                # paritions there is an extended parition. Different versions
 -                # of kpartx behave differently WRT the extended partition -
 -                # some map it, some ignore it. This is why we do the below hack
 -                # - if kpartx reported one more partition and the partition
 -                # table type is "msdos" and the amount of partitions is more
 -                # than 3, we just assume kpartx mapped the extended parition
 -                # and we remove it.
 -                if len(kpartx_output) == d['numpart'] + 1 \
 -                   and d['ptable_format'] == 'msdos' and len(kpartx_output) > 3:
 -                    kpartx_output.pop(3)
 -                else:
 -                    raise MountError("Unexpected number of partitions from " \
 -                                     "kpartx: %d != %d" % \
 -                                        (len(kpartx_output), d['numpart']))
 -
 -            for i in range(len(kpartx_output)):
 -                line = kpartx_output[i]
 -                newdev = line.split()[0]
 -                mapperdev = "/dev/mapper/" + newdev
 -                loopdev = d['disk'].device + newdev[-1]
 -
 -                msger.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
 -                pnum = d['partitions'][i]
 -                self.partitions[pnum]['device'] = loopdev
 -                self.partitions[pnum]['mapper_device'] = mapperdev
 -
 -                # grub's install wants partitions to be named
 -                # to match their parent device + partition num
 -                # kpartx doesn't work like this, so we add compat
 -                # symlinks to point to /dev/mapper
 -                if os.path.lexists(loopdev):
 -                    os.unlink(loopdev)
 -                os.symlink(mapperdev, loopdev)
 -
 -            msger.debug("Adding partx mapping for %s" % d['disk'].device)
 -            rc = runner.show([self.kpartx, "-v", "-sa", d['disk'].device])
 -
 -            if rc != 0:
 -                # Make sure that the device maps are also removed on error case.
 -                # The d['mapped'] isn't set to True if the kpartx fails so
 -                # failed mapping will not be cleaned on cleanup either.
 -                runner.quiet([self.kpartx, "-sd", d['disk'].device])
 -                raise MountError("Failed to map partitions for '%s'" %
 -                                 d['disk'].device)
 -
 -            for p in self.partitions:
 -                if p['mapper_device'] and os.path.islink(p['mapper_device']):
 -                    p['mpath_device'] = resolve_ref(p['mapper_device'])
 -                else:
 -                    p['mpath_device'] = ''
 -
 -            # FIXME: need a better way to fix the latency
 -            import time
 -            time.sleep(1)
 -
 -            if not os.path.exists(mapperdev):
 -                # load mapper device if not updated
 -                runner.quiet([self.dmsetup, "mknodes"])
 -                # still not updated, roll back
 -                if not os.path.exists(mapperdev):
 -                    runner.quiet([self.kpartx, "-sd", d['disk'].device])
 -                    raise MountError("Failed to load mapper devices for '%s'" %
 -                                     d['disk'].device)
 -
 -            d['mapped'] = True
  
      def __unmap_partitions(self):
-         for dev in self.disks.keys():
+         for dev in list(self.disks.keys()):
              d = self.disks[dev]
              if not d['mapped']:
                  continue
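
The removed __map_partitions drove kpartx and trimmed its verbose output down to the loop* lines before pairing them with partition entries. A small sketch of that parsing step (the kpartx output layout is assumed):

    import subprocess

    def kpartx_mappings(image):
        # assumed layout: one "loopXpN <size> <offset> ..." line per partition
        out = subprocess.run(["kpartx", "-l", "-v", image],
                             capture_output=True, text=True, check=True).stdout
        names = [line.split()[0] for line in out.splitlines()
                 if line.startswith("loop")]
        return ["/dev/mapper/" + name for name in names]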
index 27773aeb3aa36b1df7f477b6496a7ed5bc539fc4,af6ab3e504e6e803b1910770424a3c712b197ef7..baaab017bde9bbff0758f4af960c6218e196a57a
@@@ -98,8 -98,72 +98,8 @@@ def _isip(host)
          return True
      return False
  
 -def _set_noproxy_list():
 -    global _my_noproxy, _my_noproxy_list
 -    _my_noproxy_list = []
 -    if not _my_noproxy:
 -        return
 -
 -    #solve in /etc/enviroment contains command like `echo 165.xxx.xxx.{1..255} | sed 's/ /,/g'``
 -    _my_noproxy_bak = _my_noproxy
 -    start = _my_noproxy.find("`")
 -    while(start < len(_my_noproxy) and start != -1):
 -        start = _my_noproxy.find("`",start)
 -        end = _my_noproxy.find("`",start+1)
 -        cmd = _my_noproxy[start+1:end]
 -        pstr = _my_noproxy[start:end+1]
 -        start = end + 1
 -
 -        _my_noproxy=_my_noproxy.replace(pstr,len(pstr)*" ")
 -        try:
 -            c_result = os.popen(cmd).readlines()
 -            if len(c_result) == 0:
 -                continue
 -        except Exception as e:
 -            msger.warning(str(e))
 -            continue
 -        to_list = c_result[0].strip("\n").split(",")
 -        _my_noproxy_list.extend(to_list)
 -
 -    for item in _my_noproxy.split(","):
 -        item = item.strip()
 -        if not item:
 -            continue
 -
 -        if item[0] != '.' and item.find("/") == -1:
 -            # Need to match it
 -            _my_noproxy_list.append({"match":0, "needle":item})
 -
 -        elif item[0] == '.':
 -            # Need to match at tail
 -            _my_noproxy_list.append({"match":1, "needle":item})
 -
 -        elif item.find("/") > 3:
 -            # IP/MASK, need to match at head
 -            needle = item[0:item.find("/")].strip()
 -            ip = _ip_to_int(needle)
 -            netmask = 0
 -            mask = item[item.find("/")+1:].strip()
 -
 -            if mask.isdigit():
 -                netmask = int(mask)
 -                netmask = ~((1<<(32-netmask)) - 1)
 -                ip &= netmask
 -            else:
 -                shift = 24
 -                netmask = 0
 -                for dec in mask.split("."):
 -                    if not dec.isdigit():
 -                        continue
 -                    netmask |= int(dec) << shift
 -                    shift -= 8
 -                ip &= netmask
 -
 -            _my_noproxy_list.append({"match":2, "needle":ip, "netmask":netmask})
 -    _my_noproxy = _my_noproxy_bak
 -
  def _isnoproxy(url):
-     host = urlparse.urlparse(url)[1]
+     host = urllib.parse.urlparse(url)[1]
      # urlparse.urlparse(url) returns (scheme, host, path, parm, query, frag)
  
      if '@' in host:
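
The proxy hunk ports host extraction to urllib.parse; the removed _set_noproxy_list built the entries that hosts are later matched against. An illustrative end-to-end check (function names here are hypothetical):

    import urllib.parse

    def host_of(url):
        host = urllib.parse.urlparse(url).netloc   # py2: urlparse.urlparse(url)[1]
        if "@" in host:
            host = host.split("@", 1)[1]           # drop user:pass@
        if ":" in host:
            host = host.split(":", 1)[0]           # drop :port
        return host

    def is_noproxy(url, entries):
        host = host_of(url)
        return any(host == e or (e.startswith(".") and host.endswith(e))
                   for e in entries)

    print(is_noproxy("http://mirror.internal.example:8080/repo", [".example"]))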
Simple merge
Simple merge
Simple merge
index 371fddc7a8515c818b67bc7716d3dd04cda19a71,d08e3ffd862d23dafb6ecf22fad2a01030848c1c..95a9fdc400a937d95610efec7186fda8e94b8625
mode 100755,100644..100644
index 6796b0390bd5a390a3e00bcbc99d4dbd8b82b199,dc0bfd95503180dfb5a50f139fcd4e9811fe124e..2b38c705c8147cec3e5222ff498519bc1ece79fc
mode 100755,100644..100644
index 399abf7c6477372a41362c350ea23b233886ec6f,2329958f67dc9e07c667a791a748487e20b59ecb..399abf7c6477372a41362c350ea23b233886ec6f
mode 100755,100644..100644
index 524c378e4ba2d5d37cc143388882f6e8fae0a305,e5b35dc16d3f99cb4fb0c1065cd635e8be1646be..d54961c66488669cf4f0461f329b6634ea8fc563
mode 100755,100644..100644