Initial checkin of micng
author    Zhou Shuangquan <shuangquan.zhou@intel.com>
Wed, 10 Aug 2011 02:20:36 +0000 (10:20 +0800)
committer Zhou Shuangquan <shuangquan.zhou@intel.com>
Wed, 10 Aug 2011 02:20:36 +0000 (10:20 +0800)
Note: add image creation for fs/loop/raw/livecd/liveusb
      add chroot support for fs/loop/raw/livecd/liveusb images
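
      Example invocations (illustrative only; the exact subcommand and option
      names below are assumptions, not taken from this commit):

          micng create fs /path/to/test.ks
          micng create livecd /path/to/test.ks
          micng chroot /path/to/target-rootfs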

Signed-off-by: Zhou Shuangquan <shuangquan.zhou@intel.com>
67 files changed:
distfiles/micng.conf
micng/chroot.py
micng/configmgr.py
micng/creator.py [new file with mode: 0644]
micng/imager/__init__.py
micng/imager/baseimager.py [moved from micng/imager/BaseImageCreator.py with 65% similarity]
micng/imager/fs.py
micng/imager/futhercreator.py [new file with mode: 0644]
micng/imager/livecd.py
micng/imager/liveusb.py [new file with mode: 0644]
micng/imager/loop.py [new file with mode: 0644]
micng/imager/raw.py [new file with mode: 0644]
micng/micng.conf [new file with mode: 0644]
micng/pluginbase/backend_plugin.py
micng/pluginbase/base_plugin.py [deleted file]
micng/pluginbase/hook_plugin.py
micng/pluginbase/imager_plugin.py [changed mode: 0755->0644]
micng/pluginmgr.py
micng/utils/argparse.py [deleted file]
micng/utils/errors.py
micng/utils/misc.py [changed mode: 0755->0644]
micng/utils/partitionedfs.py [new file with mode: 0644]
micng/utils/pkgmanagers/__init__.py [deleted file]
micng/utils/pkgmanagers/yumpkgmgr.py [deleted file]
micng/utils/pkgmanagers/zypppkgmgr.py [deleted file]
plugins/backend/yumpkgmgr.py
plugins/backend/zypppkgmgr.py
plugins/hook/.py [moved from plugins/hook/_hook.py with 100% similarity]
plugins/imager/fs_plugin.py
plugins/imager/livecd_plugin.py
plugins/imager/liveusb_plugin.py [new file with mode: 0644]
plugins/imager/loop_plugin.py [new file with mode: 0644]
plugins/imager/raw_plugin.py [new file with mode: 0644]
setup.py
tests/addcase.sh [new file with mode: 0755]
tests/mic-test.py [new file with mode: 0644]
tests/mic_cases/base/test.ks [new file with mode: 0644]
tests/mic_cases/test-bootstrap/expect [new file with mode: 0644]
tests/mic_cases/test-bootstrap/ks.p [moved from micng/utils/logger.py with 100% similarity]
tests/mic_cases/test-bootstrap/options [new file with mode: 0644]
tests/mic_cases/test-creatprc/expect [new file with mode: 0644]
tests/mic_cases/test-creatprc/ks.p [new file with mode: 0644]
tests/mic_cases/test-creatprc/options [new file with mode: 0644]
tests/mic_cases/test-genimg/ks.p [new file with mode: 0644]
tests/mic_cases/test-genimg/options [new file with mode: 0644]
tests/mic_cases/test-imageformat/expect [new file with mode: 0644]
tests/mic_cases/test-imageformat/ks.p [new file with mode: 0644]
tests/mic_cases/test-imageformat/options [new file with mode: 0644]
tests/mic_cases/test-invalidrepostr/expect [new file with mode: 0644]
tests/mic_cases/test-invalidrepostr/ks.p [new file with mode: 0644]
tests/mic_cases/test-invalidrepostr/options [new file with mode: 0644]
tests/mic_cases/test-misspkgs/expect [new file with mode: 0644]
tests/mic_cases/test-misspkgs/ks.p [new file with mode: 0644]
tests/mic_cases/test-misspkgs/options [new file with mode: 0644]
tests/mic_cases/test-norepo/expect [new file with mode: 0644]
tests/mic_cases/test-norepo/ks.p [new file with mode: 0644]
tests/mic_cases/test-norepo/options [new file with mode: 0644]
tests/mic_cases/test-root/expect [new file with mode: 0644]
tests/mic_cases/test-root/ks.p [moved from micng/utils/error.py with 100% similarity]
tests/mic_cases/test-root/options [new file with mode: 0644]
tests/mic_cases/test-runmode/expect [new file with mode: 0644]
tests/mic_cases/test-runmode/ks.p [new file with mode: 0644]
tests/mic_cases/test-runmode/options [new file with mode: 0644]
tests/testbase.py [new file with mode: 0644]
tools/mic-image-create [deleted file]
tools/micng [changed mode: 0755->0644]
tools/micng.ref [deleted file]

index 7ae705f..53d3a59 100644 (file)
@@ -1,12 +1,14 @@
-[main]
-cachedir= /var/tmp/cache
+[common]
+
+[create]
 tmpdir= /var/tmp
+cachedir= /var/tmp/cache
 outdir= .
-distro_name=MeeGo
-#proxy=http://proxy.yourcompany.com:8080/
-#no_proxy=localhost,127.0.0.0/8,.yourcompany.com
+name=meego
+pkgmgr=zypp
+arch=i586
+#proxy=http://proxy.com
 
-use_comps=1
+[convert]
 
-#run mode: 0 - native, 1 - bootstrap
-run_mode=0
+[chroot]
index e69de29..c322723 100644 (file)
@@ -0,0 +1,242 @@
+#!/usr/bin/python -t
+import os
+import sys
+import glob
+import shutil
+import shlex
+import subprocess
+import micng.utils.fs_related as fs_related
+import micng.utils.misc as misc
+import micng.utils.errors as errors
+
+def cleanup_after_chroot(targettype,imgmount,tmpdir,tmpmnt):
+    if imgmount and targettype == "img":
+        imgmount.cleanup()
+    if tmpdir:
+        shutil.rmtree(tmpdir, ignore_errors = True)
+    if tmpmnt:
+        shutil.rmtree(tmpmnt, ignore_errors = True)
+
+def check_bind_mounts(chrootdir, bindmounts):
+    chrootmounts = []
+    mounts = bindmounts.split(";")
+    for mount in mounts:
+        if mount == "":
+            continue
+        srcdst = mount.split(":")
+        if len(srcdst) == 1:
+           srcdst.append("none")
+        if not os.path.isdir(srcdst[0]):
+            return False
+        if srcdst[1] == "" or srcdst[1] == "none":
+            srcdst[1] = None
+        if srcdst[0] in ("/proc", "/proc/sys/fs/binfmt_misc", "/", "/sys", "/dev", "/dev/pts", "/dev/shm", "/var/lib/dbus", "/var/run/dbus", "/var/lock"):
+            continue
+        if chrootdir:
+            if not srcdst[1]:
+                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[0]))
+            else:
+                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
+            tmpdir = chrootdir + "/" + srcdst[1]
+            if os.path.isdir(tmpdir):
+                print "Warning: dir %s has existed."  % tmpdir
+    return True
+
+def cleanup_mounts(chrootdir):
+    checkpoints = ["/proc/sys/fs/binfmt_misc", "/proc", "/sys", "/dev/pts", "/dev/shm", "/dev", "/var/lib/dbus", "/var/run/dbus", "/var/lock"]
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    umountcmd = misc.find_binary_path("umount")
+    for point in checkpoints:
+        print point
+        args = [ umountcmd, "-l", chrootdir + point ]
+        subprocess.call(args, stdout=dev_null, stderr=dev_null)
+    catcmd = misc.find_binary_path("cat")
+    args = [ catcmd, "/proc/mounts" ]
+    proc_mounts = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null)
+    outputs = proc_mounts.communicate()[0].strip().split("\n")
+    for line in outputs:
+        if line.find(os.path.abspath(chrootdir)) >= 0:
+            if os.path.abspath(chrootdir) == line.split()[1]:
+                continue
+            point = line.split()[1]
+            print point
+            args = [ umountcmd, "-l", point ]
+            ret = subprocess.call(args, stdout=dev_null, stderr=dev_null)
+            if ret != 0:
+                print "ERROR: failed to unmount %s" % point
+                os.close(dev_null)
+                return ret
+    os.close(dev_null)
+    return 0
+
+def setup_chrootenv(chrootdir, bindmounts = None): ## move to mic/utils/misc
+    global chroot_lockfd, chroot_lock
+    def get_bind_mounts(chrootdir, bindmounts):
+        chrootmounts = []
+        if bindmounts in ("", None):
+            bindmounts = ""
+        mounts = bindmounts.split(";")
+        for mount in mounts:
+            if mount == "":
+                continue
+            srcdst = mount.split(":")
+            srcdst[0] = os.path.abspath(os.path.expanduser(srcdst[0]))
+            if len(srcdst) == 1:
+               srcdst.append("none")
+            if not os.path.isdir(srcdst[0]):
+                continue
+            if srcdst[0] in ("/proc", "/proc/sys/fs/binfmt_misc", "/", "/sys", "/dev", "/dev/pts", "/dev/shm", "/var/lib/dbus", "/var/run/dbus", "/var/lock"):
+                pwarning("%s will be mounted by default." % srcdst[0])
+                continue
+            if srcdst[1] == "" or srcdst[1] == "none":
+                srcdst[1] = None
+            else:
+                srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
+                if os.path.isdir(chrootdir + "/" + srcdst[1]):
+                    pwarning("%s has existed in %s , skip it." % (srcdst[1], chrootdir))
+                    continue
+            chrootmounts.append(fs_related.BindChrootMount(srcdst[0], chrootdir, srcdst[1]))
+    
+        """Default bind mounts"""
+        chrootmounts.append(fs_related.BindChrootMount("/proc", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/proc/sys/fs/binfmt_misc", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/sys", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/dev", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/dev/pts", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/dev/shm", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/var/lib/dbus", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/var/run/dbus", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/var/lock", chrootdir, None))
+        chrootmounts.append(fs_related.BindChrootMount("/", chrootdir, "/parentroot", "ro"))
+        for kernel in os.listdir("/lib/modules"):
+            chrootmounts.append(fs_related.BindChrootMount("/lib/modules/" + kernel, chrootdir, None, "ro"))
+    
+        return chrootmounts
+
+    def bind_mount(chrootmounts):
+        for b in chrootmounts:
+            print "bind_mount: %s -> %s" % (b.src, b.dest)
+            b.mount()
+
+    def setup_resolv(chrootdir):
+        shutil.copyfile("/etc/resolv.conf", chrootdir + "/etc/resolv.conf")
+
+    globalmounts = get_bind_mounts(chrootdir, bindmounts)
+    bind_mount(globalmounts)
+    setup_resolv(chrootdir)
+    mtab = "/etc/mtab"
+    dstmtab = chrootdir + mtab
+    if not os.path.islink(dstmtab):
+        shutil.copyfile(mtab, dstmtab)
+    chroot_lock = os.path.join(chrootdir, ".chroot.lock")
+    chroot_lockfd = open(chroot_lock, "w")
+    return globalmounts    
+
+def cleanup_chrootenv(chrootdir, bindmounts = None, globalmounts = []):
+    global chroot_lockfd, chroot_lock
+    def bind_unmount(chrootmounts):
+        chrootmounts.reverse()
+        for b in chrootmounts:
+            print "bind_unmount: %s -> %s" % (b.src, b.dest)
+            b.unmount()
+
+    def cleanup_resolv(chrootdir):
+        fd = open(chrootdir + "/etc/resolv.conf", "w")
+        fd.truncate(0)
+        fd.close()
+
+    def kill_processes(chrootdir):
+        for file in glob.glob("/proc/*/root"):
+            try:
+                if os.readlink(file) == chrootdir:
+                    pid = int(file.split("/")[2])
+                    os.kill(pid, 9)
+            except:
+                pass
+
+    def cleanup_mountdir(chrootdir, bindmounts):
+        if bindmounts == "" or bindmounts == None:
+            return
+        chrootmounts = []
+        mounts = bindmounts.split(";")
+        for mount in mounts:
+            if mount == "":
+                continue
+            srcdst = mount.split(":")
+            if len(srcdst) == 1:
+               srcdst.append("none")
+            if srcdst[1] == "" or srcdst[1] == "none":
+                srcdst[1] = srcdst[0]
+            srcdst[1] = os.path.abspath(os.path.expanduser(srcdst[1]))
+            tmpdir = chrootdir + "/" + srcdst[1]
+            if os.path.isdir(tmpdir):
+                if len(os.listdir(tmpdir)) == 0:
+                    shutil.rmtree(tmpdir, ignore_errors = True)
+                else:
+                    print "Warning: dir %s isn't empty." % tmpdir
+    
+    chroot_lockfd.close()
+    bind_unmount(globalmounts)
+    if not fs_related.my_fuser(chroot_lock):
+        tmpdir = chrootdir + "/parentroot"
+        if len(os.listdir(tmpdir)) == 0:
+            shutil.rmtree(tmpdir, ignore_errors = True)
+        cleanup_resolv(chrootdir)
+        if os.path.exists(chrootdir + "/etc/mtab"):
+            os.unlink(chrootdir + "/etc/mtab")
+        kill_processes(chrootdir)
+    cleanup_mountdir(chrootdir, bindmounts)
+
+def chroot(chrootdir, bindmounts = None, execute = "/bin/bash"):
+    def mychroot():
+        os.chroot(chrootdir)
+        os.chdir("/")
+
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    files_to_check = ["/bin/bash", "/sbin/init"]
+    
+    architecture_found = False
+
+    """ Register statically-linked qemu-arm if it is an ARM fs """
+    qemu_emulator = None
+
+    for ftc in files_to_check:
+        ftc = "%s/%s" % (chrootdir,ftc)
+        
+        # Return code of 'file' is "almost always" 0 based on some man pages,
+        # so we need to check the file existence first.
+        if not os.path.exists(ftc):
+            continue
+
+        filecmd = misc.find_binary_path("file")
+        initp1 = subprocess.Popen([filecmd, ftc], stdout=subprocess.PIPE, stderr=dev_null)
+        fileOutput = initp1.communicate()[0].strip().split("\n")
+        
+        for i in range(len(fileOutput)):
+            if fileOutput[i].find("ARM") > 0:
+                qemu_emulator = misc.setup_qemu_emulator(chrootdir, "arm")
+                architecture_found = True
+                break
+            if fileOutput[i].find("Intel") > 0:
+                architecture_found = True
+                break
+                
+        if architecture_found:
+            break
+                
+    os.close(dev_null)
+    if not architecture_found:
+        raise errors.CreatorError("Failed to get architecture from any of the following files %s from chroot." % files_to_check)
+
+    try:
+        print "Launching shell. Exit to continue."
+        print "----------------------------------"
+        globalmounts = setup_chrootenv(chrootdir, bindmounts)
+        args = shlex.split(execute)
+        subprocess.call(args, preexec_fn = mychroot)
+    except OSError, (err, msg):
+        raise errors.CreatorError("Failed to chroot: %s" % msg)
+    finally:
+        cleanup_chrootenv(chrootdir, bindmounts, globalmounts)
+        if qemu_emulator:
+            os.unlink(chrootdir + qemu_emulator)        
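
A minimal sketch of driving the new chroot helper directly (illustrative only;
it assumes micng is importable, the target rootfs path exists, and the caller
runs as root):

    import micng.chroot

    # bind-mounts /proc, /sys, /dev, ... into the target, then runs a shell inside it
    micng.chroot.chroot("/var/tmp/meego-rootfs", bindmounts=None, execute="/bin/bash")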
index 6a35c21..cbf5460 100644 (file)
@@ -1,62 +1,90 @@
 #!/usr/bin/python -t
 
 import os
+import sys
+import logging
 import micng.utils as utils
 
+DEFAULT_NAME='meego'
 DEFAULT_OUTDIR='.'
-DEFAULT_TMPDIR='/tmp'
-DEFAULT_CACHE='/var/tmp'
+DEFAULT_TMPDIR='/var/tmp'
+DEFAULT_CACHEDIR='/var/cache'
 DEFAULT_GSITECONF='/etc/micng/micng.conf'
-DEFAULT_USITECONF='~/.micng.conf'
+#DEFAULT_USITECONF='~/.micng.conf'
 
 class ConfigMgr(object):
-    def __init__(self, siteconf=None, ksfile=None):
-        self.outdir = DEFAULT_OUTDIR
-        self.tmpdir = DEFAULT_TMPDIR
-        self.cache = DEFAULT_CACHE
+    def __init__(self, siteconf=None, ksconf=None):
+        self.common = {}
+        self.create = {}
+        self.convert = {}
+        self.chroot = {}
+
         self.siteconf = siteconf
-        self.name = 'meego'
-        self.ksfile = ksfile
-        self.kickstart = None
-        self.ksrepos = None
-        self.repometadata = None
+        self.ksconf = ksconf
+
+        self.create['name'] = DEFAULT_NAME
+        self.create["tmpdir"] = DEFAULT_TMPDIR
+        self.create["cachedir"] = DEFAULT_CACHEDIR
+        self.create["outdir"] = DEFAULT_OUTDIR
+
         self.init_siteconf(self.siteconf)
-        self.init_kickstart(self.ksfile)
+        self.init_kickstart(self.ksconf)
 
     def init_siteconf(self, siteconf = None):
         from ConfigParser import SafeConfigParser
         siteconf_parser = SafeConfigParser()
-        siteconf_files = [DEFAULT_GSITECONF, DEFAULT_USITECONF]
+        siteconf_files = [DEFAULT_GSITECONF]
+
+        if not os.path.exists(DEFAULT_GSITECONF):
+            logging.debug("Not exists file: %s" % DEFAULT_GSITECONF)
+            return
 
         if siteconf:
             self.siteconf = siteconf
             siteconf_files = [self.siteconf]
         siteconf_parser.read(siteconf_files)
 
-        for option in siteconf_parser.options('main'):
-            value = siteconf_parser.get('main', option)
-            setattr(self, option, value)
+        for option in siteconf_parser.options('common'):
+            value = siteconf_parser.get('common', option)
+            self.common[option] = value
+
+        for option in siteconf_parser.options('create'):
+            value = siteconf_parser.get('create', option)
+            self.create[option] = value
+
+        for option in siteconf_parser.options('convert'):
+            value = siteconf_parser.get('convert', option)
+            self.convert[option] = value
+
+        for option in siteconf_parser.options('chroot'):
+            value = siteconf_parser.get('chroot', option)
+            self.chroot[option] = value
+
 
-    def init_kickstart(self, ksfile=None):
-        if not ksfile:
+    def init_kickstart(self, ksconf=None):
+        if not ksconf:
+            self.create['ks'] = None
+            self.create['repomd'] = None
             return
-        self.ksfile = ksfile
+        self.ksconf = ksconf
         try:
-            self.kickstart = utils.kickstart.read_kickstart(self.ksfile)
+            self.kickstart = utils.kickstart.read_kickstart(self.ksconf)
             self.ksrepos = utils.misc.get_repostrs_from_ks(self.kickstart)
             print "retrieving repo metadata..."
-            self.repometadata = utils.misc.get_metadata_from_repos(self.ksrepos, self.cache)
+            self.repometadata = utils.misc.get_metadata_from_repos(self.ksrepos, self.create['cachedir'])
+            self.create['ks'] = self.kickstart
+            self.create['repomd'] = self.repometadata
         except OSError, e:
             raise Exception("failed to create image: %s" % e)
         except Exception, e:
-            raise Exception("unable to load kickstart file '%s': %s" % (self.ksfile, e))
+            raise Exception("unable to load kickstart file '%s': %s" % (self.ksconf, e))
 
 
     def setProperty(self, name, value):
         if not hasattr(self, name):
             return None
         #print ">>", name, value
-        if name == 'ksfile':
+        if name == 'ksconf':
             self.init_kickstart(value)
             return True
         if name == 'siteconf':
@@ -69,19 +97,47 @@ class ConfigMgr(object):
             return None
         return getattr(self, name)
 
-configmgr = ConfigMgr()
+    def setCategoryProperty(self, category, name, value):
+        if not hasattr(self, category):
+            raise Exception("Error to parse %s", category)
+        categ = getattr(self, category)
+        categ[name] = value
+
+    def getCategoryProperty(self, category, name):
+        if not hasattr(self, category):
+            raise Exception("Error to parse %s", category)
+        categ = getattr(self, category)
+        return categ[name]
+
+    def getCreateOption(self, name):
+        if not self.create.has_key(name):
+            raise Exception("Attribute Error: not such attribe %s" % name)
+        return self.create[name]
+
+    def getConvertOption(self, name):
+        if not self.convert.has_key(name):
+            raise Exception("Attribute Error: not such attribe %s" % name)
+        return self.convert[name]
+
+    def getChrootOption(self, name):
+        if not self.chroot.has_key(name):
+            raise Exception("Attribute Error: not such attribe %s" % name)
+        return self.chroot[name]
+
+    def dumpAllConfig(self):
+        sys.stdout.write("create options:\n")
+        for key in self.create.keys():
+            sys.stdout.write("%-8s= %s\n" % (key, self.create[key]))
+        sys.stdout.write("convert options:\n")
+        for key in self.convert.keys():
+            sys.stdout.write("%-8s= %s\n" % (key, self.ccnvert[key]))
+        sys.stdout.write("chroot options:\n")
+        for key in self.chroot.keys():
+            sys.stdout.write("%-8s= %s\n" % (key, self.chroot[key]))
 
 def getConfigMgr():
     return configmgr
 
-def setProperty(cinfo, name):
-    if not isinstance(cinfo, ConfigMgr):
-        return None
-    if not hasattr(cinfo, name):
-        return None
-
-def getProperty(cinfo, name):
-    if not isinstance(cinfo, ConfigMgr):
-        return None
-    if not hasattr(cinfo, name):
-        return None
+configmgr = ConfigMgr()
+#configmgr.dumpAllConfig()
+
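
A rough usage sketch of the new category-based ConfigMgr API introduced above
(illustrative only; it relies on the module-level configmgr singleton and the
accessors added in this hunk, and the values shown are the built-in defaults):

    from micng import configmgr

    cfg = configmgr.getConfigMgr()                      # module-level singleton
    cfg.setCategoryProperty('create', 'arch', 'i586')   # override one option
    print cfg.getCreateOption('tmpdir')                 # '/var/tmp' unless micng.conf overrides it
    cfg.dumpAllConfig()                                 # list create/convert/chroot options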
diff --git a/micng/creator.py b/micng/creator.py
new file mode 100644 (file)
index 0000000..ba66b92
--- /dev/null
@@ -0,0 +1,96 @@
+#!/usr/bin/python -t
+
+from __future__ import with_statement
+import os
+import sys
+import string
+import optparse
+import logging
+
+import micng.utils.cmdln as cmdln
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+
+class Creator(cmdln.Cmdln):
+    """
+    ${command_list}
+    ${help_list}
+    ${option_list}
+    """
+    conf = None
+    man_header = None
+    man_footer = None
+
+    def __init__(self, *args, **kwargs):
+        cmdln.Cmdln.__init__(self, *args, **kwargs)
+        # load configmgr
+        self.configmgr = configmgr.getConfigMgr()
+        # load pluginmgr
+        self.pluginmgr = pluginmgr.PluginMgr()
+        self.pluginmgr.loadPlugins()
+        self.plugincmds = self.pluginmgr.getImagerPlugins()
+        # mix-in do_subcmd interface
+        for subcmd, klass in self.plugincmds:
+            if not hasattr(klass, 'do_create'):
+                logging.warn("Unsurpport subcmd: %s" % subcmd)
+                continue
+            func = getattr(klass, 'do_create')
+            setattr(self.__class__, "do_"+subcmd, func)
+
+    def get_optparser(self):
+        optparser = cmdln.CmdlnOptionParser(self)
+        optparser.add_option('-d', '--debug', action='store_true', help='print debug info')
+        optparser.add_option('-v', '--verbose', action='store_true', help='verbose output')
+        #optparser.add_option('-o', '--outdir', type='string', action='store', dest='outdir', default=None, help='output directory')
+        return optparser 
+
+    def preoptparse(self, argv):
+        pass
+
+    def postoptparse(self):
+        if self.options.verbose is True:
+            logging.getLogger().setLevel(logging.INFO)
+        if self.options.debug is True:
+            logging.getLogger().setLevel(logging.DEBUG)
+        #if self.options.outdir is not None:
+        #    self.configmgr.create['outdir'] = self.options.outdir
+
+    def main(self, argv=None):
+        if argv is None:
+            argv = sys.argv
+        else:
+            argv = argv[:] # don't modify caller's list
+
+        self.optparser = self.get_optparser()
+        if self.optparser: # i.e. optparser=None means don't process for opts
+            try:
+                self.preoptparse(argv)
+                self.options, args = self.optparser.parse_args(argv)
+            except cmdln.CmdlnUserError, ex:
+                msg = "%s: %s\nTry '%s help' for info.\n"\
+                      % (self.name, ex, self.name)
+                self.stderr.write(self._str(msg))
+                self.stderr.flush()
+                return 1
+            except cmdln.StopOptionProcessing, ex:
+                return 0
+        else:
+            self.options, args = None, argv[1:]
+        self.postoptparse()
+
+        if args:
+            if os.geteuid() != 0:
+                print >> sys.stderr, "You must run %s as root" % sys.argv[0]
+                return 1
+            try:
+                return self.cmd(args)
+            except Exception, e:
+                print e
+        else:
+            return self.emptyline()
+
+#if __name__ == "__main__":
+#    logging.getLogger().setLevel(logging.ERROR)
+#    create = Creator()
+#    ret = create.main(sys.argv)
+#    sys.exit(ret)
index 9f9e0e3..e69de29 100644 (file)
@@ -1,2 +0,0 @@
-import BaseImageCreator
-import livecd
similarity index 65%
rename from micng/imager/BaseImageCreator.py
rename to micng/imager/baseimager.py
index a5862c6..fb007a9 100644 (file)
@@ -32,15 +32,11 @@ import rpm
 
 from micng.utils.errors import *
 from micng.utils.fs_related import *
-from micng.utils import kickstart
-from micng.utils import pkgmanagers
 from micng.utils.rpmmisc import *
 from micng.utils.misc import *
+from micng.utils import kickstart
 
-FSLABEL_MAXLEN = 32
-"""The maximum string length supported for LoopImageCreator.fslabel."""
-
-class ImageCreator(object):
+class BaseImageCreator(object):
     """Installs a system to a chroot directory.
 
     ImageCreator is the simplest creator class available; it will install and
@@ -54,7 +50,7 @@ class ImageCreator(object):
 
     """
 
-    def __init__(self, ks, name):
+    def __init__(self, createopts = None, pkgmgr = None):
         """Initialize an ImageCreator instance.
 
         ks -- a pykickstart.KickstartParser instance; this instance will be
@@ -65,68 +61,63 @@ class ImageCreator(object):
                 filesystem labels
 
         """
+        print "@@@@@",pkgmgr
+        self.pkgmgr = pkgmgr 
 
-        """ Initialize package managers """
-#package plugin manager
-        self.pkgmgr = pkgmanagers.pkgManager()
-        self.pkgmgr.load_pkg_managers()
-
-        self.ks = ks
-        """A pykickstart.KickstartParser instance."""
-
-        self.name = name
-        """A name for the image."""
-
-        self.distro_name = "MeeGo"
-
-        """Output image file names"""
-        self.outimage = []
+        if createopts:
+            # A pykickstart.KickstartParser instance.
+            self.ks = createopts['ks']
+            self.repometadata = createopts['repomd']
 
-        """A flag to generate checksum"""
-        self._genchecksum = False
+            # A name for the image.
+            self.name = createopts['name']
 
-        self.tmpdir = "/var/tmp"
-        """The directory in which all temporary files will be created."""
+            # The directory in which all temporary files will be created.
+            self.tmpdir = createopts['tmpdir']
 
-        self.cachedir = None
-
-        self._alt_initrd_name = None
+            self.cachedir = createopts['cachedir']
 
+            self.destdir = createopts['outdir']
+        else:
+            self.ks = None
+            self.repometadata = None
+            self.name = None
+            self.tmpdir = None
+            self.cachedir = None
+            self.destdir = None
         self.__builddir = None
         self.__bindmounts = []
 
-        """ Contains the compression method that is used to compress
-        the disk image after creation, e.g., bz2.
-        This value is set with compression_method function. """
-        self.__img_compression_method = None
-
-        # dependent commands to check
         self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe", "passwd"]
 
+        ### to be obsolete
+        self.distro_name = "MeeGo"
+        # Output image file names
+        self.outimage = []
+        # A flag to generate checksum
+        self._genchecksum = False
+        self._alt_initrd_name = None
+        # Compression method used to compress the disk image after creation,
+        # e.g. bz2. This value is set with the compression_method function.
+        self.__img_compression_method = None
+        # dependent commands to check
         self._recording_pkgs = None
-
         self._include_src = None
-
         self._local_pkgs_path = None
-
         # available size in root fs, init to 0
         self._root_fs_avail = 0
-
         # target arch for non-x86 image
         self.target_arch = None
-
-        """ Name of the disk image file that is created. """
+        # Name of the disk image file that is created.
         self._img_name = None
-
-        """ Image format """
+        # Image format
         self.image_format = None
-
-        """ Save qemu emulator file name in order to clean up it finally """
+        # Save the qemu emulator file name so it can be cleaned up at the end
         self.qemu_emulator = None
-
-        """ No ks provided when called by convertor, so skip the dependency check """
+        # No ks provided when called by convertor, so skip the dependency check
         if self.ks:
-            """ If we have btrfs partition we need to check that we have toosl for those """
+            # If we have a btrfs partition we need to check that we have the tools for it
             for part in self.ks.handler.partition.partitions:
                 if part.fstype and part.fstype == "btrfs":
                     self._dep_checks.append("mkfs.btrfs")
@@ -774,8 +765,6 @@ class ImageCreator(object):
 
 
         # initialize pkg list to install
-        #import pdb
-        #pdb.set_trace()
         if self.ks:
             self.__sanity_check()
 
@@ -813,8 +802,6 @@ class ImageCreator(object):
 
         try:
             try:
-                #import pdb
-                #pdb.set_trace()
                 self.__select_packages(pkg_manager)
                 self.__select_groups(pkg_manager)
                 self.__deselect_packages(pkg_manager)
@@ -1011,11 +998,11 @@ class ImageCreator(object):
         configure(), unmount and package().
 
         """
-        self.mount()
+        self.mount(None, self.cachedir)
         self.install()
-        self.configure()
+        self.configure(self.repometadata)
         self.unmount()
-        self.package()
+        self.package(self.destdir)
 
     def print_outimage_info(self):
         print "Your new image can be found here:"
@@ -1080,524 +1067,7 @@ class ImageCreator(object):
 
         self.__img_compression_method = compression_method
 
-    def set_pkg_manager(self, name):
-        self.pkgmgr.set_default_pkg_manager(name)
-
     def get_pkg_manager(self, recording_pkgs=None):
-        pkgmgr_instance = self.pkgmgr.get_default_pkg_manager()
-        if not pkgmgr_instance:
-            raise CreatorError("No package manager available")
-        return pkgmgr_instance(creator = self, recording_pkgs = recording_pkgs)
-
-class LoopImageCreator(ImageCreator):
-    """Installs a system into a loopback-mountable filesystem image.
-
-    LoopImageCreator is a straightforward ImageCreator subclass; the system
-    is installed into an ext3 filesystem on a sparse file which can be
-    subsequently loopback-mounted.
-
-    """
-
-    def __init__(self, ks, name, fslabel = None):
-        """Initialize a LoopImageCreator instance.
-
-        This method takes the same arguments as ImageCreator.__init__() with
-        the addition of:
-
-        fslabel -- A string used as a label for any filesystems created.
-
-        """
-        ImageCreator.__init__(self, ks, name)
-
-        self.__fslabel = None
-        self.fslabel = fslabel
-
-        self.__minsize_KB = 0
-        self.__blocksize = 4096
-        if self.ks:
-            self.__fstype = kickstart.get_image_fstype(self.ks, "ext3")
-            self.__fsopts = kickstart.get_image_fsopts(self.ks, "defaults,noatime")
-        else:
-            self.__fstype = None
-            self.__fsopts = None
-
-        self.__instloop = None
-        self.__imgdir = None
-
-        if self.ks:
-            self.__image_size = kickstart.get_image_size(self.ks,
-                                                         4096L * 1024 * 1024)
-        else:
-            self.__image_size = 0
-
-        self._img_name = self.name + ".img"
-
-    def _set_fstype(self, fstype):
-        self.__fstype = fstype
-
-    def _set_image_size(self, imgsize):
-        self.__image_size = imgsize
-
-    #
-    # Properties
-    #
-    def __get_fslabel(self):
-        if self.__fslabel is None:
-            return self.name
-        else:
-            return self.__fslabel
-    def __set_fslabel(self, val):
-        if val is None:
-            self.__fslabel = None
-        else:
-            self.__fslabel = val[:FSLABEL_MAXLEN]
-    fslabel = property(__get_fslabel, __set_fslabel)
-    """A string used to label any filesystems created.
-
-    Some filesystems impose a constraint on the maximum allowed size of the
-    filesystem label. In the case of ext3 it's 16 characters, but in the case
-    of ISO9660 it's 32 characters.
-
-    mke2fs silently truncates the label, but mkisofs aborts if the label is too
-    long. So, for convenience sake, any string assigned to this attribute is
-    silently truncated to FSLABEL_MAXLEN (32) characters.
-
-    """
-
-    def __get_image(self):
-        if self.__imgdir is None:
-            raise CreatorError("_image is not valid before calling mount()")
-        return self.__imgdir + "/meego.img"
-    _image = property(__get_image)
-    """The location of the image file.
-
-    This is the path to the filesystem image. Subclasses may use this path
-    in order to package the image in _stage_final_image().
-
-    Note, this directory does not exist before ImageCreator.mount() is called.
-
-    Note also, this is a read-only attribute.
-
-    """
-
-    def __get_blocksize(self):
-        return self.__blocksize
-    def __set_blocksize(self, val):
-        if self.__instloop:
-            raise CreatorError("_blocksize must be set before calling mount()")
-        try:
-            self.__blocksize = int(val)
-        except ValueError:
-            raise CreatorError("'%s' is not a valid integer value "
-                               "for _blocksize" % val)
-    _blocksize = property(__get_blocksize, __set_blocksize)
-    """The block size used by the image's filesystem.
-
-    This is the block size used when creating the filesystem image. Subclasses
-    may change this if they wish to use something other than a 4k block size.
-
-    Note, this attribute may only be set before calling mount().
-
-    """
-
-    def __get_fstype(self):
-        return self.__fstype
-    def __set_fstype(self, val):
-        if val != "ext2" and val != "ext3":
-            raise CreatorError("Unknown _fstype '%s' supplied" % val)
-        self.__fstype = val
-    _fstype = property(__get_fstype, __set_fstype)
-    """The type of filesystem used for the image.
-
-    This is the filesystem type used when creating the filesystem image.
-    Subclasses may change this if they wish to use something other ext3.
-
-    Note, only ext2 and ext3 are currently supported.
-
-    Note also, this attribute may only be set before calling mount().
-
-    """
-
-    def __get_fsopts(self):
-        return self.__fsopts
-    def __set_fsopts(self, val):
-        self.__fsopts = val
-    _fsopts = property(__get_fsopts, __set_fsopts)
-    """Mount options of filesystem used for the image.
-
-    This can be specified by --fsoptions="xxx,yyy" in part command in
-    kickstart file.
-    """
-
-    #
-    # Helpers for subclasses
-    #
-    def _resparse(self, size = None):
-        """Rebuild the filesystem image to be as sparse as possible.
-
-        This method should be used by subclasses when staging the final image
-        in order to reduce the actual space taken up by the sparse image file
-        to be as little as possible.
-
-        This is done by resizing the filesystem to the minimal size (thereby
-        eliminating any space taken up by deleted files) and then resizing it
-        back to the supplied size.
-
-        size -- the size in, in bytes, which the filesystem image should be
-                resized to after it has been minimized; this defaults to None,
-                causing the original size specified by the kickstart file to
-                be used (or 4GiB if not specified in the kickstart).
-
-        """
-        return self.__instloop.resparse(size)
-
-    def _base_on(self, base_on):
-        shutil.copyfile(base_on, self._image)
-
-    #
-    # Actual implementation
-    #
-    def _mount_instroot(self, base_on = None):
-        self.__imgdir = self._mkdtemp()
-
-        if not base_on is None:
-            self._base_on(base_on)
-
-        if self.__fstype in ("ext2", "ext3", "ext4"):
-            MyDiskMount = ExtDiskMount
-        elif self.__fstype == "btrfs":
-            MyDiskMount = BtrfsDiskMount
-
-        self.__instloop = MyDiskMount(SparseLoopbackDisk(self._image, self.__image_size),
-                                       self._instroot,
-                                       self.__fstype,
-                                       self.__blocksize,
-                                       self.fslabel)
-
-        try:
-            self.__instloop.mount()
-        except MountError, e:
-            raise CreatorError("Failed to loopback mount '%s' : %s" %
-                               (self._image, e))
-
-    def _unmount_instroot(self):
-        if not self.__instloop is None:
-            self.__instloop.cleanup()
+        return self.pkgmgr(creator = self, recording_pkgs = recording_pkgs)
 
-    def _stage_final_image(self):
-        self._resparse()
-        shutil.move(self._image, self._outdir + "/" + self._img_name)
-
-class LiveImageCreatorBase(LoopImageCreator):
-    """A base class for LiveCD image creators.
-
-    This class serves as a base class for the architecture-specific LiveCD
-    image creator subclass, LiveImageCreator.
-
-    LiveImageCreator creates a bootable ISO containing the system image,
-    bootloader, bootloader configuration, kernel and initramfs.
-
-    """
-
-    def __init__(self, *args):
-        """Initialise a LiveImageCreator instance.
-
-        This method takes the same arguments as ImageCreator.__init__().
-
-        """
-        LoopImageCreator.__init__(self, *args)
-
-        self.skip_compression = False
-        """Controls whether to use squashfs to compress the image."""
-
-        self.skip_minimize = False
-        """Controls whether an image minimizing snapshot should be created.
-
-        This snapshot can be used when copying the system image from the ISO in
-        order to minimize the amount of data that needs to be copied; simply,
-        it makes it possible to create a version of the image's filesystem with
-        no spare space.
-
-        """
-
-        self.actasconvertor = False
-        """A flag which indicates i act as a convertor"""
-
-        if self.ks:
-            self._timeout = kickstart.get_timeout(self.ks, 10)
-        else:
-            self._timeout = 10
-        """The bootloader timeout from kickstart."""
-
-        if self.ks:
-            self._default_kernel = kickstart.get_default_kernel(self.ks, "kernel")
-        else:
-            self._default_kernel = None
-        """The default kernel type from kickstart."""
-
-        self.__isodir = None
-
-        self.__modules = ["=ata", "sym53c8xx", "aic7xxx", "=usb", "=firewire", "=mmc", "=pcmcia", "mptsas"]
-        if self.ks:
-            self.__modules.extend(kickstart.get_modules(self.ks))
-
-        self._dep_checks.extend(["isohybrid", "unsquashfs", "mksquashfs", "dd", "genisoimage"])
-
-    #
-    # Hooks for subclasses
-    #
-    def _configure_bootloader(self, isodir):
-        """Create the architecture specific booloader configuration.
-
-        This is the hook where subclasses must create the booloader
-        configuration in order to allow a bootable ISO to be built.
-
-        isodir -- the directory where the contents of the ISO are to be staged
-
-        """
-        raise CreatorError("Bootloader configuration is arch-specific, "
-                           "but not implemented for this arch!")
-    def _get_menu_options(self):
-        """Return a menu options string for syslinux configuration.
-
-        """
-        r = kickstart.get_menu_args(self.ks)
-        return r
-
-    def _get_kernel_options(self):
-        """Return a kernel options string for bootloader configuration.
-
-        This is the hook where subclasses may specify a set of kernel options
-        which should be included in the images bootloader configuration.
-
-        A sensible default implementation is provided.
-
-        """
-        r = kickstart.get_kernel_args(self.ks)
-        if os.path.exists(self._instroot + "/usr/bin/rhgb") or \
-           os.path.exists(self._instroot + "/usr/bin/plymouth"):
-            r += " rhgb"
-        return r
-
-    def _get_mkisofs_options(self, isodir):
-        """Return the architecture specific mkisosfs options.
-
-        This is the hook where subclasses may specify additional arguments to
-        mkisofs, e.g. to enable a bootable ISO to be built.
-
-        By default, an empty list is returned.
-
-        """
-        return []
-
-    #
-    # Helpers for subclasses
-    #
-    def _has_checkisomd5(self):
-        """Check whether checkisomd5 is available in the install root."""
-        def exists(instroot, path):
-            return os.path.exists(instroot + path)
-
-        if (exists(self._instroot, "/usr/lib/moblin-installer-runtime/checkisomd5") or
-            exists(self._instroot, "/usr/bin/checkisomd5")):
-            if (os.path.exists("/usr/bin/implantisomd5") or
-               os.path.exists("/usr/lib/anaconda-runtime/implantisomd5")):
-                return True
-
-        return False
-
-    def _uncompress_squashfs(self, squashfsimg, outdir):
-        """Uncompress file system from squshfs image"""
-        unsquashfs = find_binary_path("unsquashfs")
-        args = [unsquashfs, "-d", outdir, squashfsimg ]
-        rc = subprocess.call(args)
-        if (rc != 0):
-            raise CreatorError("Failed to uncompress %s." % squashfsimg)
-    #
-    # Actual implementation
-    #
-    def _base_on(self, base_on):
-        """Support Image Convertor"""
-        if self.actasconvertor:
-            if os.path.exists(base_on) and not os.path.isfile(base_on):
-                ddcmd = find_binary_path("dd")
-                args = [ ddcmd, "if=%s" % base_on, "of=%s" % self._image ]
-                print "dd %s -> %s" % (base_on, self._image)
-                rc = subprocess.call(args)
-                if rc != 0:
-                    raise CreatorError("Failed to dd from %s to %s" % (base_on, self._image))
-                self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
-            if os.path.isfile(base_on):
-                print "Copying file system..."
-                shutil.copyfile(base_on, self._image)
-                self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
-            return
-
-        """helper function to extract ext3 file system from a live CD ISO"""
-        isoloop = DiskMount(LoopbackDisk(base_on, 0), self._mkdtemp())
-
-        try:
-            isoloop.mount()
-        except MountError, e:
-            raise CreatorError("Failed to loopback mount '%s' : %s" %
-                               (base_on, e))
-
-        # legacy LiveOS filesystem layout support, remove for F9 or F10
-        if os.path.exists(isoloop.mountdir + "/squashfs.img"):
-            squashimg = isoloop.mountdir + "/squashfs.img"
-        else:
-            squashimg = isoloop.mountdir + "/LiveOS/squashfs.img"
-
-        tmpoutdir = self._mkdtemp()
-        # unsquashfs requires outdir mustn't exist
-        shutil.rmtree(tmpoutdir, ignore_errors = True)
-        self._uncompress_squashfs(squashimg, tmpoutdir)
-
-        try:
-            # legacy LiveOS filesystem layout support, remove for F9 or F10
-            if os.path.exists(tmpoutdir + "/os.img"):
-                os_image = tmpoutdir + "/os.img"
-            else:
-                os_image = tmpoutdir + "/LiveOS/ext3fs.img"
-
-            if not os.path.exists(os_image):
-                raise CreatorError("'%s' is not a valid live CD ISO : neither "
-                                   "LiveOS/ext3fs.img nor os.img exist" %
-                                   base_on)
-
-            print "Copying file system..."
-            shutil.copyfile(os_image, self._image)
-            self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
-        finally:
-            shutil.rmtree(tmpoutdir, ignore_errors = True)
-            isoloop.cleanup()
-
-    def _mount_instroot(self, base_on = None):
-        LoopImageCreator._mount_instroot(self, base_on)
-        self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
-
-    def _unmount_instroot(self):
-        try:
-            os.unlink(self._instroot + "/etc/sysconfig/mkinitrd")
-        except:
-            pass
-        LoopImageCreator._unmount_instroot(self)
-
-    def __ensure_isodir(self):
-        if self.__isodir is None:
-            self.__isodir = self._mkdtemp("iso-")
-        return self.__isodir
-
-    def _get_isodir(self):
-        return self.__ensure_isodir()
-
-    def _set_isodir(self, isodir = None):
-        self.__isodir = isodir
-
-    def _create_bootconfig(self):
-        """Configure the image so that it's bootable."""
-        self._configure_bootloader(self.__ensure_isodir())
-
-    def _get_post_scripts_env(self, in_chroot):
-        env = LoopImageCreator._get_post_scripts_env(self, in_chroot)
-
-        if not in_chroot:
-            env["LIVE_ROOT"] = self.__ensure_isodir()
-
-        return env
-
-    def __write_initrd_conf(self, path):
-        content = ""
-        if not os.path.exists(os.path.dirname(path)):
-            makedirs(os.path.dirname(path))
-        f = open(path, "w")
-
-        content += 'LIVEOS="yes"\n'
-        content += 'PROBE="no"\n'
-        content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n'
-        content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n'
-
-        for module in self.__modules:
-            if module == "=usb":
-                content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n'
-                content += 'MODULES+="usb_storage usbhid "\n'
-            elif module == "=firewire":
-                content += 'MODULES+="firewire-sbp2 firewire-ohci "\n'
-                content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n'
-            elif module == "=mmc":
-                content += 'MODULES+="mmc_block sdhci sdhci-pci "\n'
-            elif module == "=pcmcia":
-                content += 'MODULES+="pata_pcmcia  "\n'
-            else:
-                content += 'MODULES+="' + module + ' "\n'
-        f.write(content)
-        f.close()
-
-    def __create_iso(self, isodir):
-        iso = self._outdir + "/" + self.name + ".iso"
-        genisoimage = find_binary_path("genisoimage")
-        args = [genisoimage,
-                "-J", "-r",
-                "-hide-rr-moved", "-hide-joliet-trans-tbl",
-                "-V", self.fslabel,
-                "-o", iso]
-
-        args.extend(self._get_mkisofs_options(isodir))
-
-        args.append(isodir)
-
-        if subprocess.call(args) != 0:
-            raise CreatorError("ISO creation failed!")
-
-        """ It should be ok still even if you haven't isohybrid """
-        isohybrid = None
-        try:
-            isohybrid = find_binary_path("isohybrid")
-        except:
-            pass
-
-        if isohybrid:
-            args = [isohybrid, "-partok", iso ]
-            if subprocess.call(args) != 0:
-               raise CreatorError("Hybrid ISO creation failed!")
-
-        self.__implant_md5sum(iso)
-
-    def __implant_md5sum(self, iso):
-        """Implant an isomd5sum."""
-        if os.path.exists("/usr/bin/implantisomd5"):
-            implantisomd5 = "/usr/bin/implantisomd5"
-        elif os.path.exists("/usr/lib/anaconda-runtime/implantisomd5"):
-            implantisomd5 = "/usr/lib/anaconda-runtime/implantisomd5"
-        else:
-            logging.warn("isomd5sum not installed; not setting up mediacheck")
-            implantisomd5 = ""
-            return
-
-        subprocess.call([implantisomd5, iso], stdout=sys.stdout, stderr=sys.stderr)
-
-    def _stage_final_image(self):
-        try:
-            makedirs(self.__ensure_isodir() + "/LiveOS")
-
-            minimal_size = self._resparse()
-
-            if not self.skip_minimize:
-                create_image_minimizer(self.__isodir + "/LiveOS/osmin.img",
-                                       self._image, minimal_size)
-
-            if self.skip_compression:
-                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
-            else:
-                makedirs(os.path.join(os.path.dirname(self._image), "LiveOS"))
-                shutil.move(self._image,
-                            os.path.join(os.path.dirname(self._image),
-                                         "LiveOS", "ext3fs.img"))
-                mksquashfs(os.path.dirname(self._image),
-                           self.__isodir + "/LiveOS/squashfs.img")
-
-            self.__create_iso(self.__isodir)
-        finally:
-            shutil.rmtree(self.__isodir, ignore_errors = True)
-            self.__isodir = None
 
index b46c594..b3e4749 100644 (file)
@@ -33,36 +33,31 @@ import rpm
 from micng.utils.errors import *
 from micng.utils.fs_related import *
 from micng.utils import kickstart
-from micng.utils import pkgmanagers
 from micng.utils.rpmmisc import *
 from micng.utils.misc import *
-from BaseImageCreator import ImageCreator
+from baseimager import BaseImageCreator
 
 
-class FsImageCreator(ImageCreator):
-    def __init__(self, ks, name):
-        """Initialize a LoopImageCreator instance.
-
-        This method takes the same arguments as ImageCreator.__init__()
-        """
-        ImageCreator.__init__(self, ks, name)
-
+class FsImageCreator(BaseImageCreator):
+    def __init__(self, cfgmgr = None, pkgmgr = None):
+        BaseImageCreator.__init__(self, cfgmgr, pkgmgr)
         self._fstype = None
         self._fsopts = None
 
     def _stage_final_image(self):
-        """ nothing to do """
+        """ nothing to do"""
         pass
 
     def package(self, destdir = "."):
         self._stage_final_image()
 
+        if not os.path.exists(destdir):
+            makedirs(destdir)
         destdir = os.path.abspath(os.path.expanduser(destdir))
         if self._recording_pkgs:
             self._save_recording_pkgs(destdir)
 
-        print "Copying %s to %s, please be patient to wait (it is slow if they are on different file systems/partitons/disks)" \
-               % (self._instroot, destdir + "/" + self.name)
+        logging.info("Copying %s to %s, please be patient to wait" % (self._instroot, destdir + "/" + self.name))
 
         copycmd = find_binary_path("cp")
         args = [ copycmd, "-af", self._instroot, destdir + "/" + self.name ]
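
A rough sketch of how the reworked imagers are now constructed from a createopts
dict plus a package-manager class, instead of the old (ks, name) pair
(illustrative only; in practice the dict is filled by ConfigMgr from micng.conf
and the kickstart, and pkgmgr comes from a backend plugin):

    from micng.imager.fs import FsImageCreator

    createopts = {
        'ks': None, 'repomd': None,            # normally parsed from a kickstart file
        'name': 'meego', 'tmpdir': '/var/tmp',
        'cachedir': '/var/tmp/cache', 'outdir': '.',
    }
    creator = FsImageCreator(createopts, pkgmgr=None)  # pkgmgr: a backend plugin class, e.g. zypp
    # creator.create() would then run mount/install/configure/unmount/package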
diff --git a/micng/imager/futhercreator.py b/micng/imager/futhercreator.py
new file mode 100644 (file)
index 0000000..b7b5e59
--- /dev/null
@@ -0,0 +1,899 @@
+#
+# futhercreator.py : ImageCreator and LoopImageCreator base classes
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import stat
+import sys
+import tempfile
+import shutil
+import logging
+import subprocess
+
+import selinux
+import yum
+import rpm
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.utils.misc import *
+from micng.utils import kickstart as kickstart
+
+FSLABEL_MAXLEN = 32
+"""The maximum string length supported for LoopImageCreator.fslabel."""
+
+class ImageCreator(object):
+    """Installs a system to a chroot directory.
+
+    ImageCreator is the simplest creator class available; it will install and
+    configure a system image according to the supplied kickstart file.
+
+    e.g.
+
+      import imgcreate
+      ks = imgcreate.read_kickstart("foo.ks")
+      imgcreate.ImageCreator(ks, "foo").create()
+
+    """
+
+    def __init__(self, cfgmgr = None, pkgmgr = None):
+        """Initialize an ImageCreator instance.
+
+        ks -- a pykickstart.KickstartParser instance; this instance will be
+              used to drive the install by e.g. providing the list of packages
+              to be installed, the system configuration and %post scripts
+
+        name -- a name for the image; used for e.g. image filenames or
+                filesystem labels
+
+        releasever -- Value to substitute for $releasever in repo urls
+
+        tmpdir -- Top level directory to use for temporary files and dirs
+        """
+        if not cfgmgr:
+            return
+
+        self.ks = cfgmgr.create['ks']
+        """A pykickstart.KickstartParser instance."""
+
+        self.name = cfgmgr.create['name']
+        """A name for the image."""
+
+        self.tmpdir = cfgmgr.create['tmpdir']
+        """The directory in which all temporary files will be created."""
+        if not os.path.exists(self.tmpdir):
+            makedirs(self.tmpdir)
+
+        self.__builddir = None
+        self.__bindmounts = []
+
+        self.__sanity_check()
+
+    def __del__(self):
+        self.cleanup()
+
+    #
+    # Properties
+    #
+    def __get_instroot(self):
+        if self.__builddir is None:
+            raise CreatorError("_instroot is not valid before calling mount()")
+        return self.__builddir + "/install_root"
+    _instroot = property(__get_instroot)
+    """The location of the install root directory.
+
+    This is the directory into which the system is installed. Subclasses may
+    mount a filesystem image here or copy files to/from here.
+
+    Note, this directory does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    def __get_outdir(self):
+        if self.__builddir is None:
+            raise CreatorError("_outdir is not valid before calling mount()")
+        return self.__builddir + "/out"
+    _outdir = property(__get_outdir)
+    """The staging location for the final image.
+
+    This is where subclasses should stage any files that are part of the final
+    image. ImageCreator.package() will copy any files found here into the
+    requested destination directory.
+
+    Note, this directory does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    #
+    # Hooks for subclasses
+    #
+    def _mount_instroot(self, base_on = None):
+        """Mount or prepare the install root directory.
+
+        This is the hook where subclasses may prepare the install root by e.g.
+        creating and loopback mounting a filesystem image to
+        _instroot.
+
+        There is no default implementation.
+
+        base_on -- this is the value passed to mount() and can be interpreted
+                   as the subclass wishes; it might e.g. be the location of
+                   a previously created ISO containing a system image.
+
+        """
+        pass
+
+    def _unmount_instroot(self):
+        """Undo anything performed in _mount_instroot().
+
+        This is the hook where subclasses must undo anything which was done
+        in _mount_instroot(). For example, if a filesystem image was mounted
+        onto _instroot, it should be unmounted here.
+
+        There is no default implementation.
+
+        """
+        pass
+
+    def _create_bootconfig(self):
+        """Configure the image so that it's bootable.
+
+        This is the hook where subclasses may prepare the image for booting by
+        e.g. creating an initramfs and bootloader configuration.
+
+        This hook is called while the install root is still mounted, after the
+        packages have been installed and the kickstart configuration has been
+        applied, but before the %post scripts have been executed.
+
+        There is no default implementation.
+
+        """
+        pass
+
+    def _stage_final_image(self):
+        """Stage the final system image in _outdir.
+
+        This is the hook where subclasses should place the image in _outdir
+        so that package() can copy it to the requested destination directory.
+
+        By default, this moves the install root into _outdir.
+
+        """
+        shutil.move(self._instroot, self._outdir + "/" + self.name)
+
+    def _get_required_packages(self):
+        """Return a list of required packages.
+
+        This is the hook where subclasses may specify a set of packages which
+        it requires to be installed.
+
+        This returns an empty list by default.
+
+        Note, subclasses should usually chain up to the base class
+        implementation of this hook.
+
+        """
+        return []
+
+    def _get_excluded_packages(self):
+        """Return a list of excluded packages.
+
+        This is the hook where subclasses may specify a set of packages which
+        they require _not_ to be installed.
+
+        This returns an empty list by default.
+
+        Note, subclasses should usually chain up to the base class
+        implementation of this hook.
+
+        """
+        return []
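+
+    # Illustrative sketch (not part of this module): subclasses typically
+    # chain up to these package hooks, e.g.
+    #
+    #   def _get_required_packages(self):
+    #       return ["syslinux"] + ImageCreator._get_required_packages(self)
+    #
+    #   def _get_excluded_packages(self):
+    #       return ["memtest86+"] + ImageCreator._get_excluded_packages(self)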
+
+    def _get_fstab(self):
+        """Return the desired contents of /etc/fstab.
+
+        This is the hook where subclasses may specify the contents of
+        /etc/fstab by returning a string containing the desired contents.
+
+        A sensible default implementation is provided.
+
+        """
+        s =  "/dev/root  /         %s    defaults,noatime 0 0\n" %(self._fstype)
+        s += self._get_fstab_special()
+        return s
+
+    def _get_fstab_special(self):
+        s = "devpts     /dev/pts  devpts  gid=5,mode=620   0 0\n"
+        s += "tmpfs      /dev/shm  tmpfs   defaults         0 0\n"
+        s += "proc       /proc     proc    defaults         0 0\n"
+        s += "sysfs      /sys      sysfs   defaults         0 0\n"
+        return s
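+
+    # Illustrative sketch: a subclass could override _get_fstab() to add its
+    # own entries while keeping the pseudo filesystems, e.g.
+    #
+    #   def _get_fstab(self):
+    #       s = ImageCreator._get_fstab(self)
+    #       s += "/dev/sdb1  /home  ext3  defaults,noatime 0 0\n"  # hypothetical entry
+    #       return s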
+
+    def _get_post_scripts_env(self, in_chroot):
+        """Return an environment dict for %post scripts.
+
+        This is the hook where subclasses may specify some environment
+        variables for %post scripts by returning a dict containing the desired
+        environment.
+
+        By default, this returns an empty dict.
+
+        in_chroot -- whether this %post script is to be executed chroot()ed
+                     into _instroot.
+
+        """
+        return {}
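+
+    # Illustrative sketch: a subclass might expose extra variables to %post
+    # scripts by chaining up to this hook, e.g.
+    #
+    #   def _get_post_scripts_env(self, in_chroot):
+    #       env = ImageCreator._get_post_scripts_env(self, in_chroot)
+    #       env["IMG_NAME"] = self.name    # hypothetical variable name
+    #       return env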
+
+    def _get_kernel_versions(self):
+        """Return a dict detailing the available kernel types/versions.
+
+        This is the hook where subclasses may override what kernel types and
+        versions should be available for e.g. creating the bootloader
+        configuration.
+
+        A dict should be returned mapping the available kernel types to a list
+        of the available versions for those kernels.
+
+        The default implementation uses rpm to iterate over everything
+        providing 'kernel', finds /boot/vmlinuz-* and returns the version
+        obtained from the vmlinuz filename. (This can differ from the kernel
+        RPM's n-v-r in the case of e.g. xen)
+
+        """
+        def get_version(header):
+            version = None
+            for f in header['filenames']:
+                if f.startswith('/boot/vmlinuz-'):
+                    version = f[14:]
+            return version
+
+        ts = rpm.TransactionSet(self._instroot)
+
+        ret = {}
+        for header in ts.dbMatch('provides', 'kernel'):
+            version = get_version(header)
+            if version is None:
+                continue
+
+            name = header['name']
+            if not name in ret:
+                ret[name] = [version]
+            elif not version in ret[name]:
+                ret[name].append(version)
+
+        return ret
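+
+    # The returned mapping looks like, for example (versions illustrative):
+    #
+    #   {"kernel": ["2.6.37.6-0.7"], "kernel-pae": ["2.6.37.6-0.7"]}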
+
+    #
+    # Helpers for subclasses
+    #
+    def _do_bindmounts(self):
+        """Mount various system directories onto _instroot.
+
+        This method is called by mount(), but may also be used by subclasses
+        in order to re-mount the bindmounts after modifying the underlying
+        filesystem.
+
+        """
+        for b in self.__bindmounts:
+            b.mount()
+
+    def _undo_bindmounts(self):
+        """Unmount the bind-mounted system directories from _instroot.
+
+        This method is usually only called by unmount(), but may also be used
+        by subclasses in order to gain access to the filesystem obscured by
+        the bindmounts - e.g. in order to create device nodes on the image
+        filesystem.
+
+        """
+        self.__bindmounts.reverse()
+        for b in self.__bindmounts:
+            b.unmount()
+
+    def _chroot(self):
+        """Chroot into the install root.
+
+        This method may be used by subclasses when executing programs inside
+        the install root e.g.
+
+          subprocess.call(["/bin/ls"], preexec_fn = self._chroot)
+
+        """
+        os.chroot(self._instroot)
+        os.chdir("/")
+
+    def _mkdtemp(self, prefix = "tmp-"):
+        """Create a temporary directory.
+
+        This method may be used by subclasses to create a temporary directory
+        for use in building the final image - e.g. a subclass might create
+        a temporary directory in order to bundle a set of files into a package.
+
+        The subclass may delete this directory if it wishes, but it will be
+        automatically deleted by cleanup().
+
+        The absolute path to the temporary directory is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the directory;
+                  defaults to "tmp-".
+
+        """
+        self.__ensure_builddir()
+        return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix)
+
+    def _mkstemp(self, prefix = "tmp-"):
+        """Create a temporary file.
+
+        This method may be used by subclasses to create a temporary file
+        for use in building the final image - e.g. a subclass might need
+        a temporary location to unpack a compressed file.
+
+        The subclass may delete this file if it wishes, but it will be
+        automatically deleted by cleanup().
+
+        A tuple containing a file descriptor (as returned from os.open()) and
+        the absolute path to the temporary file is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the file;
+                  defaults to "tmp-".
+
+        """
+        self.__ensure_builddir()
+        return tempfile.mkstemp(dir = self.__builddir, prefix = prefix)
+
+    def _mktemp(self, prefix = "tmp-"):
+        """Create a temporary file.
+
+        This method simply calls _mkstemp() and closes the returned file
+        descriptor.
+
+        The absolute path to the temporary file is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the file;
+                  defaults to "tmp-".
+
+        """
+
+        (f, path) = self._mkstemp(prefix)
+        os.close(f)
+        return path
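+
+    # Illustrative sketch: these helpers are intended for subclass hooks that
+    # run after mount(), e.g.
+    #
+    #   workdir = self._mkdtemp(prefix = "unpack-")   # removed by cleanup()
+    #   listing = self._mktemp(prefix = "filelist-")  # removed by cleanup()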
+
+    #
+    # Actual implementation
+    #
+    def __ensure_builddir(self):
+        if not self.__builddir is None:
+            return
+
+        try:
+            self.__builddir = tempfile.mkdtemp(dir =  os.path.abspath(self.tmpdir),
+                                               prefix = "imgcreate-")
+        except OSError, e:
+            raise CreatorError("Failed to create build directory in %s: %s" %
+                               (self.tmpdir, e.strerror))
+
+    def __sanity_check(self):
+        """Ensure that the config we've been given is sane."""
+        if not (kickstart.get_packages(self.ks) or
+                kickstart.get_groups(self.ks)):
+            raise CreatorError("No packages or groups specified")
+
+        kickstart.convert_method_to_repo(self.ks)
+
+        if not kickstart.get_repos(self.ks):
+            raise CreatorError("No repositories specified")
+
+    def __write_fstab(self):
+        fstab = open(self._instroot + "/etc/fstab", "w")
+        fstab.write(self._get_fstab())
+        fstab.close()
+
+    def __create_minimal_dev(self):
+        """Create a minimal /dev so that we don't corrupt the host /dev"""
+        origumask = os.umask(0000)
+        devices = (('null',   1, 3, 0666),
+                   ('urandom',1, 9, 0666),
+                   ('random', 1, 8, 0666),
+                   ('full',   1, 7, 0666),
+                   ('ptmx',   5, 2, 0666),
+                   ('tty',    5, 0, 0666),
+                   ('zero',   1, 5, 0666))
+        links = (("/proc/self/fd", "/dev/fd"),
+                 ("/proc/self/fd/0", "/dev/stdin"),
+                 ("/proc/self/fd/1", "/dev/stdout"),
+                 ("/proc/self/fd/2", "/dev/stderr"))
+
+        for (node, major, minor, perm) in devices:
+            if not os.path.exists(self._instroot + "/dev/" + node):
+                os.mknod(self._instroot + "/dev/" + node, perm | stat.S_IFCHR, os.makedev(major,minor))
+        for (src, dest) in links:
+            if not os.path.exists(self._instroot + dest):
+                os.symlink(src, self._instroot + dest)
+        os.umask(origumask)
+
+    def mount(self, base_on = None, cachedir = None):
+        """Setup the target filesystem in preparation for an install.
+
+        This function sets up the filesystem which the ImageCreator will
+        install into and configure. The ImageCreator class merely creates an
+        install root directory, bind mounts some system directories (e.g. /dev)
+        and writes out /etc/fstab. Other subclasses may also e.g. create a
+        sparse file, format it and loopback mount it to the install root.
+
+        base_on -- a previous install on which to base this install; defaults
+                   to None, causing a new image to be created
+
+        cachedir -- a directory in which to store the Yum cache; defaults to
+                    None, causing a new cache to be created; by setting this
+                    to another directory, the same cache can be reused across
+                    multiple installs.
+
+        """
+        self.__ensure_builddir()
+
+        makedirs(self._instroot)
+        makedirs(self._outdir)
+
+        self._mount_instroot(base_on)
+
+        for d in ("/dev/pts", "/etc", "/boot", "/var/log", "/var/cache/yum", "/sys", "/proc", "/selinux/booleans"):
+            makedirs(self._instroot + d)
+
+        cachesrc = cachedir or (self.__builddir + "/yum-cache")
+        makedirs(cachesrc)
+
+        # bind mount system directories into _instroot
+        for (f, dest) in [("/sys", None), ("/proc", None),
+                          ("/dev/pts", None), ("/dev/shm", None),
+                          (cachesrc, "/var/cache/yum")]:
+            self.__bindmounts.append(BindChrootMount(f, self._instroot, dest))
+
+        self.__create_selinuxfs()
+
+        self._do_bindmounts()
+
+        self.__create_minimal_dev()
+
+        os.symlink("/proc/self/mounts", self._instroot + "/etc/mtab")
+
+        self.__write_fstab()
+
+    def unmount(self):
+        """Unmounts the target filesystem.
+
+        The ImageCreator class detaches the system from the install root, but
+        other subclasses may also detach the loopback mounted filesystem image
+        from the install root.
+
+        """
+        self.__destroy_selinuxfs()
+
+        self._undo_bindmounts()
+
+        self._unmount_instroot()
+
+    def cleanup(self):
+        """Unmounts the target filesystem and deletes temporary files.
+
+        This method calls unmount() and then deletes any temporary files and
+        directories that were created on the host system while building the
+        image.
+
+        Note, make sure to call this method once finished with the creator
+        instance in order to ensure no stale files are left on the host e.g.:
+
+          creator = ImageCreator(ks, name)
+          try:
+              creator.create()
+          finally:
+              creator.cleanup()
+
+        """
+        if not self.__builddir:
+            return
+
+        self.unmount()
+
+        shutil.rmtree(self.__builddir, ignore_errors = True)
+        self.__builddir = None
+
+    def __select_packages(self, ayum):
+        skipped_pkgs = []
+        for pkg in kickstart.get_packages(self.ks,
+                                          self._get_required_packages()):
+            try:
+                ayum.selectPackage(pkg)
+            except yum.Errors.InstallError, e:
+                if kickstart.ignore_missing(self.ks):
+                    skipped_pkgs.append(pkg)
+                else:
+                    raise CreatorError("Failed to find package '%s' : %s" %
+                                       (pkg, e))
+
+        for pkg in skipped_pkgs:
+            logging.warn("Skipping missing package '%s'" % (pkg,))
+
+    def __select_groups(self, ayum):
+        skipped_groups = []
+        for group in kickstart.get_groups(self.ks):
+            try:
+                ayum.selectGroup(group.name, group.include)
+            except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
+                if kickstart.ignore_missing(self.ks):
+                    skipped_groups.append(group)
+                else:
+                    raise CreatorError("Failed to find group '%s' : %s" %
+                                       (group.name, e))
+
+        for group in skipped_groups:
+            logging.warn("Skipping missing group '%s'" % (group.name,))
+
+    def __deselect_packages(self, ayum):
+        for pkg in kickstart.get_excluded(self.ks,
+                                          self._get_excluded_packages()):
+            ayum.deselectPackage(pkg)
+
+    # if the system is running selinux and the kickstart wants it disabled
+    # we need /usr/sbin/lokkit
+    def __can_handle_selinux(self, ayum):
+        file = "/usr/sbin/lokkit"
+        if not kickstart.selinux_enabled(self.ks) and os.path.exists("/selinux/enforce") and not ayum.installHasFile(file):
+            raise CreatorError("Unable to disable SELinux because the installed package set did not include the file %s" % (file))
+
+    def install(self, repo_urls = {}):
+        """Install packages into the install root.
+
+        This function installs the packages listed in the supplied kickstart
+        into the install root. By default, the packages are installed from the
+        repository URLs specified in the kickstart.
+
+        repo_urls -- a dict which maps a repository name to a repository URL;
+                     if supplied, this causes any repository URLs specified in
+                     the kickstart to be overridden.
+
+        """
+        yum_conf = self._mktemp(prefix = "yum.conf-")
+
+        ayum = LiveCDYum(releasever=self.releasever)
+        ayum.setup(yum_conf, self._instroot)
+
+        for repo in kickstart.get_repos(self.ks, repo_urls):
+            (name, baseurl, mirrorlist, proxy, inc, exc) = repo
+
+            yr = ayum.addRepository(name, baseurl, mirrorlist)
+            if inc:
+                yr.includepkgs = inc
+            if exc:
+                yr.exclude = exc
+            if proxy:
+                yr.proxy = proxy
+
+        if kickstart.exclude_docs(self.ks):
+            rpm.addMacro("_excludedocs", "1")
+        if not kickstart.selinux_enabled(self.ks):
+            rpm.addMacro("__file_context_path", "%{nil}")
+        if kickstart.inst_langs(self.ks) != None:
+            rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks))
+
+        try:
+            self.__select_packages(ayum)
+            self.__select_groups(ayum)
+            self.__deselect_packages(ayum)
+
+            self.__can_handle_selinux(ayum)
+
+            ayum.runInstall()
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+        finally:
+            ayum.closeRpmDB()
+            ayum.close()
+            os.unlink(yum_conf)
+
+        # do some clean up to avoid lvm info leakage.  this sucks.
+        for subdir in ("cache", "backup", "archive"):
+            lvmdir = self._instroot + "/etc/lvm/" + subdir
+            try:
+                for f in os.listdir(lvmdir):
+                    os.unlink(lvmdir + "/" + f)
+            except:
+                pass
+
+    def __run_post_scripts(self):
+        for s in kickstart.get_post_scripts(self.ks):
+            (fd, path) = tempfile.mkstemp(prefix = "ks-script-",
+                                          dir = self._instroot + "/tmp")
+
+            os.write(fd, s.script)
+            os.close(fd)
+            os.chmod(path, 0700)
+
+            env = self._get_post_scripts_env(s.inChroot)
+
+            if not s.inChroot:
+                env["INSTALL_ROOT"] = self._instroot
+                preexec = None
+                script = path
+            else:
+                preexec = self._chroot
+                script = "/tmp/" + os.path.basename(path)
+
+            try:
+                subprocess.check_call([s.interp, script],
+                                      preexec_fn = preexec, env = env)
+            except OSError, e:
+                raise CreatorError("Failed to execute %%post script "
+                                   "with '%s' : %s" % (s.interp, e.strerror))
+            except subprocess.CalledProcessError, err:
+                if s.errorOnFail:
+                    raise CreatorError("%%post script failed with code %d "
+                                       % err.returncode)
+                logging.warning("ignoring %%post failure (code %d)"
+                                % err.returncode)
+            finally:
+                os.unlink(path)
+
+    def configure(self):
+        """Configure the system image according to the kickstart.
+
+        This method applies the (e.g. keyboard or network) configuration
+        specified in the kickstart and executes the kickstart %post scripts.
+
+        If necessary, it also prepares the image to be bootable by e.g.
+        creating an initrd and bootloader configuration.
+
+        """
+        ksh = self.ks.handler
+
+        kickstart.LanguageConfig(self._instroot).apply(ksh.lang)
+        kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard)
+        kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone)
+        kickstart.AuthConfig(self._instroot).apply(ksh.authconfig)
+        kickstart.FirewallConfig(self._instroot).apply(ksh.firewall)
+        kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw)
+        kickstart.ServicesConfig(self._instroot).apply(ksh.services)
+        kickstart.XConfig(self._instroot).apply(ksh.xconfig)
+        kickstart.NetworkConfig(self._instroot).apply(ksh.network)
+        kickstart.RPMMacroConfig(self._instroot).apply(self.ks)
+
+        self._create_bootconfig()
+
+        self.__run_post_scripts()
+        kickstart.SelinuxConfig(self._instroot).apply(ksh.selinux)
+
+    def launch_shell(self):
+        """Launch a shell in the install root.
+
+        This method launches a bash shell chroot()ed into the install root;
+        this can be useful for debugging.
+
+        """
+        subprocess.call(["/bin/bash"], preexec_fn = self._chroot)
+
+    def package(self, destdir = "."):
+        """Prepares the created image for final delivery.
+
+        In its simplest form, this method merely copies the install root to the
+        supplied destination directory; other subclasses may choose to package
+        the image by e.g. creating a bootable ISO containing the image and
+        bootloader configuration.
+
+        destdir -- the directory into which the final image should be moved;
+                   this defaults to the current directory.
+
+        """
+        self._stage_final_image()
+
+        for f in os.listdir(self._outdir):
+            shutil.move(os.path.join(self._outdir, f),
+                        os.path.join(destdir, f))
+
+    def create(self):
+        """Install, configure and package an image.
+
+        This method is a utility method which creates an image by calling some
+        of the other methods in the following order - mount(), install(),
+        configure(), unmount() and package().
+
+        """
+        self.mount()
+        self.install()
+        self.configure()
+        self.unmount()
+        self.package()
+
+class LoopImageCreator(ImageCreator):
+    """Installs a system into a loopback-mountable filesystem image.
+
+    LoopImageCreator is a straightforward ImageCreator subclass; the system
+    is installed into an ext3 filesystem on a sparse file which can be
+    subsequently loopback-mounted.
+
+    """
+
+    def __init__(self, ks, name, fslabel=None, releasever=None, tmpdir="/tmp"):
+        """Initialize a LoopImageCreator instance.
+
+        This method takes the same arguments as ImageCreator.__init__() with
+        the addition of:
+
+        fslabel -- A string used as a label for any filesystems created.
+
+        """
+        ImageCreator.__init__(self, ks, name, releasever=releasever, tmpdir=tmpdir)
+
+        self.__fslabel = None
+        self.fslabel = fslabel
+
+        self.__minsize_KB = 0
+        self.__blocksize = 4096
+        self.__fstype = kickstart.get_image_fstype(self.ks, "ext3")
+
+        self.__instloop = None
+        self.__imgdir = None
+
+        self.__image_size = kickstart.get_image_size(self.ks,
+                                                     4096L * 1024 * 1024)
+
+    #
+    # Properties
+    #
+    def __get_fslabel(self):
+        if self.__fslabel is None:
+            return self.name
+        else:
+            return self.__fslabel
+    def __set_fslabel(self, val):
+        if val is None:
+            self.__fslabel = None
+        else:
+            self.__fslabel = val[:FSLABEL_MAXLEN]
+    fslabel = property(__get_fslabel, __set_fslabel)
+    """A string used to label any filesystems created.
+
+    Some filesystems impose a constraint on the maximum allowed size of the
+    filesystem label. In the case of ext3 it's 16 characters, but in the case
+    of ISO9660 it's 32 characters.
+
+    mke2fs silently truncates the label, but mkisofs aborts if the label is too
+    long. So, for convenience sake, any string assigned to this attribute is
+    silently truncated to FSLABEL_MAXLEN (32) characters.
+
+    """
+
+    def __get_image(self):
+        if self.__imgdir is None:
+            raise CreatorError("_image is not valid before calling mount()")
+        return self.__imgdir + "/ext3fs.img"
+    _image = property(__get_image)
+    """The location of the image file.
+
+    This is the path to the filesystem image. Subclasses may use this path
+    in order to package the image in _stage_final_image().
+
+    Note, this file does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    def __get_blocksize(self):
+        return self.__blocksize
+    def __set_blocksize(self, val):
+        if self.__instloop:
+            raise CreatorError("_blocksize must be set before calling mount()")
+        try:
+            self.__blocksize = int(val)
+        except ValueError:
+            raise CreatorError("'%s' is not a valid integer value "
+                               "for _blocksize" % val)
+    _blocksize = property(__get_blocksize, __set_blocksize)
+    """The block size used by the image's filesystem.
+
+    This is the block size used when creating the filesystem image. Subclasses
+    may change this if they wish to use something other than a 4k block size.
+
+    Note, this attribute may only be set before calling mount().
+
+    """
+
+    def __get_fstype(self):
+        return self.__fstype
+    def __set_fstype(self, val):
+        if val not in ("ext2", "ext3", "ext4"):
+            raise CreatorError("Unknown _fstype '%s' supplied" % val)
+        self.__fstype = val
+    _fstype = property(__get_fstype, __set_fstype)
+    """The type of filesystem used for the image.
+
+    This is the filesystem type used when creating the filesystem image.
+    Subclasses may change this if they wish to use something other than ext3.
+
+    Note, only ext2, ext3, ext4 are currently supported.
+
+    Note also, this attribute may only be set before calling mount().
+
+    """
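+
+    # Illustrative sketch: a subclass may adjust these knobs before mount() is
+    # called, e.g.
+    #
+    #   def __init__(self, ks, name, **kwargs):
+    #       LoopImageCreator.__init__(self, ks, name, **kwargs)
+    #       self._fstype = "ext4"      # one of ext2/ext3/ext4
+    #       self._blocksize = 1024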
+
+    #
+    # Helpers for subclasses
+    #
+    def _resparse(self, size = None):
+        """Rebuild the filesystem image to be as sparse as possible.
+
+        This method should be used by subclasses when staging the final image
+        in order to reduce the actual space taken up by the sparse image file
+        to be as little as possible.
+
+        This is done by resizing the filesystem to the minimal size (thereby
+        eliminating any space taken up by deleted files) and then resizing it
+        back to the supplied size.
+
+        size -- the size, in bytes, which the filesystem image should be
+                resized to after it has been minimized; this defaults to None,
+                causing the original size specified by the kickstart file to
+                be used (or 4GiB if not specified in the kickstart).
+
+        """
+        return self.__instloop.resparse(size)
+
+    def _base_on(self, base_on):
+        shutil.copyfile(base_on, self._image)
+        
+    #
+    # Actual implementation
+    #
+    def _mount_instroot(self, base_on = None):
+        self.__imgdir = self._mkdtemp()
+
+        if not base_on is None:
+            self._base_on(base_on)
+
+        self.__instloop = ExtDiskMount(SparseLoopbackDisk(self._image,
+                                                          self.__image_size),
+                                       self._instroot,
+                                       self.__fstype,
+                                       self.__blocksize,
+                                       self.fslabel,
+                                       self.tmpdir)
+
+        try:
+            self.__instloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (self._image, e))
+
+    def _unmount_instroot(self):
+        if not self.__instloop is None:
+            self.__instloop.cleanup()
+
+    def _stage_final_image(self):
+        self._resparse()
+        shutil.move(self._image, self._outdir + "/" + self.name + ".img")
index 7fb754d..011d8c6 100644 (file)
@@ -1,5 +1,5 @@
 #
-#live.py : LiveImageCreator class for creating Live CD images
+# livecd.py : LiveCDImageCreator class for creating Live CD images
 #
 # Copyright 2007, Red Hat  Inc.
 #
@@ -17,7 +17,7 @@
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
 import os
-import os.path
+import sys
 import glob
 import shutil
 import subprocess
@@ -25,14 +25,333 @@ import logging
 import re
 import time
 
+import micng.utils.kickstart as kickstart 
+import micng.utils.fs_related as fs_related
+import micng.utils.rpmmisc as rpmmisc
+import micng.utils.misc as misc
 from micng.utils.errors import *
-from micng.utils.fs_related import *
-from micng.utils.rpmmisc import *
-from BaseImageCreator import LiveImageCreatorBase
+from loop import LoopImageCreator
 
-class LivecdImageCreator(LiveImageCreatorBase):
+class LiveImageCreatorBase(LoopImageCreator):
+    """A base class for LiveCD image creators.
+
+        This class serves as a base class for the architecture-specific LiveCD
+        image creator subclass, LiveImageCreator.
+    
+        LiveImageCreator creates a bootable ISO containing the system image,
+        bootloader, bootloader configuration, kernel and initramfs.
+    """
+
+    def __init__(self, creatoropts = None, pkgmgr = None):
+        """Initialise a LiveImageCreator instance.
+
+           This method takes the same arguments as ImageCreator.__init__().
+        """
+        LoopImageCreator.__init__(self, creatoropts, pkgmgr)
+
+        #Controls whether to use squashfs to compress the image.
+        self.skip_compression = False 
+
+        #Controls whether an image minimizing snapshot should be created.
+        #
+        #This snapshot can be used when copying the system image from the ISO in
+        #order to minimize the amount of data that needs to be copied; simply,
+        #it makes it possible to create a version of the image's filesystem with
+        #no spare space.
+        self.skip_minimize = False 
+
+        #A flag which indicates whether this creator acts as a convertor; defaults to False
+        if creatoropts.has_key('actasconvertor') and creatoropts['actasconvertor']:
+            self.actasconvertor = creatoropts['actasconvertor']
+        else:
+            self.actasconvertor = False
+        
+        #The bootloader timeout from kickstart.
+        if self.ks:
+            self._timeout = kickstart.get_timeout(self.ks, 10)
+        else:
+            self._timeout = 10
+
+        #The default kernel type from kickstart.
+        if self.ks:
+            self._default_kernel = kickstart.get_default_kernel(self.ks, "kernel")
+        else:
+            self._default_kernel = None
+
+
+        self.__isodir = None
+
+        self.__modules = ["=ata", "sym53c8xx", "aic7xxx", "=usb", "=firewire", "=mmc", "=pcmcia", "mptsas"]
+        if self.ks:
+            self.__modules.extend(kickstart.get_modules(self.ks))
+
+        self._dep_checks.extend(["isohybrid", "unsquashfs", "mksquashfs", "dd", "genisoimage"])
+
+    #
+    # Hooks for subclasses
+    #
+    def _configure_bootloader(self, isodir):
+        """Create the architecture specific bootloader configuration.
+
+            This is the hook where subclasses must create the bootloader
+            configuration in order to allow a bootable ISO to be built.
+    
+            isodir -- the directory where the contents of the ISO are to be staged
+        """
+        raise CreatorError("Bootloader configuration is arch-specific, "
+                           "but not implemented for this arch!")
+
+    def _get_menu_options(self):
+        """Return a menu options string for syslinux configuration.
+        """
+        if self.actasconvertor:
+            return "bootinstall autoinst"
+        r = kickstart.get_menu_args(self.ks)
+        return r
+
+    def _get_kernel_options(self):
+        """Return a kernel options string for bootloader configuration.
+
+            This is the hook where subclasses may specify a set of kernel options
+            which should be included in the image's bootloader configuration.
+    
+            A sensible default implementation is provided.
+        """
+        if self.actasconvertor:
+            r = "ro liveimg quiet"
+            if os.path.exists(self._instroot + "/usr/bin/rhgb"):
+                r += " rhgb"
+            if os.path.exists(self._instroot + "/usr/bin/plymouth"):
+                r += " rhgb"
+            return r
+        r = kickstart.get_kernel_args(self.ks)
+        if os.path.exists(self._instroot + "/usr/bin/rhgb") or \
+           os.path.exists(self._instroot + "/usr/bin/plymouth"):
+            r += " rhgb"
+        return r
+
+    def _get_mkisofs_options(self, isodir):
+        """Return the architecture specific mkisofs options.
+
+            This is the hook where subclasses may specify additional arguments to
+            mkisofs, e.g. to enable a bootable ISO to be built.
+    
+            By default, an empty list is returned.
+        """
+        return []
+
+    #
+    # Helpers for subclasses
+    #
+    def _has_checkisomd5(self):
+        """Check whether checkisomd5 is available in the install root."""
+        def exists(instroot, path):
+            return os.path.exists(instroot + path)
+
+        if (exists(self._instroot, "/usr/lib/moblin-installer-runtime/checkisomd5") or
+            exists(self._instroot, "/usr/bin/checkisomd5")):
+            if (os.path.exists("/usr/bin/implantisomd5") or
+               os.path.exists("/usr/lib/anaconda-runtime/implantisomd5")):
+                return True
+
+        return False
+
+    def _uncompress_squashfs(self, squashfsimg, outdir):
+        """Uncompress the file system from a squashfs image."""
+        unsquashfs = fs_related.find_binary_path("unsquashfs")
+        args = [unsquashfs, "-d", outdir, squashfsimg ]
+        rc = subprocess.call(args)
+        if (rc != 0):
+            raise CreatorError("Failed to uncompress %s." % squashfsimg)
+    #
+    # Actual implementation
+    #
+    def _base_on(self, base_on):
+        """Support Image Convertor"""
+        if self.actasconvertor:
+            if os.path.exists(base_on) and not os.path.isfile(base_on):
+                ddcmd = fs_related.find_binary_path("dd")
+                args = [ ddcmd, "if=%s" % base_on, "of=%s" % self._image ]
+                print "dd %s -> %s" % (base_on, self._image)
+                rc = subprocess.call(args)
+                if rc != 0:
+                    raise CreatorError("Failed to dd from %s to %s" % (base_on, self._image))
+                self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+            if os.path.isfile(base_on):
+                print "Copying file system..."
+                shutil.copyfile(base_on, self._image)
+                self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+            return
+
+        # extract the ext3 file system from the live CD ISO
+        isoloop = fs_related.DiskMount(fs_related.LoopbackDisk(base_on, 0), self._mkdtemp())
+
+        try:
+            isoloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (base_on, e))
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(isoloop.mountdir + "/squashfs.img"):
+            squashimg = isoloop.mountdir + "/squashfs.img"
+        else:
+            squashimg = isoloop.mountdir + "/LiveOS/squashfs.img"
+
+        tmpoutdir = self._mkdtemp()
+        # unsquashfs requires that the output directory not exist yet
+        shutil.rmtree(tmpoutdir, ignore_errors = True)
+        self._uncompress_squashfs(squashimg, tmpoutdir)
+
+        try:
+            # legacy LiveOS filesystem layout support, remove for F9 or F10
+            if os.path.exists(tmpoutdir + "/os.img"):
+                os_image = tmpoutdir + "/os.img"
+            else:
+                os_image = tmpoutdir + "/LiveOS/ext3fs.img"
+
+            if not os.path.exists(os_image):
+                raise CreatorError("'%s' is not a valid live CD ISO : neither "
+                                   "LiveOS/ext3fs.img nor os.img exist" %
+                                   base_on)
+
+            print "Copying file system..."
+            shutil.copyfile(os_image, self._image)
+            self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+        finally:
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            isoloop.cleanup()
+
+    def _mount_instroot(self, base_on = None):
+        LoopImageCreator._mount_instroot(self, base_on)
+        self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
+
+    def _unmount_instroot(self):
+        try:
+            os.unlink(self._instroot + "/etc/sysconfig/mkinitrd")
+        except:
+            pass
+        LoopImageCreator._unmount_instroot(self)
+
+    def __ensure_isodir(self):
+        if self.__isodir is None:
+            self.__isodir = self._mkdtemp("iso-")
+        return self.__isodir
+
+    def _get_isodir(self):
+        return self.__ensure_isodir()
+
+    def _set_isodir(self, isodir = None):
+        self.__isodir = isodir
+
+    def _create_bootconfig(self):
+        """Configure the image so that it's bootable."""
+        self._configure_bootloader(self.__ensure_isodir())
+
+    def _get_post_scripts_env(self, in_chroot):
+        env = LoopImageCreator._get_post_scripts_env(self, in_chroot)
+
+        if not in_chroot:
+            env["LIVE_ROOT"] = self.__ensure_isodir()
+
+        return env
+
+    def __write_initrd_conf(self, path):
+        content = ""
+        if not os.path.exists(os.path.dirname(path)):
+            fs_related.makedirs(os.path.dirname(path))
+        f = open(path, "w")
+
+        content += 'LIVEOS="yes"\n'
+        content += 'PROBE="no"\n'
+        content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n'
+        content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n'
+
+        for module in self.__modules:
+            if module == "=usb":
+                content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n'
+                content += 'MODULES+="usb_storage usbhid "\n'
+            elif module == "=firewire":
+                content += 'MODULES+="firewire-sbp2 firewire-ohci "\n'
+                content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n'
+            elif module == "=mmc":
+                content += 'MODULES+="mmc_block sdhci sdhci-pci "\n'
+            elif module == "=pcmcia":
+                content += 'MODULES+="pata_pcmcia  "\n'
+            else:
+                content += 'MODULES+="' + module + ' "\n'
+        f.write(content)
+        f.close()
+
+    def __create_iso(self, isodir):
+        iso = self._outdir + "/" + self.name + ".iso"
+        genisoimage = fs_related.find_binary_path("genisoimage")
+        args = [genisoimage,
+                "-J", "-r",
+                "-hide-rr-moved", "-hide-joliet-trans-tbl",
+                "-V", self.fslabel,
+                "-o", iso]
+
+        args.extend(self._get_mkisofs_options(isodir))
+
+        args.append(isodir)
+
+        if subprocess.call(args) != 0:
+            raise CreatorError("ISO creation failed!")
+
+        # it is still OK if isohybrid is not available
+        isohybrid = None
+        try:
+            isohybrid = fs_related.find_binary_path("isohybrid")
+        except:
+            pass
+
+        if isohybrid:
+            args = [isohybrid, "-partok", iso ]
+            if subprocess.call(args) != 0:
+                raise CreatorError("Hybrid ISO creation failed!")
+
+        self.__implant_md5sum(iso)
+
+    def __implant_md5sum(self, iso):
+        """Implant an isomd5sum."""
+        if os.path.exists("/usr/bin/implantisomd5"):
+            implantisomd5 = "/usr/bin/implantisomd5"
+        elif os.path.exists("/usr/lib/anaconda-runtime/implantisomd5"):
+            implantisomd5 = "/usr/lib/anaconda-runtime/implantisomd5"
+        else:
+            logging.warn("isomd5sum not installed; not setting up mediacheck")
+            return
+
+        subprocess.call([implantisomd5, iso], stdout=sys.stdout, stderr=sys.stderr)
+
+    def _stage_final_image(self):
+        try:
+            fs_related.makedirs(self.__ensure_isodir() + "/LiveOS")
+
+            minimal_size = self._resparse()
+
+            if not self.skip_minimize:
+                fs_related.create_image_minimizer(self.__isodir + "/LiveOS/osmin.img",
+                                       self._image, minimal_size)
+
+            if self.skip_compression:
+                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
+            else:
+                fs_related.makedirs(os.path.join(os.path.dirname(self._image), "LiveOS"))
+                shutil.move(self._image,
+                            os.path.join(os.path.dirname(self._image),
+                                         "LiveOS", "ext3fs.img"))
+                fs_related.mksquashfs(os.path.dirname(self._image),
+                           self.__isodir + "/LiveOS/squashfs.img")
+
+            self.__create_iso(self.__isodir)
+        finally:
+            shutil.rmtree(self.__isodir, ignore_errors = True)
+            self.__isodir = None
+
+class x86LiveImageCreator(LiveImageCreatorBase):
     """ImageCreator for x86 machines"""
     def _get_mkisofs_options(self, isodir):
         return [ "-b", "isolinux/isolinux.bin",
                  "-c", "isolinux/boot.cat",
@@ -40,7 +359,7 @@ class LivecdImageCreator(LiveImageCreatorBase):
                  "-boot-load-size", "4" ]
 
     def _get_required_packages(self):
-        return ["syslinux", "syslinux-extlinux", "moblin-live"] + LiveImageCreatorBase._get_required_packages(self)
+        return ["syslinux", "syslinux-extlinux"] + LiveImageCreatorBase._get_required_packages(self)
 
     def _get_isolinux_stanzas(self, isodir):
         return ""
@@ -168,35 +487,45 @@ menu color cmdline 0 #ffffffff #00000000
             raise CreatorError("Unable to find valid kernels, please check the repo")
 
         kernel_options = self._get_kernel_options()
-        menu_options = self._get_menu_options()
 
+        """ The menu can be highly customized; the format is
+
+              short_name1:long_name1:extra_options1;short_name2:long_name2:extra_options2
+
+            for example: autoinst:Installation only:systemd.unit=installer-graphical.service
+            but in order to keep compatibility with the old format, these are still OK:
+
+              liveinst autoinst
+              liveinst;autoinst
+              liveinst::;autoinst::
+        """
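+        # For example (illustrative), "autoinst:Install only:quiet;check"
+        # parses to [["autoinst", "Install only", "quiet"], ["check"]].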
+        oldmenus = {"basic":{"short":"basic", "long":"Installation Only (Text based)", "extra":"basic nosplash 4"},
+                    "liveinst":{"short":"liveinst", "long":"Installation Only", "extra":"liveinst nosplash 4"},
+                    "autoinst":{"short":"autoinst", "long":"Autoinstall (Deletes all existing content)", "extra":"autoinst nosplash 4"},
+                    "netinst":{"short":"netinst", "long":"Network Installation", "extra":"netinst 4"},
+                    "verify":{"short":"check", "long":"Verify and", "extra":"check"}
+                   }
+        menu_options = self._get_menu_options()
+        menus = menu_options.split(";")
+        for i in range(len(menus)):
+            menus[i] = menus[i].split(":")
+        if len(menus) == 1 and len(menus[0]) == 1:
+            # keep compatibility with the old whitespace-separated format
+            menus = menu_options.split()
+            for i in range(len(menus)):
+                menus[i] = [menus[i]]
 
         cfg = ""
 
         default_version = None
         default_index = None
         index = "0"
+        netinst = None
         for version in versions:
             is_xen = self.__copy_kernel_and_initramfs(isodir, version, index)
 
             default = self.__is_default_kernel(kernel, kernels)
-            liveinst = False
-            autoliveinst = False
-            netinst = False
-            checkisomd5 = False
-            basicinst = False
-            
-            if menu_options.find("bootinstall") >= 0:
-                liveinst = True
             
-            if menu_options.find("autoinst") >= 0:
-                autoliveinst = True
-                
-            if menu_options.find("verify") >= 0 and self._has_checkisomd5():
-                checkisomd5 = True 
-                               
-            if menu_options.find("netinst") >= 0:
-                netinst = True 
                 
             if default:
                 long = "Boot %s" % self.distro_name
@@ -204,6 +533,7 @@ menu color cmdline 0 #ffffffff #00000000
                 long = "Boot %s(%s)" % (self.name, kernel[7:])
             else:
                 long = "Boot %s(%s)" % (self.name, kernel)
+            oldmenus["verify"]["long"] = "%s %s" % (oldmenus["verify"]["long"], long)
 
             cfg += self.__get_image_stanza(is_xen,
                                            fslabel = self.fslabel,
@@ -217,39 +547,36 @@ menu color cmdline 0 #ffffffff #00000000
                 cfg += "menu default\n"
                 default_version = version
                 default_index = index
-            if basicinst:
-                cfg += self.__get_image_stanza(is_xen,
-                                               fslabel = self.fslabel,
-                                               liveargs = kernel_options,
-                                               long = "Installation Only (Text based)",
-                                               short = "basic" + index,
-                                               extra = "basic nosplash 4",
-                                               index = index)
-                
-            if liveinst:
-                cfg += self.__get_image_stanza(is_xen,
-                                               fslabel = self.fslabel,
-                                               liveargs = kernel_options,
-                                               long = "Installation Only",
-                                               short = "liveinst" + index,
-                                               extra = "liveinst nosplash 4",
-                                               index = index)
-            if autoliveinst:
-                cfg += self.__get_image_stanza(is_xen,
-                                               fslabel = self.fslabel,
-                                               liveargs = kernel_options,
-                                               long = "Autoinstall (Deletes all existing content)",
-                                               short = "autoinst" + index,
-                                               extra = "autoinst nosplash 4",
-                                               index = index)
 
-            if checkisomd5:
+            for menu in menus:
+                if not menu[0]:
+                    continue
+                short = menu[0] + index
+
+                if len(menu) >= 2:
+                    long = menu[1]
+                else:
+                    if menu[0] in oldmenus.keys():
+                        if menu[0] == "verify" and not self._has_checkisomd5():
+                            continue
+                        if menu[0] == "netinst":
+                            netinst = oldmenus[menu[0]]
+                            continue
+                        long = oldmenus[menu[0]]["long"]
+                        extra = oldmenus[menu[0]]["extra"]
+                    else:
+                        long = short.upper() + " X" + index
+                        extra = ""
+
+                if len(menu) >= 3:
+                    extra = menu[2]
+
                 cfg += self.__get_image_stanza(is_xen,
                                                fslabel = self.fslabel,
                                                liveargs = kernel_options,
-                                               long = "Verify and " + long,
-                                               short = "check" + index,
-                                               extra = "check",
+                                               long = long,
+                                               short = short,
+                                               extra = extra,
                                                index = index)
 
             index = str(int(index) + 1)
@@ -259,14 +586,13 @@ menu color cmdline 0 #ffffffff #00000000
         if not default_index:
             default_index = "0"
 
-        
         if netinst:
             cfg += self.__get_image_stanza(is_xen,
                                            fslabel = self.fslabel,
                                            liveargs = kernel_options,
-                                           long = "Network Installation",
-                                           short = "netinst",
-                                           extra = "netinst 4",
+                                           long = netinst["long"],
+                                           short = netinst["short"],
+                                           extra = netinst["extra"],
                                            index = default_index)
 
         return cfg
@@ -291,7 +617,7 @@ menu color cmdline 0 #ffffffff #00000000
 
     def _configure_syslinux_bootloader(self, isodir):
         """configure the boot loader"""
-        makedirs(isodir + "/isolinux")
+        fs_related.makedirs(isodir + "/isolinux")
 
         menu = self.__find_syslinux_menu()
 
@@ -370,7 +696,7 @@ hiddenmenu
 
     def _configure_efi_bootloader(self, isodir):
         """Set up the configuration for an EFI bootloader"""
-        makedirs(isodir + "/EFI/boot")
+        fs_related.makedirs(isodir + "/EFI/boot")
 
         if not self.__copy_efi_files(isodir):
             shutil.rmtree(isodir + "/EFI")
@@ -390,13 +716,13 @@ hiddenmenu
         cfgf.close()
 
         # first gen mactel machines get the bootloader name wrong apparently
-        if getBaseArch() == "i386":
+        if rpmmisc.getBaseArch() == "i386":
             os.link(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot.efi")
             os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot.conf")
 
         # for most things, we want them named boot$efiarch
         efiarch = {"i386": "ia32", "x86_64": "x64"}
-        efiname = efiarch[getBaseArch()]
+        efiname = efiarch[rpmmisc.getBaseArch()]
         os.rename(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot%s.efi" %(efiname,))
         os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot%s.conf" %(efiname,))
 
@@ -405,3 +731,194 @@ hiddenmenu
         self._configure_syslinux_bootloader(isodir)
         self._configure_efi_bootloader(isodir)
 
+class ppcLiveImageCreator(LiveImageCreatorBase):
+    def _get_mkisofs_options(self, isodir):
+        return [ "-hfs", "-nodesktop", "-part",
+                 "-map", isodir + "/ppc/mapping",
+                 "-hfs-bless", isodir + "/ppc/mac",
+                 "-hfs-volid", self.fslabel ]
+
+    def _get_required_packages(self):
+        return ["yaboot"] + \
+               LiveImageCreatorBase._get_required_packages(self)
+
+    def _get_excluded_packages(self):
+        # kind of hacky, but exclude memtest86+ on ppc so it can stay in cfg
+        return ["memtest86+"] + \
+               LiveImageCreatorBase._get_excluded_packages(self)
+
+    def __copy_boot_file(self, destdir, file):
+        for dir in ["/usr/share/ppc64-utils",
+                    "/usr/lib/moblin-installer-runtime/boot"]:
+            path = self._instroot + dir + "/" + file
+            if not os.path.exists(path):
+                continue
+
+            fs_related.makedirs(destdir)
+            shutil.copy(path, destdir)
+            return
+
+        raise CreatorError("Unable to find boot file " + file)
+
+    def __kernel_bits(self, kernel):
+        testpath = (self._instroot + "/lib/modules/" +
+                    kernel + "/kernel/arch/powerpc/platforms")
+
+        if not os.path.exists(testpath):
+            return { "32" : True, "64" : False }
+        else:
+            return { "32" : False, "64" : True }
+
+    def __copy_kernel_and_initramfs(self, destdir, version):
+        bootdir = self._instroot + "/boot"
+
+        fs_related.makedirs(destdir)
+
+        shutil.copyfile(bootdir + "/vmlinuz-" + version,
+                        destdir + "/vmlinuz")
+
+        shutil.copyfile(bootdir + "/initrd-" + version + ".img",
+                        destdir + "/initrd.img")
+
+    def __get_basic_yaboot_config(self, **args):
+        return """
+init-message = "Welcome to %(distroname)s!"
+timeout=%(timeout)d
+""" % args
+
+    def __get_image_stanza(self, **args):
+        return """
+
+image=/ppc/ppc%(bit)s/vmlinuz
+  label=%(short)s
+  initrd=/ppc/ppc%(bit)s/initrd.img
+  read-only
+  append="root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s"
+""" % args
+
+
+    def __write_yaboot_config(self, isodir, bit):
+        cfg = self.__get_basic_yaboot_config(name = self.name,
+                                             timeout = self._timeout * 100,
+                                             distroname = self.distro_name)
+
+        kernel_options = self._get_kernel_options()
+
+        cfg += self.__get_image_stanza(fslabel = self.fslabel,
+                                       short = "linux",
+                                       long = "Run from image",
+                                       extra = "",
+                                       bit = bit,
+                                       liveargs = kernel_options)
+
+        if self._has_checkisomd5():
+            cfg += self.__get_image_stanza(fslabel = self.fslabel,
+                                           short = "check",
+                                           long = "Verify and run from image",
+                                           extra = "check",
+                                           bit = bit,
+                                           liveargs = kernel_options)
+
+        f = open(isodir + "/ppc/ppc" + bit + "/yaboot.conf", "w")
+        f.write(cfg)
+        f.close()
+
+    def __write_not_supported(self, isodir, bit):
+        fs_related.makedirs(isodir + "/ppc/ppc" + bit)
+
+        message = "Sorry, this LiveCD does not support your hardware"
+
+        f = open(isodir + "/ppc/ppc" + bit + "/yaboot.conf", "w")
+        f.write('init-message = "' + message + '"')
+        f.close()
+
+
+    def __write_dualbits_yaboot_config(self, isodir, **args):
+        cfg = """
+init-message = "\nWelcome to %(name)s!\nUse 'linux32' for 32-bit kernel.\n\n"
+timeout=%(timeout)d
+default=linux
+
+image=/ppc/ppc64/vmlinuz
+       label=linux64
+       alias=linux
+       initrd=/ppc/ppc64/initrd.img
+       read-only
+
+image=/ppc/ppc32/vmlinuz
+       label=linux32
+       initrd=/ppc/ppc32/initrd.img
+       read-only
+""" % args
+
+        f = open(isodir + "/etc/yaboot.conf", "w")
+        f.write(cfg)
+        f.close()
+
+    def _configure_bootloader(self, isodir):
+        """configure the boot loader"""
+        havekernel = { 32: False, 64: False }
+
+        self.__copy_boot_file("mapping", isodir + "/ppc")
+        self.__copy_boot_file("bootinfo.txt", isodir + "/ppc")
+        self.__copy_boot_file("ofboot.b", isodir + "/ppc/mac")
+
+        shutil.copyfile(self._instroot + "/usr/lib/yaboot/yaboot",
+                        isodir + "/ppc/mac/yaboot")
+
+        fs_related.makedirs(isodir + "/ppc/chrp")
+        shutil.copyfile(self._instroot + "/usr/lib/yaboot/yaboot",
+                        isodir + "/ppc/chrp/yaboot")
+
+        subprocess.call(["/usr/sbin/addnote", isodir + "/ppc/chrp/yaboot"])
+
+        #
+        # FIXME: ppc should support multiple kernels too...
+        #
+        kernel = self._get_kernel_versions().values()[0][0]
+
+        kernel_bits = self.__kernel_bits(kernel)
+
+        for (bit, present) in kernel_bits.items():
+            if not present:
+                self.__write_not_supported(isodir, bit)
+                continue
+
+            self.__copy_kernel_and_initramfs(isodir + "/ppc/ppc" + bit, kernel)
+            self.__write_yaboot_config(isodir, bit)
+
+        fs_related.makedirs(isodir + "/etc")
+        if kernel_bits["32"] and not kernel_bits["64"]:
+            shutil.copyfile(isodir + "/ppc/ppc32/yaboot.conf",
+                            isodir + "/etc/yaboot.conf")
+        elif kernel_bits["64"] and not kernel_bits["32"]:
+            shutil.copyfile(isodir + "/ppc/ppc64/yaboot.conf",
+                            isodir + "/etc/yaboot.conf")
+        else:
+            self.__write_dualbits_yaboot_config(isodir,
+                                                name = self.name,
+                                                timeout = self._timeout * 100)
+
+        #
+        # FIXME: build 'netboot' images with kernel+initrd, like mk-images.ppc
+        #
+
+class ppc64LiveImageCreator(ppcLiveImageCreator):
+    def _get_excluded_packages(self):
+        # FIXME:
+        #   while kernel.ppc and kernel.ppc64 co-exist,
+        #   we can't have both
+        return ["kernel.ppc"] + \
+               ppcLiveImageCreator._get_excluded_packages(self)
+
+arch = rpmmisc.getBaseArch()
+if arch in ("i386", "x86_64"):
+    LiveCDImageCreator = x86LiveImageCreator
+elif arch in ("ppc",):
+    LiveCDImageCreator = ppcLiveImageCreator
+elif arch in ("ppc64",):
+    LiveCDImageCreator = ppc64LiveImageCreator
+elif arch.startswith("arm"):
+    LiveCDImageCreator = LiveImageCreatorBase
+else:
+    raise CreatorError("Architecture not supported!")
diff --git a/micng/imager/liveusb.py b/micng/imager/liveusb.py
new file mode 100644 (file)
index 0000000..3734ff4
--- /dev/null
@@ -0,0 +1,325 @@
+#
+# liveusb.py : LiveUSBImageCreator class for creating Live USB images
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+import os
+import os.path
+import glob
+import shutil
+import sys
+import subprocess
+import logging
+import re
+import time
+
+import micng.utils.fs_related as fs_related
+import micng.utils.misc as misc
+from livecd import LiveCDImageCreator
+from micng.utils.errors import *
+from micng.utils.partitionedfs import PartitionedMount
+
+class LiveUSBImageCreator(LiveCDImageCreator):
+    def __init__(self, *args):
+        LiveCDImageCreator.__init__(self, *args)
+
+        self._dep_checks.extend(["kpartx", "parted"])
+        # remove dependency of genisoimage in parent class
+        if "genisoimage" in self._dep_checks:
+            self._dep_checks.remove("genisoimage")
+
+    def _create_usbimg(self, isodir):
+        overlaysizemb = 64 #default
+        #skipcompress = self.skip_compression?
+        fstype = "vfat"
+        homesizemb=0
+        swapsizemb=0
+        homefile="home.img"
+        plussize=128
+        kernelargs=None
+
+        if overlaysizemb > 2047 and fstype == "vfat":
+            raise CreatorError("Can't have an overlay of 2048MB or greater on VFAT")
+        if homesizemb > 2047 and fstype == "vfat":
+            raise CreatorError("Can't have a home overlay of 2048MB or greater on VFAT")
+        if swapsizemb > 2047 and fstype == "vfat":
+            raise CreatorError("Can't have a swap overlay of 2048MB or greater on VFAT")
+
+        livesize = misc.get_file_size(isodir + "/LiveOS")
+        mountcmd = fs_related.find_binary_path("mount")
+        umountcmd = fs_related.find_binary_path("umount")
+        ddcmd = fs_related.find_binary_path("dd")
+        #if skipcompress:
+        #    tmpmnt = self._mkdtemp("squashfs-mnt")
+        #    rc = subprocess.call([mountcmd, "-o", "loop", isodir + "/LiveOS/squashfs.img", tmpmnt]);
+        #    if rc:
+        #        raise CreatorError("Can't mount %s" % (isodir + "/LiveOS/squashfs.img"))
+        #    livesize = misc.get_file_size(tmpmnt + "/LiveOS/ext3fs.img")
+        #    rc = subprocess.call([umountcmd, tmpmnt]);
+        #    if rc:
+        #        raise CreatorError("Can't umount %s" % (tmpmnt))
+        usbimgsize = (overlaysizemb + homesizemb + swapsizemb + livesize + plussize) * 1024L * 1024L
+        disk = fs_related.SparseLoopbackDisk("%s/%s.usbimg" % (self._outdir, self.name), usbimgsize)
+        usbmnt = self._mkdtemp("usb-mnt")
+        usbloop = PartitionedMount({'/dev/sdb':disk}, usbmnt)
+
+        usbloop.add_partition(usbimgsize/1024/1024, "/dev/sdb", "/", fstype, boot=True)
+
+        try:
+            usbloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed mount disks : %s" % e)
+
+        try:
+            fs_related.makedirs(usbmnt + "/LiveOS")
+            #if skipcompress:
+            #    if os.path.exists(isodir + "/LiveOS/squashfs.img"):
+            #        rc = subprocess.call([mountcmd, "-o", "loop", isodir + "/LiveOS/squashfs.img", tmpmnt]);
+            #        if rc:
+            #            raise CreatorError("Can't mount %s" % (isodir + "/LiveOS/squashfs.img"))
+            #        shutil.copyfile(tmpmnt + "/LiveOS/ext3fs.img", usbmnt + "/LiveOS/ext3fs.img")
+            #        rc = subprocess.call([umountcmd, tmpmnt]);
+            #        if rc:
+            #            raise CreatorError("Can't umount %s" % (tmpmnt))
+            #    else:
+            #        shutil.copyfile(isodir + "/LiveOS/ext3fs.img", usbmnt + "/LiveOS/ext3fs.img")
+            #else:
+            if os.path.exists(isodir + "/LiveOS/squashfs.img"):
+                shutil.copyfile(isodir + "/LiveOS/squashfs.img", usbmnt + "/LiveOS/squashfs.img")
+            else:
+                fs_related.mksquashfs(os.path.dirname(self._image), usbmnt + "/LiveOS/squashfs.img")
+
+            if os.path.exists(isodir + "/LiveOS/osmin.img"):
+                shutil.copyfile(isodir + "/LiveOS/osmin.img", usbmnt + "/LiveOS/osmin.img")
+
+            if fstype == "vfat" or fstype == "msdos":
+                uuid = usbloop.partitions[0]['mount'].uuid
+                label = usbloop.partitions[0]['mount'].fslabel
+                usblabel = "UUID=%s-%s" % (uuid[0:4], uuid[4:8])
+                overlaysuffix = "-%s-%s-%s" % (label, uuid[0:4], uuid[4:8])
+            else:
+                diskmount = usbloop.partitions[0]['mount']
+                usblabel = "UUID=%s" % diskmount.uuid
+                overlaysuffix = "-%s-%s" % (diskmount.fslabel, diskmount.uuid)
+
+            copycmd = fs_related.find_binary_path("cp")
+            args = [copycmd, "-Rf", isodir + "/isolinux", usbmnt + "/syslinux"]
+            rc = subprocess.call(args)
+            if rc:
+                raise CreatorError("Can't copy isolinux directory %s" % (isodir + "/isolinux/*"))
+
+            if os.path.isfile("/usr/share/syslinux/isolinux.bin"):
+                syslinux_path = "/usr/share/syslinux"
+            elif  os.path.isfile("/usr/lib/syslinux/isolinux.bin"):
+                syslinux_path = "/usr/lib/syslinux"
+            else:
+                raise CreatorError("syslinux not installed : "
+                               "cannot find syslinux installation path")
+
+            for f in ("isolinux.bin", "vesamenu.c32"):
+                path = os.path.join(syslinux_path, f)
+                if os.path.isfile(path):
+                    args = [copycmd, path, usbmnt + "/syslinux/"]
+                    rc = subprocess.call(args)
+                    if rc:
+                        raise CreatorError("Can't copy syslinux file %s" % (path))
+                else:
+                    raise CreatorError("syslinux not installed : "
+                               "syslinux file %s not found" % path)
+
+            fd = open(isodir + "/isolinux/isolinux.cfg", "r")
+            text = fd.read()
+            fd.close()
+            pattern = re.compile('CDLABEL=[^ ]*')
+            text = pattern.sub(usblabel, text)
+            pattern = re.compile('rootfstype=[^ ]*')
+            text = pattern.sub("rootfstype=" + fstype, text)
+            if kernelargs:
+                text = text.replace("liveimg", "liveimg " + kernelargs)
+
+            if overlaysizemb > 0:
+                print "Initializing persistent overlay file"
+                overfile = "overlay" + overlaysuffix
+                if fstype == "vfat":
+                    args = [ddcmd, "if=/dev/zero", "of=" + usbmnt + "/LiveOS/" + overfile, "count=%d" % overlaysizemb, "bs=1M"]
+                else:
+                    args = [ddcmd, "if=/dev/null", "of=" + usbmnt + "/LiveOS/" + overfile, "count=1", "bs=1M", "seek=%d" % overlaysizemb]
+                rc = subprocess.call(args)
+                if rc:
+                    raise CreatorError("Can't create overlay file")
+                text = text.replace("liveimg", "liveimg overlay=" + usblabel)
+                text = text.replace(" ro ", " rw ")
+
+            if swapsizemb > 0:
+                print "Initializing swap file"
+                swapfile = usbmnt + "/LiveOS/" + "swap.img"
+                args = [ddcmd, "if=/dev/zero", "of=" + swapfile, "count=%d" % swapsizemb, "bs=1M"]
+                rc = subprocess.call(args)
+                if rc:
+                    raise CreatorError("Can't create swap file")
+                args = ["mkswap", "-f", swapfile]
+                rc = subprocess.call(args)
+                if rc:
+                    raise CreatorError("Can't mkswap on swap file")
+
+            if homesizemb > 0:
+                print "Initializing persistent /home"
+                homefile = usbmnt + "/LiveOS/" + homefile
+                if fstype == "vfat":
+                    args = [ddcmd, "if=/dev/zero", "of=" + homefile, "count=%d" % homesizemb, "bs=1M"]
+                else:
+                    args = [ddcmd, "if=/dev/null", "of=" + homefile, "count=1", "bs=1M", "seek=%d" % homesizemb]
+                rc = subprocess.call(args)
+                if rc:
+                    raise CreatorError("Can't create home file")
+
+                mkfscmd = fs_related.find_binary_path("/sbin/mkfs." + fstype)
+                if fstype == "ext2" or fstype == "ext3":
+                    args = [mkfscmd, "-F", "-j", homefile]
+                else:
+                    args = [mkfscmd, homefile]
+                rc = subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr)
+                if rc:
+                    raise CreatorError("Can't mke2fs home file")
+                if fstype == "ext2" or fstype == "ext3":
+                    tune2fs = fs_related.find_binary_path("tune2fs")
+                    args = [tune2fs, "-c0", "-i0", "-ouser_xattr,acl", homefile]
+                    rc = subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr)
+                    if rc:
+                         raise CreatorError("Can't tune2fs home file")
+
+            if fstype == "vfat" or fstype == "msdos":
+                syslinuxcmd = fs_related.find_binary_path("syslinux")
+                syslinuxcfg = usbmnt + "/syslinux/syslinux.cfg"
+                args = [syslinuxcmd, "-d", "syslinux", usbloop.partitions[0]["device"]]
+            elif fstype == "ext2" or fstype == "ext3":
+                extlinuxcmd = fs_related.find_binary_path("extlinux")
+                syslinuxcfg = usbmnt + "/syslinux/extlinux.conf"
+                args = [extlinuxcmd, "-i", usbmnt + "/syslinux"]
+            else:
+                raise CreatorError("Invalid file system type: %s" % (fstype))
+
+            os.unlink(usbmnt + "/syslinux/isolinux.cfg")
+            fd = open(syslinuxcfg, "w")
+            fd.write(text)
+            fd.close()
+            rc = subprocess.call(args)
+            if rc:
+                raise CreatorError("Can't install boot loader.")
+
+        finally:
+            usbloop.unmount()
+            usbloop.cleanup()
+
+        #Need to do this after image is unmounted and device mapper is closed
+        print "set MBR"
+        mbrfile = "/usr/lib/syslinux/mbr.bin"
+        if not os.path.exists(mbrfile):
+            mbrfile = "/usr/share/syslinux/mbr.bin"
+            if not os.path.exists(mbrfile):
+                raise CreatorError("mbr.bin file didn't exist.")
+        mbrsize = os.path.getsize(mbrfile)
+        outimg = "%s/%s.usbimg" % (self._outdir, self.name)
+        args = [ddcmd, "if=" + mbrfile, "of=" + outimg, "seek=0", "conv=notrunc", "bs=1", "count=%d" % (mbrsize)]
+        rc = subprocess.call(args)
+        if rc:
+            raise CreatorError("Can't set MBR.")
+
+    def _stage_final_image(self):
+        try:
+            isodir = self._get_isodir()
+            fs_related.makedirs(isodir + "/LiveOS")
+
+            minimal_size = self._resparse()
+
+            if not self.skip_minimize:
+                fs_related.create_image_minimizer(isodir + "/LiveOS/osmin.img",
+                                       self._image, minimal_size)
+
+            if self.skip_compression:
+                shutil.move(self._image, isodir + "/LiveOS/ext3fs.img")
+            else:
+                fs_related.makedirs(os.path.join(os.path.dirname(self._image), "LiveOS"))
+                shutil.move(self._image,
+                            os.path.join(os.path.dirname(self._image),
+                                         "LiveOS", "ext3fs.img"))
+                fs_related.mksquashfs(os.path.dirname(self._image),
+                           isodir + "/LiveOS/squashfs.img")
+
+            self._create_usbimg(isodir)
+
+        finally:
+            shutil.rmtree(isodir, ignore_errors = True)
+            self._set_isodir(None)
+
+    def _base_on(self, base_on):
+        """Support Image Convertor"""
+        if self.actasconvertor:
+            if os.path.exists(base_on) and not os.path.isfile(base_on):
+                ddcmd = fs_related.find_binary_path("dd")
+                args = [ ddcmd, "if=%s" % base_on, "of=%s" % self._image ]
+                print "dd %s -> %s" % (base_on, self._image)
+                rc = subprocess.call(args)
+                if rc != 0:
+                    raise CreatorError("Failed to dd from %s to %s" % (base_on, self._image))
+                self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+            if os.path.isfile(base_on):
+                print "Copying file system..."
+                shutil.copyfile(base_on, self._image)
+                self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+            return
+
+        # extract the ext3 file system from the given live USB image
+        usbimgsize = misc.get_file_size(base_on) * 1024L * 1024L
+        disk = fs_related.SparseLoopbackDisk(base_on, usbimgsize)
+        usbimgmnt = self._mkdtemp("usbimgmnt-")
+        usbloop = PartitionedMount({'/dev/sdb':disk}, usbimgmnt, skipformat = True)
+        usbimg_fstype = "vfat"
+        usbloop.add_partition(usbimgsize/1024/1024, "/dev/sdb", "/", usbimg_fstype, boot=False)
+        try:
+            usbloop.mount()
+        except MountError, e:
+            usbloop.cleanup()
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (base_on, e))
+
+        #legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(usbimgmnt + "/squashfs.img"):
+            squashimg = usbimgmnt + "/squashfs.img"
+        else:
+            squashimg = usbimgmnt + "/LiveOS/squashfs.img"
+
+        tmpoutdir = self._mkdtemp()
+        # unsquashfs requires that the output directory not already exist
+        shutil.rmtree(tmpoutdir, ignore_errors = True)
+        self._uncompress_squashfs(squashimg, tmpoutdir)
+
+        try:
+            # legacy LiveOS filesystem layout support, remove for F9 or F10
+            if os.path.exists(tmpoutdir + "/os.img"):
+                os_image = tmpoutdir + "/os.img"
+            else:
+                os_image = tmpoutdir + "/LiveOS/ext3fs.img"
+
+            if not os.path.exists(os_image):
+                raise CreatorError("'%s' is not a valid live CD ISO : neither "
+                                   "LiveOS/ext3fs.img nor os.img exist" %
+                                   base_on)
+
+            print "Copying file system..."
+            shutil.copyfile(os_image, self._image)
+            self._set_image_size(misc.get_file_size(self._image) * 1024L * 1024L)
+        finally:
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            usbloop.cleanup()
diff --git a/micng/imager/loop.py b/micng/imager/loop.py
new file mode 100644 (file)
index 0000000..996b169
--- /dev/null
@@ -0,0 +1,224 @@
+#
+# loop.py : LoopImageCreator classes
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import shutil
+import subprocess
+import micng.utils.kickstart as kickstart
+from baseimager import BaseImageCreator
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.utils.misc import *
+
+FSLABEL_MAXLEN = 32
+"""The maximum string length supported for LoopImageCreator.fslabel."""
+
+class LoopImageCreator(BaseImageCreator):
+    """Installs a system into a loopback-mountable filesystem image.
+
+        LoopImageCreator is a straightforward ImageCreator subclass; the system
+        is installed into an ext3 filesystem on a sparse file which can be
+        subsequently loopback-mounted.
+    """
+
+    def __init__(self, creatoropts = None, pkgmgr = None):
+        """Initialize a LoopImageCreator instance.
+
+            This method takes the same arguments as ImageCreator.__init__() with
+            the addition of:
+    
+            fslabel -- A string used as a label for any filesystems created.
+        """
+        BaseImageCreator.__init__(self, creatoropts, pkgmgr)
+
+        self.__fslabel = None
+        self.fslabel = self.name
+
+        self.__minsize_KB = 0
+        self.__blocksize = 4096
+        if self.ks:
+            self.__fstype = kickstart.get_image_fstype(self.ks, "ext3")
+            self.__fsopts = kickstart.get_image_fsopts(self.ks, "defaults,noatime")
+        else:
+            self.__fstype = None
+            self.__fsopts = None
+
+        self.__instloop = None
+        self.__imgdir = None
+
+        if self.ks:
+            self.__image_size = kickstart.get_image_size(self.ks,
+                                                         4096L * 1024 * 1024)
+        else:
+            self.__image_size = 0
+
+        self._img_name = self.name + ".img"
+
+    def _set_fstype(self, fstype):
+        self.__fstype = fstype
+
+    def _set_image_size(self, imgsize):
+        self.__image_size = imgsize
+
+    #
+    # Properties
+    #
+    def __get_fslabel(self):
+        if self.__fslabel is None:
+            return self.name
+        else:
+            return self.__fslabel
+    def __set_fslabel(self, val):
+        if val is None:
+            self.__fslabel = None
+        else:
+            self.__fslabel = val[:FSLABEL_MAXLEN]
+    #A string used to label any filesystems created.
+    #
+    #Some filesystems impose a constraint on the maximum allowed size of the
+    #filesystem label. In the case of ext3 it's 16 characters, but in the case
+    #of ISO9660 it's 32 characters.
+    #
+    #mke2fs silently truncates the label, but mkisofs aborts if the label is too
+    #long. So, for convenience sake, any string assigned to this attribute is
+    #silently truncated to FSLABEL_MAXLEN (32) characters.
+    
+    fslabel = property(__get_fslabel, __set_fslabel)
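+    # Illustrative behaviour of the property above (not exercised in this
+    # module): assigning a longer string silently keeps only the first
+    # FSLABEL_MAXLEN characters, e.g.
+    #   creator.fslabel = "x" * 40
+    #   len(creator.fslabel)   # -> 32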
+
+
+    def __get_image(self):
+        if self.__imgdir is None:
+            raise CreatorError("_image is not valid before calling mount()")
+        return self.__imgdir + "/meego.img"
+    #The location of the image file.
+    #
+    #This is the path to the filesystem image. Subclasses may use this path
+    #in order to package the image in _stage_final_image().
+    #    
+    #Note, this directory does not exist before ImageCreator.mount() is called.
+    #
+    #Note also, this is a read-only attribute.
+    _image = property(__get_image)
+
+
+    def __get_blocksize(self):
+        return self.__blocksize
+    def __set_blocksize(self, val):
+        if self.__instloop:
+            raise CreatorError("_blocksize must be set before calling mount()")
+        try:
+            self.__blocksize = int(val)
+        except ValueError:
+            raise CreatorError("'%s' is not a valid integer value "
+                               "for _blocksize" % val)
+    #The block size used by the image's filesystem.
+    #
+    #This is the block size used when creating the filesystem image. Subclasses
+    #may change this if they wish to use something other than a 4k block size.
+    #
+    #Note, this attribute may only be set before calling mount().
+    _blocksize = property(__get_blocksize, __set_blocksize)
+
+
+    def __get_fstype(self):
+        return self.__fstype
+    def __set_fstype(self, val):
+        if val != "ext2" and val != "ext3":
+            raise CreatorError("Unknown _fstype '%s' supplied" % val)
+        self.__fstype = val
+    #The type of filesystem used for the image.
+    #
+    #This is the filesystem type used when creating the filesystem image.
+    #Subclasses may change this if they wish to use something other than ext3.
+    #
+    #Note, only ext2 and ext3 are currently supported.
+    #
+    #Note also, this attribute may only be set before calling mount(). 
+    _fstype = property(__get_fstype, __set_fstype)
+
+
+    def __get_fsopts(self):
+        return self.__fsopts
+    def __set_fsopts(self, val):
+        self.__fsopts = val
+    #Mount options of the filesystem used for the image.
+    #
+    #This can be specified with --fsoptions="xxx,yyy" in the part command of
+    #the kickstart file.
+    _fsopts = property(__get_fsopts, __set_fsopts)
+
+
+    #
+    # Helpers for subclasses
+    #
+    def _resparse(self, size = None):
+        """Rebuild the filesystem image to be as sparse as possible.
+
+            This method should be used by subclasses when staging the final image
+            in order to reduce the actual space taken up by the sparse image file
+            to be as little as possible.
+    
+            This is done by resizing the filesystem to the minimal size (thereby
+            eliminating any space taken up by deleted files) and then resizing it
+            back to the supplied size.
+    
+            size -- the size, in bytes, to which the filesystem image should be
+                    resized after it has been minimized; this defaults to None,
+                    causing the original size specified by the kickstart file to
+                    be used (or 4GiB if not specified in the kickstart).
+        """
+        return self.__instloop.resparse(size)
+
+    def _base_on(self, base_on):
+        shutil.copyfile(base_on, self._image)
+
+    #
+    # Actual implementation
+    #
+    def _mount_instroot(self, base_on = None):
+        self.__imgdir = self._mkdtemp()
+
+        if not base_on is None:
+            self._base_on(base_on)
+
+        if self.__fstype in ("ext2", "ext3", "ext4"):
+            MyDiskMount = ExtDiskMount
+        elif self.__fstype == "btrfs":
+            MyDiskMount = BtrfsDiskMount
+        else:
+            raise CreatorError("Unsupported filesystem type '%s' for loop image" % self.__fstype)
+
+        self.__instloop = MyDiskMount(SparseLoopbackDisk(self._image, self.__image_size),
+                                       self._instroot,
+                                       self.__fstype,
+                                       self.__blocksize,
+                                       self.fslabel)
+
+        try:
+            self.__instloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (self._image, e))
+
+    def _unmount_instroot(self):
+        if not self.__instloop is None:
+            self.__instloop.cleanup()
+
+    def _stage_final_image(self):
+        self._resparse()
+        shutil.move(self._image, self._outdir + "/" + self._img_name)
diff --git a/micng/imager/raw.py b/micng/imager/raw.py
new file mode 100644 (file)
index 0000000..8b56804
--- /dev/null
@@ -0,0 +1,442 @@
+#
+# raw.py: RawImageCreator class
+#
+# Copyright 2007-2008, Red Hat  Inc.
+# Copyright 2008, Daniel P. Berrange
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import stat
+import glob
+import shutil
+import zipfile
+import tarfile
+import subprocess
+import logging
+
+import micng.utils.kickstart as kickstart
+import micng.utils.fs_related as fs_related
+import micng.utils.misc as misc
+import urlgrabber.progress as progress
+from baseimager import BaseImageCreator
+from micng.utils.partitionedfs import PartitionedMount
+from micng.utils.errors import *
+
+
+class RawImageCreator(BaseImageCreator):
+    """Installs a system into a file containing a partitioned disk image.
+
+        RawImageCreator is an advanced ImageCreator subclass; a sparse file
+        is formatted with a partition table, each partition is loopback mounted
+        and the system is installed into a virtual disk. The disk image can
+        subsequently be booted in a virtual machine or accessed with kpartx.
+    """
+
+    def __init__(self, *args):
+        """Initialize a ApplianceImageCreator instance.
+
+            This method takes the same arguments as ImageCreator.__init__()
+        """
+        BaseImageCreator.__init__(self, *args)
+
+        self.__instloop = None
+        self.__imgdir = None
+        self.__disks = {}
+        self.__disk_format = "raw"
+        self.vmem = 512
+        self.vcpu = 1
+        self.checksum = False
+        self.appliance_version = None
+        self.appliance_release = None
+        #self.getsource = False
+        #self.listpkg = False
+
+        self._dep_checks.extend(["sync", "kpartx", "parted", "extlinux"])
+
+    def configure(self, repodata = None):
+        def chroot():
+
+            os.chroot(self._instroot)
+            os.chdir("/")
+
+        if os.path.exists(self._instroot + "/usr/bin/Xorg"):
+            subprocess.call(["/bin/chmod", "u+s", "/usr/bin/Xorg"], preexec_fn = chroot)
+        BaseImageCreator.configure(self, repodata)
+        
+    def _get_fstab(self):
+        s = ""
+        for mp in self.__instloop.mountOrder:
+            p = None
+            for p1 in self.__instloop.partitions:
+                if p1['mountpoint'] == mp:
+                    p = p1
+                    break
+
+            s += "%(device)s  %(mountpoint)s         %(fstype)s   %(fsopts)s 0 0\n" %  {
+                 'device': "/dev/%s%-d" % (p['disk'], p['num']),
+                 'mountpoint': p['mountpoint'],
+                 'fstype': p['fstype'],
+                 'fsopts': "defaults,noatime" if not p['fsopts'] else p['fsopts']}
+
+            if p['mountpoint'] == "/":
+                for subvol in self.__instloop.subvolumes:
+                    if subvol['mountpoint'] == "/":
+                        continue
+                    s += "%(device)s  %(mountpoint)s         %(fstype)s   %(fsopts)s 0 0\n" %  {
+                         'device': "/dev/%s%-d" % (p['disk'], p['num']),
+                         'mountpoint': subvol['mountpoint'],
+                         'fstype': p['fstype'],
+                         'fsopts': "defaults,noatime" if not subvol['fsopts'] else subvol['fsopts']}
+
+        s += "devpts     /dev/pts  devpts  gid=5,mode=620   0 0\n"
+        s += "tmpfs      /dev/shm  tmpfs   defaults         0 0\n"
+        s += "proc       /proc     proc    defaults         0 0\n"
+        s += "sysfs      /sys      sysfs   defaults         0 0\n"
+        return s
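+    # Illustrative output of the method above (assuming a single kickstart
+    # line "part / --size 1900 --ondisk sda --fstype=ext3", partition number 1
+    # and no --fsoptions):
+    #
+    #   /dev/sda1  /         ext3   defaults,noatime 0 0
+    #   devpts     /dev/pts  devpts  gid=5,mode=620   0 0
+    #   tmpfs      /dev/shm  tmpfs   defaults         0 0
+    #   proc       /proc     proc    defaults         0 0
+    #   sysfs      /sys      sysfs   defaults         0 0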
+
+    def _create_mkinitrd_config(self):
+        # write a config telling mkinitrd which modules to include in the initrd
+
+        mkinitrd = ""
+        mkinitrd += "PROBE=\"no\"\n"
+        mkinitrd += "MODULES+=\"ext3 ata_piix sd_mod libata scsi_mod\"\n"
+        mkinitrd += "rootfs=\"ext3\"\n"
+        mkinitrd += "rootopts=\"defaults\"\n"
+
+        logging.debug("Writing mkinitrd config %s/etc/sysconfig/mkinitrd" % self._instroot)
+        os.makedirs(self._instroot + "/etc/sysconfig/", mode=0755)
+        cfg = open(self._instroot + "/etc/sysconfig/mkinitrd", "w")
+        cfg.write(mkinitrd)
+        cfg.close()
+
+    #
+    # Actual implementation
+    #
+    def _mount_instroot(self, base_on = None):
+        self.__imgdir = self._mkdtemp()
+
+        #Set a default partition if none is specified in the kickstart file
+        if not self.ks.handler.partition.partitions:
+            partstr = "part / --size 1900 --ondisk sda --fstype=ext3"
+            args = partstr.split()
+            pd = self.ks.handler.partition.parse(args[1:])
+            if pd not in self.ks.handler.partition.partitions:
+                self.ks.handler.partition.partitions.append(pd)
+
+        #list of partitions from kickstart file
+        parts = kickstart.get_partitions(self.ks)
+
+        #list of disks, where each disk is a dict with 'name' and 'size' keys
+        disks = []
+
+        for i in range(len(parts)):
+            if parts[i].disk:
+                disk = parts[i].disk
+            else:
+                raise CreatorError("Failed to create disks, no --ondisk specified in partition line of ks file")
+
+            if not parts[i].fstype:
+                 raise CreatorError("Failed to create disks, no --fstype specified in partition line of ks file")
+
+            size =   parts[i].size * 1024L * 1024L
+            
+            found = False
+            for j in range(len(disks)):
+                if disks[j]['name'] == disk:
+                    disks[j]['size'] = disks[j]['size'] + size
+                    found = True
+                    break
+                else: 
+                    found = False
+            if not found:
+                disks.append({ 'name': disk, 'size': size })
+
+        #create disk
+        for item in disks:
+            logging.debug("Adding disk %s as %s/%s-%s.raw" % (item['name'], self.__imgdir,self.name, item['name']))
+            disk = fs_related.SparseLoopbackDisk("%s/%s-%s.raw" % (self.__imgdir,self.name, item['name']),item['size'])
+            self.__disks[item['name']] = disk
+
+        self.__instloop = PartitionedMount(self.__disks, self._instroot)
+
+        for p in parts:
+            self.__instloop.add_partition(int(p.size), p.disk, p.mountpoint, p.fstype, fsopts = p.fsopts, boot = p.active)
+
+        try:
+            self.__instloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed mount disks : %s" % e)
+
+        self._create_mkinitrd_config()
+
+    def _get_required_packages(self):
+        required_packages = BaseImageCreator._get_required_packages(self)
+        if not self.target_arch or not self.target_arch.startswith("arm"):
+            required_packages += ["syslinux", "syslinux-extlinux"]
+        return required_packages
+
+    def _get_excluded_packages(self):
+        return BaseImageCreator._get_excluded_packages(self)
+
+    def _get_syslinux_boot_config(self):
+        bootdevnum = None
+        rootdevnum = None
+        rootdev = None
+        for p in self.__instloop.partitions:
+            if p['mountpoint'] == "/boot":
+                bootdevnum = p['num'] - 1
+            elif p['mountpoint'] == "/" and bootdevnum is None:
+                bootdevnum = p['num'] - 1
+
+            if p['mountpoint'] == "/":
+                rootdevnum = p['num'] - 1
+                rootdev = "/dev/%s%-d" % (p['disk'], p['num'])
+
+        prefix = ""
+        if bootdevnum == rootdevnum:
+            prefix = "/boot"
+
+        return (bootdevnum, rootdevnum, rootdev, prefix)
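+    # Illustrative return value (assuming a single "/" partition, number 1,
+    # on disk "sda" and no separate /boot): bootdevnum = 0, rootdevnum = 0,
+    # rootdev = "/dev/sda1", prefix = "/boot".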
+
+    def _create_syslinux_config(self):
+        #Copy splash
+        splash = "%s/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg" % self._instroot
+        if os.path.exists(splash):
+            shutil.copy(splash, "%s%s/splash.jpg" % (self._instroot, "/boot/extlinux"))
+            splashline = "menu background splash.jpg"
+        else:
+            splashline = ""
+
+        (bootdevnum, rootdevnum, rootdev, prefix) = self._get_syslinux_boot_config()
+        options = self.ks.handler.bootloader.appendLine
+
+        #XXX don't hardcode default kernel - see livecd code
+        syslinux_conf = ""
+        syslinux_conf += "prompt 0\n"
+        syslinux_conf += "timeout 1\n"
+        syslinux_conf += "\n"
+        syslinux_conf += "default vesamenu.c32\n"
+        syslinux_conf += "menu autoboot Starting %s...\n" % self.distro_name
+        syslinux_conf += "menu hidden\n"
+        syslinux_conf += "\n"
+        syslinux_conf += "%s\n" % splashline
+        syslinux_conf += "menu title Welcome to %s!\n" % self.distro_name
+        syslinux_conf += "menu color border 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color sel 7 #ffffffff #ff000000\n"
+        syslinux_conf += "menu color title 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color tabmsg 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color unsel 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color hotsel 0 #ff000000 #ffffffff\n"
+        syslinux_conf += "menu color hotkey 7 #ffffffff #ff000000\n"
+        syslinux_conf += "menu color timeout_msg 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color timeout 0 #ffffffff #00000000\n"
+        syslinux_conf += "menu color cmdline 0 #ffffffff #00000000\n"
+
+        versions = []
+        kernels = self._get_kernel_versions()
+        for kernel in kernels:
+            for version in kernels[kernel]:
+                versions.append(version)
+
+        footlabel = 0
+        for v in versions:
+            shutil.copy("%s/boot/vmlinuz-%s" %(self._instroot, v),
+                        "%s%s/vmlinuz-%s" % (self._instroot, "/boot/extlinux/", v))
+            syslinux_conf += "label %s%d\n" % (self.distro_name.lower(), footlabel)
+            syslinux_conf += "\tmenu label %s (%s)\n" % (self.distro_name, v)
+            syslinux_conf += "\tkernel vmlinuz-%s\n" % v
+            syslinux_conf += "\tappend ro root=%s quiet vga=current %s\n" % (rootdev, options)
+            if footlabel == 0:
+                syslinux_conf += "\tmenu default\n"
+            footlabel += 1
+
+        logging.debug("Writing syslinux config %s/boot/extlinux/extlinux.conf" % self._instroot)
+        cfg = open(self._instroot + "/boot/extlinux/extlinux.conf", "w")
+        cfg.write(syslinux_conf)
+        cfg.close()
+
+    def _install_syslinux(self):
+        # note: only one loop device (the last one returned by keys()) is
+        # used for the bootloader install below
+        for name in self.__disks.keys():
+            loopdev = self.__disks[name].device
+
+        logging.debug("Installing syslinux bootloader to %s" % loopdev)
+
+        (bootdevnum, rootdevnum, rootdev, prefix) = self._get_syslinux_boot_config()
+
+
+        #Set MBR
+        mbrsize = os.stat("%s/usr/share/syslinux/mbr.bin" % self._instroot)[stat.ST_SIZE]
+        ddcmd = fs_related.find_binary_path("dd")
+        rc = subprocess.call([ddcmd, "if=%s/usr/share/syslinux/mbr.bin" % self._instroot, "of=" + loopdev])
+        if rc != 0:
+            raise MountError("Unable to set MBR to %s" % loopdev)
+
+        #Set Bootable flag
+        parted = fs_related.find_binary_path("parted")
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        rc = subprocess.call([parted, "-s", loopdev, "set", "%d" % (bootdevnum + 1), "boot", "on"],
+                             stdout = dev_null, stderr = dev_null)
+        os.close(dev_null)
+        #XXX disabled return code check because parted always fails to
+        #reload part table with loop devices. Annoying because we can't
+        #distinguish this failure from real partition failures :-(
+        if rc != 0 and 1 == 0:
+            raise MountError("Unable to set bootable flag to %sp%d" % (loopdev, (bootdevnum + 1)))
+
+
+        #Ensure all data is flushed to disk before doing syslinux install
+        subprocess.call(["sync"])
+
+        fullpathsyslinux = fs_related.find_binary_path("extlinux")
+        rc = subprocess.call([fullpathsyslinux, "-i", "%s/boot/extlinux" % self._instroot])
+        if rc != 0:
+            raise MountError("Unable to install syslinux bootloader to %sp%d" % (loopdev, (bootdevnum + 1)))
+
+    def _create_bootconfig(self):
+        #If syslinux is available do the required configurations.
+        if os.path.exists("%s/usr/share/syslinux/" % (self._instroot)) \
+           and os.path.exists("%s/boot/extlinux/" % (self._instroot)):
+            self._create_syslinux_config()
+            self._install_syslinux()
+
+    def _unmount_instroot(self):
+        if not self.__instloop is None:
+            self.__instloop.cleanup()
+
+    def _resparse(self, size = None):
+        return self.__instloop.resparse(size)
+
+    def _stage_final_image(self):
+        """Stage the final system image in _outdir.
+           write meta data
+        """
+        self._resparse()
+
+        logging.debug("moving disks to stage location")
+        for name in self.__disks.keys():
+            src = "%s/%s-%s.raw" % (self.__imgdir, self.name,name)
+            self._img_name = "%s-%s.%s" % (self.name, name, self.__disk_format)
+            dst = "%s/%s" % (self._outdir, self._img_name)
+            logging.debug("moving %s to %s" % (src,dst))
+            shutil.move(src,dst) 
+        self._write_image_xml()
+
+    def _write_image_xml(self):
+        imgarch = "i686"
+        if self.target_arch and self.target_arch.startswith("arm"):
+            imgarch = "arm"
+        xml = "<image>\n"
+
+        name_attributes = ""
+        if self.appliance_version:
+            name_attributes += " version='%s'" % self.appliance_version
+        if self.appliance_release:
+            name_attributes += " release='%s'" % self.appliance_release
+        xml += "  <name%s>%s</name>\n" % (name_attributes, self.name)
+        xml += "  <domain>\n"
+        # XXX don't hardcode - determine based on the kernel we installed for grub
+        # baremetal vs xen
+        xml += "    <boot type='hvm'>\n"
+        xml += "      <guest>\n"
+        xml += "        <arch>%s</arch>\n" % imgarch
+        xml += "      </guest>\n"
+        xml += "      <os>\n"
+        xml += "        <loader dev='hd'/>\n"
+        xml += "      </os>\n"
+
+        i = 0
+        for name in self.__disks.keys():
+            xml += "      <drive disk='%s-%s.%s' target='hd%s'/>\n" % (self.name,name, self.__disk_format,chr(ord('a')+i))
+            i = i + 1
+
+        xml += "    </boot>\n"
+        xml += "    <devices>\n"
+        xml += "      <vcpu>%s</vcpu>\n" % self.vcpu
+        xml += "      <memory>%d</memory>\n" %(self.vmem * 1024)
+        for network in self.ks.handler.network.network:
+            xml += "      <interface/>\n"
+        xml += "      <graphics/>\n"
+        xml += "    </devices>\n"
+        xml += "  </domain>\n"
+        xml += "  <storage>\n"
+
+        if self.checksum is True:
+            for name in self.__disks.keys():
+                diskpath = "%s/%s-%s.%s" % (self._outdir,self.name,name, self.__disk_format)
+                disk_size = os.path.getsize(diskpath)
+                meter_ct = 0
+                meter = progress.TextMeter()
+                meter.start(size=disk_size, text="Generating disk signature for %s-%s.%s" % (self.name,name,self.__disk_format))
+                xml += "    <disk file='%s-%s.%s' use='system' format='%s'>\n" % (self.name,name, self.__disk_format, self.__disk_format)
+
+                try:
+                    import hashlib
+                    m1 = hashlib.sha1()
+                    m2 = hashlib.sha256()
+                except ImportError:
+                    import sha
+                    m1 = sha.new()
+                    m2 = None
+                f = open(diskpath,"r")
+                while 1:
+                    chunk = f.read(65536)
+                    if not chunk:
+                        break
+                    m1.update(chunk)
+                    if m2:
+                       m2.update(chunk)
+                    meter.update(meter_ct)
+                    meter_ct = meter_ct + 65536
+
+                sha1checksum = m1.hexdigest()
+                xml +=  """      <checksum type='sha1'>%s</checksum>\n""" % sha1checksum
+
+                if m2:
+                    sha256checksum = m2.hexdigest()
+                    xml += """      <checksum type='sha256'>%s</checksum>\n""" % sha256checksum
+                xml += "    </disk>\n"
+        else:
+            for name in self.__disks.keys():
+                xml += "    <disk file='%s-%s.%s' use='system' format='%s'/>\n" % (self.name,name, self.__disk_format, self.__disk_format)
+
+        xml += "  </storage>\n"
+        xml += "</image>\n"
+
+        logging.debug("writing image XML to %s/%s.xml" %  (self._outdir, self.name))
+        cfg = open("%s/%s.xml" % (self._outdir, self.name), "w")
+        cfg.write(xml)
+        cfg.close()
+        #print "Wrote: %s.xml" % self.name
+
+    @classmethod
+    def _mount_srcimg(self, srcimg):
+        srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
+        srcmnt = misc.mkdtemp("srcmnt")        
+        disk = fs_related.SparseLoopbackDisk(srcimg, srcimgsize)
+        srcloop = PartitionedMount({'/dev/sdb':disk}, srcmnt, skipformat = True)
+
+        srcloop.add_partition(srcimgsize/1024/1024, "/dev/sdb", "/", "ext3", boot=False)
+        try:
+            srcloop.mount()
+            return srcloop
+        except MountError, e:
+            srcloop.cleanup()
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (srcimg, e))
+    @classmethod
+    def _unmount_srcimg(self, srcloop):
+        if srcloop:
+            srcloop.cleanup()
diff --git a/micng/micng.conf b/micng/micng.conf
new file mode 100644 (file)
index 0000000..53d3a59
--- /dev/null
@@ -0,0 +1,14 @@
+[common]
+
+[create]
+tmpdir= /var/tmp
+cachedir= /var/tmp/cache
+outdir= .
+name=meego
+pkgmgr=zypp
+arch=i586
+#proxy=http://proxy.com
+
+[convert]
+
+[chroot]
index e69de29..e132727 100644 (file)
@@ -0,0 +1,9 @@
+#!/usr/bin/python -t
+
+class BackendPlugin(object):
+    plugin_type="backend"
+    def addRepository(self):
+        pass
+
+#[a, b]: a is the backend name, b is the backend class
+mic_plugin = ["", None]
diff --git a/micng/pluginbase/base_plugin.py b/micng/pluginbase/base_plugin.py
deleted file mode 100644 (file)
index 36bd3b3..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/python
-class PluginBase(object):
-    plugin_type = None
-    def __init__(self):
-        pass
old mode 100755 (executable)
new mode 100644 (file)
index 15014d0..8dc749e
 #!/usr/bin/python
-from micng.pluginbase.base_plugin import PluginBase
-import micng.configmgr as configmgr
-import micng.utils.misc as misc
-import micng.utils.errors as errors
 
-class ImagerPlugin(PluginBase):
+class ImagerPlugin(object):
     plugin_type = "imager"
-    def __init__(self, configinfo=None):
-        if not configinfo:
-            self.configinfo = configmgr.getConfigInfo()
-            return 
-        self.configinfo = configinfo
-        """ Initialize package managers """
-        self.pkgmgr = pkgmanagers.pkgManager()#fix in next step
-        self.pkgmgr.load_pkg_managers()#fix in next steps
-
-        self.ks = ks
-        """A pykickstart.KickstartParser instance."""
-
-        self.name = name
-        """A name for the image."""
-
-        self.distro_name = "MeeGo"
-
-        """Output image file names"""
-        self.outimage = []
-
-        """A flag to generate checksum"""
-        self._genchecksum = False
-
-        self.tmpdir = "/var/tmp"
-        """The directory in which all temporary files will be created."""
-
-        self.cachedir = None
-
-        self._alt_initrd_name = None
-
-        self.__builddir = None
-        self.__bindmounts = []
-
-        """ Contains the compression method that is used to compress
-        the disk image after creation, e.g., bz2.
-        This value is set with compression_method function. """
-        self._img_compression_method = None
-
-        # dependent commands to check
-        self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe", "passwd"]
-
-        self._recording_pkgs = None
-
-        self._include_src = None
-
-        self._local_pkgs_path = None
-
-        # available size in root fs, init to 0
-        self._root_fs_avail = 0
-
-        # target arch for non-x86 image
-        self.target_arch = None
-
-        """ Name of the disk image file that is created. """
-        self._img_name = None
-
-        """ Image format """
-        self.image_format = None
-
-        """ Save qemu emulator file name in order to clean up it finally """
-        self.qemu_emulator = None
-
-        """ No ks provided when called by convertor, so skip the dependency check """
-        if self.ks:
-            """ If we have btrfs partition we need to check that we have toosl for those """
-            for part in self.ks.handler.partition.partitions:
-                if part.fstype and part.fstype == "btrfs":
-                    self._dep_checks.append("mkfs.btrfs")
-                    break
-
-    """inner function"""
-    def __ensure_builddir(self):
-        if not self.__builddir is None:
-            return
-
-        try:
-            self.__builddir = tempfile.mkdtemp(dir = self.tmpdir,
-                                               prefix = "imgcreate-")
-        except OSError, (err, msg):
-            raise CreatorError("Failed create build directory in %s: %s" %
-                               (self.tmpdir, msg))
-
-    def _mkdtemp(self, prefix = "tmp-"):
-        """Create a temporary directory.
-
-        This method may be used by subclasses to create a temporary directory
-        for use in building the final image - e.g. a subclass might create
-        a temporary directory in order to bundle a set of files into a package.
-
-        The subclass may delete this directory if it wishes, but it will be
-        automatically deleted by cleanup().
-
-        The absolute path to the temporary directory is returned.
-
-        Note, this method should only be called after mount() has been called.
-
-        prefix -- a prefix which should be used when creating the directory;
-                  defaults to "tmp-".
-
-        """
-        self.__ensure_builddir()
-        return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix)
-
-    def _mktemp(self, prefix = "tmp-"):
-        """Create a temporary file.
-
-        This method simply calls __mkstemp() and closes the returned file
-        descriptor.
-
-        The absolute path to the temporary file is returned.
-
-        Note, this method should only be called after mount() has been called.
-
-        prefix -- a prefix which should be used when creating the file;
-                  defaults to "tmp-".
-
-        """
-        def __mkstemp(prefix = "tmp-"):
-            """Create a temporary file.
-    
-            This method may be used by subclasses to create a temporary file
-            for use in building the final image - e.g. a subclass might need
-            a temporary location to unpack a compressed file.
-    
-            The subclass may delete this file if it wishes, but it will be
-            automatically deleted by cleanup().
-    
-            A tuple containing a file descriptor (returned from os.open() and the
-            absolute path to the temporary directory is returned.
-    
-            Note, this method should only be called after mount() has been called.
-    
-            prefix -- a prefix which should be used when creating the file;
-                      defaults to "tmp-".
-    
-            """
-            self.__ensure_builddir()
-            return tempfile.mkstemp(dir = self.__builddir, prefix = prefix)
-        
-        (f, path) = __mkstemp(prefix)
-        os.close(f)
-        return path
-
-    def _get_fstab(self):
-        """Return the desired contents of /etc/fstab.
-
-        This is the hook where subclasses may specify the contents of
-        /etc/fstab by returning a string containing the desired contents.
-
-        A sensible default implementation is provided.
-
-        """
-        def __get_fstab_special():
-            s = "devpts     /dev/pts  devpts  gid=5,mode=620   0 0\n"
-            s += "tmpfs      /dev/shm  tmpfs   defaults         0 0\n"
-            s += "proc       /proc     proc    defaults         0 0\n"
-            s += "sysfs      /sys      sysfs   defaults         0 0\n"
-            return s
-            
-        s =  "/dev/root  /         %s    %s 0 0\n" % (self._fstype, "defaults,noatime" if not self._fsopts else self._fsopts)
-        s += __get_fstab_special()
-        return s
-
-    def _get_required_packages(self):
-        """Return a list of required packages.
-
-        This is the hook where subclasses may specify a set of packages which
-        it requires to be installed.
-
-        This returns an empty list by default.
-
-        Note, subclasses should usually chain up to the base class
-        implementation of this hook.
-
-        """
-        return []
-
-    def _get_excluded_packages(self):
-        """Return a list of excluded packages.
-
-        This is the hook where subclasses may specify a set of packages which
-        it requires _not_ to be installed.
-
-        This returns an empty list by default.
-
-        Note, subclasses should usually chain up to the base class
-        implementation of this hook.
-
-        """
-        excluded_packages = []
-        for rpm_path in self._get_local_packages():
-            rpm_name = os.path.basename(rpm_path)
-            package_name = splitFilename(rpm_name)[0]
-            excluded_packages += [package_name]
-        return excluded_packages
-
-    def _get_local_packages(self):
-        """Return a list of rpm path to be local installed.
-
-        This is the hook where subclasses may specify a set of rpms which
-        it requires to be installed locally.
-
-        This returns an empty list by default.
-
-        Note, subclasses should usually chain up to the base class
-        implementation of this hook.
-
-        """
-        if self._local_pkgs_path:
-            if os.path.isdir(self._local_pkgs_path):
-                return glob.glob(
-                        os.path.join(self._local_pkgs_path, '*.rpm'))
-            elif os.path.splitext(self._local_pkgs_path)[-1] == '.rpm':
-                return [self._local_pkgs_path]
-
-        return []
-
-    def _create_bootconfig(self):
-        """Configure the image so that it's bootable.
-
-        This is the hook where subclasses may prepare the image for booting by
-        e.g. creating an initramfs and bootloader configuration.
-
-        This hook is called while the install root is still mounted, after the
-        packages have been installed and the kickstart configuration has been
-        applied, but before the %post scripts have been executed.
-
-        There is no default implementation.
-
-        """
+    def do_create(self):
         pass
 
-    def _get_post_scripts_env(self, in_chroot):
-        """Return an environment dict for %post scripts.
-
-        This is the hook where subclasses may specify some environment
-        variables for %post scripts by return a dict containing the desired
-        environment.
-
-        By default, this returns an empty dict.
-
-        in_chroot -- whether this %post script is to be executed chroot()ed
-                     into _instroot.
-
-        """
-        return {}
-        
-    def _chroot(self):
-        """Chroot into the install root.
-
-        This method may be used by subclasses when executing programs inside
-        the install root e.g.
-
-          subprocess.call(["/bin/ls"], preexec_fn = self.chroot)
-
-        """
-        os.chroot(self._instroot)
-        os.chdir("/")
-
-    def _stage_final_image(self):
-        """Stage the final system image in _outdir.
-
-        This is the hook where subclasses should place the image in _outdir
-        so that package() can copy it to the requested destination directory.
-
-        By default, this moves the install root into _outdir.
-
-        """
-        shutil.move(self._instroot, self._outdir + "/" + self.name)
-
-    def _save_recording_pkgs(self, destdir):
-        """Save the list or content of installed packages to file.
-        """
-        if self._recording_pkgs not in ('content', 'name'):
-            return
-
-        pkgs = self._pkgs_content.keys()
-        pkgs.sort() # inplace op
-
-        # save package name list anyhow
-        if not os.path.exists(destdir):
-            makedirs(destdir)
-
-        namefile = os.path.join(destdir, self.name + '-pkgs.txt')
-        f = open(namefile, "w")
-        content = '\n'.join(pkgs)
-        f.write(content)
-        f.close()
-        self.outimage.append(namefile);
-
-        # if 'content', save more details
-        if self._recording_pkgs == 'content':
-            contfile = os.path.join(destdir, self.name + '-pkgs-content.txt')
-            f = open(contfile, "w")
-
-            for pkg in pkgs:
-                content = pkg + '\n'
-
-                pkgcont = self._pkgs_content[pkg]
-                items = []
-                if pkgcont.has_key('dir'):
-                    items = map(lambda x:x+'/', pkgcont['dir'])
-                if pkgcont.has_key('file'):
-                    items.extend(pkgcont['file'])
-
-                if items:
-                    content += '    '
-                    content += '\n    '.join(items)
-                    content += '\n'
-
-                content += '\n'
-                f.write(content)
-            f.close()
-            self.outimage.append(contfile)
-
-    def do_mount_instroot(self):
-        """Mount or prepare the install root directory.
-
-        This is the interface where plugin may prepare the install root by e.g.
-        mounting creating and loopback mounting a filesystem image to
-        _instroot.
-        """
+    def do_chroot(self):
         pass
 
-    def do_umount_instroot(self):
-        """Undo anything performed in do_mount_instroot().
-
-        This is the interface where plugin must undo anything which was done
-        in do_mount_instroot(). For example, if a filesystem image was mounted
-        onto _instroot, it should be unmounted here.
-        """
+    def do_pack(self):
         pass
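+    # Illustrative (hypothetical) concrete plugin built on this interface; an
+    # actual imager plugin would override these hooks and, by analogy with the
+    # backend plugin interface, export itself through a mic_plugin list:
+    #
+    #   class ExampleImagerPlugin(ImagerPlugin):
+    #       def do_create(self):
+    #           pass   # build the image
+    #       def do_pack(self):
+    #           pass   # package the result
+    #
+    #   mic_plugin = ["example", ExampleImagerPlugin]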
 
-    def do_mount(self, base_on = None, cachedir = None):
-        """Setup the target filesystem in preparation for an install.
-
-        This interface should setup the filesystem which other functions will
-        install into and configure.
-        """
-        def __get_cachedir(cachedir = None):
-            if self.cachedir:
-                return self.cachedir
-    
-            self.__ensure_builddir()
-            if cachedir:
-                self.cachedir = cachedir
-            else:
-                self.cachedir = self.__builddir + "/yum-cache"
-            makedirs(self.cachedir)
-            return self.cachedir
-
-        def __do_bindmounts():
-            """Mount various system directories onto _instroot.
-    
-            This method is called by mount(), but may also be used by subclasses
-            in order to re-mount the bindmounts after modifying the underlying
-            filesystem.
-    
-            """
-            for b in self.__bindmounts:
-                b.mount()
-        
-        def __create_minimal_dev():
-            """Create a minimal /dev so that we don't corrupt the host /dev"""
-            origumask = os.umask(0000)
-            devices = (('null',   1, 3, 0666),
-                       ('urandom',1, 9, 0666),
-                       ('random', 1, 8, 0666),
-                       ('full',   1, 7, 0666),
-                       ('ptmx',   5, 2, 0666),
-                       ('tty',    5, 0, 0666),
-                       ('zero',   1, 5, 0666))
-            links = (("/proc/self/fd", "/dev/fd"),
-                     ("/proc/self/fd/0", "/dev/stdin"),
-                     ("/proc/self/fd/1", "/dev/stdout"),
-                     ("/proc/self/fd/2", "/dev/stderr"))
-    
-            for (node, major, minor, perm) in devices:
-                if not os.path.exists(self._instroot + "/dev/" + node):
-                    os.mknod(self._instroot + "/dev/" + node, perm | stat.S_IFCHR, os.makedev(major,minor))
-            for (src, dest) in links:
-                if not os.path.exists(self._instroot + dest):
-                    os.symlink(src, self._instroot + dest)
-            os.umask(origumask)
-        
-        def __write_fstab():
-            fstab = open(self._instroot + "/etc/fstab", "w")
-            fstab.write(self._get_fstab())
-            fstab.close()
-        
-        self.__ensure_builddir()
-
-        makedirs(self._instroot)
-        makedirs(self._outdir)
-
-        self.do_mount_instroot(base_on)
-
-        for d in ("/dev/pts", "/etc", "/boot", "/var/log", "/var/cache/yum", "/sys", "/proc", "/usr/bin"):
-            makedirs(self._instroot + d)
-
-        if self.target_arch and self.target_arch.startswith("arm"):
-            self.qemu_emulator = setup_qemu_emulator(self._instroot, self.target_arch)
-
-        __get_cachedir(cachedir)
-
-        # bind mount system directories into _instroot
-        for (f, dest) in [("/sys", None), ("/proc", None), ("/proc/sys/fs/binfmt_misc", None),
-                          ("/dev/pts", None),
-                          (__get_cachedir(), "/var/cache/yum")]:
-            self.__bindmounts.append(BindChrootMount(f, self._instroot, dest))
-
-        __do_bindmounts()
-
-        __create_minimal_dev()
-
-        if os.path.exists(self._instroot + "/etc/mtab"):
-            os.unlink(self._instroot + "/etc/mtab")
-        os.symlink("../proc/mounts", self._instroot + "/etc/mtab")
-
-        __write_fstab()
-
-        # get size of available space in 'instroot' fs
-        self._root_fs_avail = get_filesystem_avail(self._instroot)
-
-    def do_umount(self):
-        """Unmounts the target filesystem.
-
-        It should detach the system from the install root.
-        """
-        def __undo_bindmounts():
-            """Unmount the bind-mounted system directories from _instroot.
-    
-            This method is usually only called by unmount(), but may also be used
-            by subclasses in order to gain access to the filesystem obscured by
-            the bindmounts - e.g. in order to create device nodes on the image
-            filesystem.
-    
-            """
-            self.__bindmounts.reverse()
-            for b in self.__bindmounts:
-                b.unmount()
-
-        try:
-            mtab = self._instroot + "/etc/mtab"
-            if not os.path.islink(mtab):
-                os.unlink(self._instroot + "/etc/mtab")
-            if self.qemu_emulator:
-                os.unlink(self._instroot + self.qemu_emulator)
-        except OSError:
-            pass
-
-        __undo_bindmounts()
-
-        """ Clean up yum garbage """
-        try:
-            instroot_pdir = os.path.dirname(self._instroot + self._instroot)
-            if os.path.exists(instroot_pdir):
-                shutil.rmtree(instroot_pdir, ignore_errors = True)
-            yumcachedir = self._instroot + "/var/cache/yum"
-            if os.path.exists(yumcachedir):
-                shutil.rmtree(yumcachedir, ignore_errors = True)
-            yumlibdir = self._instroot + "/var/lib/yum"
-            if os.path.exists(yumlibdir):
-                shutil.rmtree(yumlibdir, ignore_errors = True)
-        except OSError:
-            pass
-
-        self.do_umount_instroot()
-
-    def do_cleanup(self):
-        """Unmounts the target filesystem and deletes temporary files.
-
-        This interface deletes any temporary files and directories that were created
-        on the host system while building the image.
-        """
-        if not self.__builddir:
-            return
-
-        self.do_umount()
-
-        shutil.rmtree(self.__builddir, ignore_errors = True)
-        self.__builddir = None
-
-    def do_install(self, repo_urls={}):
-        """Install packages into the install root.
-
-        This interface installs the packages listed in the supplied kickstart
-        into the install root. By default, the packages are installed from the
-        repository URLs specified in the kickstart.
-        """
-        def __sanity_check():
-            """Ensure that the config we've been given is sane."""
-            if not (kickstart.get_packages(self.ks) or
-                    kickstart.get_groups(self.ks)):
-                raise CreatorError("No packages or groups specified")
-    
-            kickstart.convert_method_to_repo(self.ks)
-    
-            if not kickstart.get_repos(self.ks):
-                raise CreatorError("No repositories specified")
-            
-        def __select_packages(pkg_manager):
-            skipped_pkgs = []
-            for pkg in self._required_pkgs:
-                e = pkg_manager.selectPackage(pkg)
-                if e:
-                    if kickstart.ignore_missing(self.ks):
-                        skipped_pkgs.append(pkg)
-                    elif self.__is_excluded_pkg(pkg):
-                        skipped_pkgs.append(pkg)
-                    else:
-                        raise CreatorError("Failed to find package '%s' : %s" %
-                                           (pkg, e))
-    
-            for pkg in skipped_pkgs:
-                logging.warn("Skipping missing package '%s'" % (pkg,))
-    
-        def __select_groups(pkg_manager):
-            skipped_groups = []
-            for group in self._required_groups:
-                e = pkg_manager.selectGroup(group.name, group.include)
-                if e:
-                    if kickstart.ignore_missing(self.ks):
-                        skipped_groups.append(group)
-                    else:
-                        raise CreatorError("Failed to find group '%s' : %s" %
-                                           (group.name, e))
-    
-            for group in skipped_groups:
-                logging.warn("Skipping missing group '%s'" % (group.name,))
-    
-        def __deselect_packages(pkg_manager):
-            for pkg in self._excluded_pkgs:
-                pkg_manager.deselectPackage(pkg)
-    
-        def __localinst_packages(pkg_manager):
-            for rpm_path in self._get_local_packages():
-                pkg_manager.installLocal(rpm_path)
-
-        # initialize pkg list to install
-        if self.ks:
-            __sanity_check()
-
-            self._required_pkgs = \
-                kickstart.get_packages(self.ks, self._get_required_packages())
-            self._excluded_pkgs = \
-                kickstart.get_excluded(self.ks, self._get_excluded_packages())
-            self._required_groups = kickstart.get_groups(self.ks)
-        else:
-            self._required_pkgs = None
-            self._excluded_pkgs = None
-            self._required_groups = None
-
-        yum_conf = self._mktemp(prefix = "yum.conf-")
-
-        keep_record = None
-        if self._include_src:
-            keep_record = 'include_src'
-        if self._recording_pkgs in ('name', 'content'):
-            keep_record = self._recording_pkgs
-        #fix in next step pkg_manager
-        pkg_manager = self.get_pkg_manager(keep_record)
-        pkg_manager.setup(yum_conf, self._instroot)
-
-        for repo in kickstart.get_repos(self.ks, repo_urls):
-            (name, baseurl, mirrorlist, inc, exc, proxy, proxy_username, proxy_password, debuginfo, source, gpgkey, disable) = repo
-
-            try:
-                yr = pkg_manager.addRepository(name, baseurl, mirrorlist, proxy, proxy_username, proxy_password, inc, exc)
-                if inc:
-                    yr.includepkgs = inc
-                if exc:
-                    yr.exclude = exc
-            except CreatorError, e:
-                raise CreatorError("%s" % (e,))
-
-        if kickstart.exclude_docs(self.ks):
-            rpm.addMacro("_excludedocs", "1")
-        rpm.addMacro("__file_context_path", "%{nil}")
-        if kickstart.inst_langs(self.ks) != None:
-            rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks))
-
-        try:
-            __select_packages(pkg_manager)
-            __select_groups(pkg_manager)
-            __deselect_packages(pkg_manager)
-            __localinst_packages(pkg_manager)
-
-            BOOT_SAFEGUARD = 256L * 1024 * 1024 # 256M
-            checksize = self._root_fs_avail
-            if checksize:
-                checksize -= BOOT_SAFEGUARD
-            if self.target_arch:
-                pkg_manager._add_prob_flags(rpm.RPMPROB_FILTER_IGNOREARCH)
-
-            try:
-                save_env = os.environ["LC_ALL"]
-            except KeyError:
-                save_env = None
-            os.environ["LC_ALL"] = 'C'
-            pkg_manager.runInstall(checksize)
-            if save_env:
-                os.environ["LC_ALL"] = save_env
-            else:
-                os.unsetenv("LC_ALL")
-        finally:
-            if keep_record:
-                self._pkgs_content = pkg_manager.getAllContent()
-
-            pkg_manager.closeRpmDB()
-            pkg_manager.close()
-            os.unlink(yum_conf)
-
-        # do some clean up to avoid lvm info leakage.  this sucks.
-        for subdir in ("cache", "backup", "archive"):
-            lvmdir = self._instroot + "/etc/lvm/" + subdir
-            try:
-                for f in os.listdir(lvmdir):
-                    os.unlink(lvmdir + "/" + f)
-            except:
-                pass
-
-    def do_configure(self, repodata = None):
-        """Configure the system image according to the kickstart.
-
-        This interface applies the (e.g. keyboard or network) configuration
-        specified in the kickstart and executes the kickstart %post scripts.
-
-        If necessary, it also prepares the image to be bootable by e.g.
-        creating an initrd and bootloader configuration.
-        """
-        def __save_repo_keys(repodata):
-            if not repodata:
-                return None
-            gpgkeydir = "/etc/pki/rpm-gpg"
-            makedirs(self._instroot + gpgkeydir)
-            for repo in repodata:
-                if repo["repokey"]:
-                    repokey = gpgkeydir + "/RPM-GPG-KEY-%s" %  repo["name"]
-                    shutil.copy(repo["repokey"], self._instroot + repokey)
-
-        def __run_post_scripts():
-            print "Running scripts"
-            for s in kickstart.get_post_scripts(self.ks):
-                (fd, path) = tempfile.mkstemp(prefix = "ks-script-",
-                                              dir = self._instroot + "/tmp")
-    
-                s.script = s.script.replace("\r", "")
-                os.write(fd, s.script)
-                os.close(fd)
-                os.chmod(path, 0700)
-    
-                env = self._get_post_scripts_env(s.inChroot)
-    
-                if not s.inChroot:
-                    env["INSTALL_ROOT"] = self._instroot
-                    env["IMG_NAME"] = self._name
-                    preexec = None
-                    script = path
-                else:
-                    preexec = self._chroot
-                    script = "/tmp/" + os.path.basename(path)
-    
-                try:
-                    try:
-                        subprocess.call([s.interp, script],
-                                        preexec_fn = preexec, env = env, stdout = sys.stdout, stderr = sys.stderr)
-                    except OSError, (err, msg):
-                        raise CreatorError("Failed to execute %%post script "
-                                           "with '%s' : %s" % (s.interp, msg))
-                finally:
-                    os.unlink(path)
-
-        ksh = self.ks.handler
-
-        try:
-            kickstart.LanguageConfig(self._instroot).apply(ksh.lang)
-            kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard)
-            kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone)
-            #kickstart.AuthConfig(self._instroot).apply(ksh.authconfig)
-            kickstart.FirewallConfig(self._instroot).apply(ksh.firewall)
-            kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw)
-            kickstart.UserConfig(self._instroot).apply(ksh.user)
-            kickstart.ServicesConfig(self._instroot).apply(ksh.services)
-            kickstart.XConfig(self._instroot).apply(ksh.xconfig)
-            kickstart.NetworkConfig(self._instroot).apply(ksh.network)
-            kickstart.RPMMacroConfig(self._instroot).apply(self.ks)
-            kickstart.DesktopConfig(self._instroot).apply(ksh.desktop)
-            __save_repo_keys(repodata)
-            kickstart.MoblinRepoConfig(self._instroot).apply(ksh.repo, repodata)
-        except:
-            print "Failed to apply configuration to image"
-            raise
-
-        self._create_bootconfig()
-        __run_post_scripts()
-
-
-    def do_package(self, destdir = "."):
-        """Prepares the created image for final delivery.
-
-        This interface merely copies the install root to the supplied destination
-        directory.
-        """
-        def __do_genchecksum(self, image_name):
-            if not self._genchecksum:
-                return
-    
-            """ Generate md5sum if /usr/bin/md5sum is available """
-            if os.path.exists("/usr/bin/md5sum"):
-                p = subprocess.Popen(["/usr/bin/md5sum", "-b", image_name],
-                                     stdout=subprocess.PIPE)
-                (md5sum, errorstr) = p.communicate()
-                if p.returncode != 0:
-                    logging.warning("Can't generate md5sum for image %s" % image_name)
-                else:
-                    pattern = re.compile("\*.*$")
-                    md5sum = pattern.sub("*" + os.path.basename(image_name), md5sum)
-                    fd = open(image_name + ".md5sum", "w")
-                    fd.write(md5sum)
-                    fd.close()
-                    self.outimage.append(image_name+".md5sum")
-        
-        self._stage_final_image()
-
-        if self._img_compression_method:
-            if not self._img_name:
-                raise CreatorError("Image name not set.")
-            rc = None
-            img_location = os.path.join(self._outdir,self._img_name)
-
-            print "Compressing %s with %s. Please wait..." % (img_location, self._img_compression_method)
-            if self._img_compression_method == "bz2":
-                bzip2 = find_binary_path('bzip2')
-                rc = subprocess.call([bzip2, "-f", img_location])
-                if rc:
-                    raise CreatorError("Failed to compress image %s with %s." % (img_location, self._img_compression_method))
-                for bootimg in glob.glob(os.path.dirname(img_location) + "/*-boot.bin"):
-                    print "Compressing %s with bzip2. Please wait..." % bootimg
-                    rc = subprocess.call([bzip2, "-f", bootimg])
-                    if rc:
-                        raise CreatorError("Failed to compress image %s with %s." % (bootimg, self._img_compression_method))
-            elif self._img_compression_method == "tar.bz2":
-                dst = "%s.tar.bz2" % (img_location)
-
-                tar = tarfile.open(dst, "w:bz2")
-                # Add files to tarball and remove originals after packaging
-                tar.add(img_location, self._img_name)
-                os.unlink(img_location)
-                for bootimg in glob.glob(os.path.dirname(img_location) + "/*-boot.bin"):
-                    tar.add(bootimg,os.path.basename(bootimg))
-                    os.unlink(bootimg)
-                tar.close()
-
-        if self._recording_pkgs:
-            self._save_recording_pkgs(destdir)
-
-        """ For image formats with two or multiple image files, it will be better to put them under a directory """
-        if self.image_format in ("raw", "vmdk", "vdi", "nand", "mrstnand"):
-            destdir = os.path.join(destdir, "%s-%s" % (self.name, self.image_format))
-            logging.debug("creating destination dir: %s" % destdir)
-            makedirs(destdir)
-
-        # Ensure all data is flushed to _outdir
-        synccmd = find_binary_path("sync")
-        subprocess.call([synccmd])
-
-        for f in os.listdir(self._outdir):
-            shutil.move(os.path.join(self._outdir, f),
-                        os.path.join(destdir, f))
-            self.outimage.append(os.path.join(destdir, f))
-            __do_genchecksum(os.path.join(destdir, f))
-
-    def do_create(self):
-        """ Temporary solution to create image in one single interface """
-        self.do_mount()
-        self.do_install()
-        self.do_configure()
-        self.do_umount()
-        self.do_package()
-
-    def _base_on(self, base_on):
-        """Support Image Convertor, unpack the source image for building the instroot directory.
-        
-            Subclass need a actual implementation.
-        """
-        shutil.copyfile(base_on, self._image)
-
-    def _mount_srcimg(self, srcimg):
-        """Mount source image.
-    
-           This method may be used by subclasses to mount the source image for chroot.
-           There is no default implementation.
-           e.g.
-           "livecd":
-               imgcreate.DiskMount(imgcreate.LoopbackDisk(self.img, 0), self.imgmnt)
-        """
+    def do_unpack(self):
         pass
 
-    def _umount_srcimg(self, srcimg):
-        """Umount source image.
-    
-           This method may be used by subclasses to unmount the source image for chroot,
-           e.g. unmount a raw image. There is no default implementation.
-        """
-        pass
+# [name, cls]: name is the subcommand name, cls is the plugin class
+mic_plugin = ["", None]
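A minimal sketch of how a concrete plugin module would use the mic_plugin marker
above (the module example_plugin, the class ExamplePlugin and the "example"
subcommand are hypothetical, for illustration only; a real plugin would normally
subclass the imager plugin base class defined in this file):

    # hypothetical plugins/imager/example_plugin.py
    class ExamplePlugin(object):
        # PluginMgr requires this class attribute; valid values are
        # "imager", "backend" or "hook"
        plugin_type = "imager"

        def do_pack(self):
            pass

    # module-level marker that PluginMgr looks for:
    # [subcommand name, plugin class]
    mic_plugin = ["example", ExamplePlugin]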
index cecc0ed..7e2ab06 100644 (file)
-#!/usr/bin/python
+#!/usr/bin/python -t
 import os
 import sys
-import micng.pluginbase.base_plugin as bp
+import logging
+
+DEFAULT_PLUGIN_LOCATION="/usr/lib/micng/plugins"
+DEFINED_PLUGIN_TYPES=["imager", "backend", "hook"]
+STRING_PLUGIN_MARK="mic_plugin"
+STRING_PTYPE_MARK="plugin_type"
 
 class PluginMgr(object):
-    def __init__(self, dirlist = []):
-        self.plugin_place = ["/usr/lib/micng/plugins"] + dirlist
-        self.plugins = {}
+    def __init__(self, plugin_dirs=[]):
+        self.plugin_locations = []
+        self.plugin_sets = {}
+        self.plugin_types = DEFINED_PLUGIN_TYPES
+        # initial plugin directory
+        self.addPluginDir(DEFAULT_PLUGIN_LOCATION)
+        for directory in plugin_dirs:
+            self.addPluginDir(os.path.expanduser(directory))
+        # initial plugin sets
+        for plugintype in self.plugin_types:
+            self.plugin_sets[plugintype] = []
     
+    def addPluginDir(self, plugin_dir=None):
+        if not os.path.exists(plugin_dir):
+            logging.debug("Directory already exists: %s" % plugin_dir)
+            return
+        if not os.path.isdir(plugin_dir):
+            logging.debug("Not a directory: %s" % plugin_dir)
+            return
+        if not (plugin_dir in self.plugin_locations):
+            self.plugin_locations.append(plugin_dir)
+
+    def pluginCheck(self, pymod):
+        if not hasattr(pymod, STRING_PLUGIN_MARK):
+            logging.debug("Not a valid plugin: %s" % pymod.__file__)
+            logging.debug("Please check whether %s given" % STRING_PLUGIN_MARK)
+            return False
+        plclass = getattr(pymod, STRING_PLUGIN_MARK)[1]
+        if not hasattr(plclass, STRING_PTYPE_MARK):
+            logging.debug("Not a valid plugin: %s" % pymod.__file__)
+            logging.debug("Please check whether %s given" % STRING_PTYPE_MARK)
+            return False
+        pltype = getattr(plclass, STRING_PTYPE_MARK)
+        if not (pltype in self.plugin_types):
+            logging.debug("Unsupported plugin type in %s: %s" % (pymod.__file__, plugintype))
+            return False
+        return True
+
+    def importModule(self, dir_path, plugin_filename):
+        if plugin_filename.endswith(".pyc"):
+            return
+        if not plugin_filename.endswith(".py"):
+            logging.debug("Not a python file: %s" % os.path.join(dir_path, plugin_filename))
+            return
+        if plugin_filename == "__init__.py":
+            logging.debug("Unsupported python file: %s" % os.path.join(dir_path, plugin_filename))
+            return
+        modname = os.path.splitext(plugin_filename)[0]
+        if sys.modules.has_key(modname):
+            pymod = sys.modules[modname]
+            logging.debug("Module %s already exists: %s" % (modname, pymod.__file__))
+        else:
+            pymod = __import__(modname)
+            pymod.__file__ = os.path.join(dir_path, plugin_filename)
+        if not self.pluginCheck(pymod):
+            logging.warn("Failed to check plugin: %s" % os.path.join(dir_path, plugin_filename))
+            return
+        (pname, pcls) = pymod.__dict__[STRING_PLUGIN_MARK]
+        plugintype = getattr(pcls, STRING_PTYPE_MARK)
+        self.plugin_sets[plugintype].append((pname, pcls))
+
     def loadPlugins(self):
-        for pdir in map(os.path.abspath, self.plugin_place):
+        for pdir in map(os.path.abspath, self.plugin_locations):
             for pitem in os.walk(pdir):
-                sys.path.append(pitem[0])
+                sys.path.insert(0, pitem[0])
                 for pf in pitem[2]:
-                    if not pf.endswith(".py"):
-                        continue
-
-                    pmod =  __import__(os.path.splitext(pf)[0])
-                    if hasattr(pmod, "mic_plugin"):
-                        pname, pcls = pmod.mic_plugin
-                        ptmp = (pname, pcls)
-                        if hasattr(pcls, "plugin_type"):
-                            if pcls.plugin_type not in self.plugins.keys():
-                                self.plugins[pcls.plugin_type] = [ptmp]
-                            else:
-                                self.plugins[pcls.plugin_type].append(ptmp)
-                                     
+                    self.importModule(pitem[0], pf)
+                del(sys.path[0])
+
     def getPluginByCateg(self, categ = None):
-        if categ is None:
-            return self.plugins
+        if not (categ in self.plugin_types):
+            logging.warn("Failed to get plugin category: %s" % categ)
+            return None
         else:
-            return self.plugins[categ]                            
+            return self.plugin_sets[categ]                            
+
+    def getImagerPlugins(self):
+        return self.plugin_sets['imager']
+
+    def getBackendPlugins(self):
+        return self.plugin_sets['backend']
+
+    def getHookPlugins(self):
+        return self.plugin_sets['hook']
+
+    def listAllPlugins(self):
+        for key in self.plugin_sets.keys():
+            sys.stdout.write("plugin type (%s) :::\n" % key)
+            for item in self.plugin_sets[key]:
+                sys.stdout.write("%-6s: %s\n" % (item[0], item[1]))
+
+    def getPluginType(self, plugin_str):
+        pass
+
+if __name__ == "__main__":
+    logging.getLogger().setLevel(logging.DEBUG)
+    pluginmgr = PluginMgr()
+    pluginmgr.loadPlugins()
+    pluginmgr.listAllPlugins()
+    
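A brief usage sketch of the loader above (the extra plugin directory is
illustrative; by default only /usr/lib/micng/plugins is scanned):

    # hypothetical caller
    from micng.pluginmgr import PluginMgr

    mgr = PluginMgr(plugin_dirs=["~/.micng/plugins"])
    mgr.loadPlugins()
    for name, cls in mgr.getImagerPlugins():
        print name, cls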
diff --git a/micng/utils/argparse.py b/micng/utils/argparse.py
deleted file mode 100644 (file)
index a69d294..0000000
+++ /dev/null
@@ -1,2271 +0,0 @@
-# -*- coding: utf-8 -*-\r
-\r
-# Copyright Â© 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.\r
-#\r
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not\r
-# use this file except in compliance with the License. You may obtain a copy\r
-# of the License at\r
-#\r
-#     http://www.apache.org/licenses/LICENSE-2.0\r
-#\r
-# Unless required by applicable law or agreed to in writing, software\r
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
-# License for the specific language governing permissions and limitations\r
-# under the License.\r
-\r
-"""Command-line parsing library\r
-\r
-This module is an optparse-inspired command-line parsing library that:\r
-\r
-    - handles both optional and positional arguments\r
-    - produces highly informative usage messages\r
-    - supports parsers that dispatch to sub-parsers\r
-\r
-The following is a simple usage example that sums integers from the\r
-command-line and writes the result to a file::\r
-\r
-    parser = argparse.ArgumentParser(\r
-        description='sum the integers at the command line')\r
-    parser.add_argument(\r
-        'integers', metavar='int', nargs='+', type=int,\r
-        help='an integer to be summed')\r
-    parser.add_argument(\r
-        '--log', default=sys.stdout, type=argparse.FileType('w'),\r
-        help='the file where the sum should be written')\r
-    args = parser.parse_args()\r
-    args.log.write('%s' % sum(args.integers))\r
-    args.log.close()\r
-\r
-The module contains the following public classes:\r
-\r
-    - ArgumentParser -- The main entry point for command-line parsing. As the\r
-        example above shows, the add_argument() method is used to populate\r
-        the parser with actions for optional and positional arguments. Then\r
-        the parse_args() method is invoked to convert the args at the\r
-        command-line into an object with attributes.\r
-\r
-    - ArgumentError -- The exception raised by ArgumentParser objects when\r
-        there are errors with the parser's actions. Errors raised while\r
-        parsing the command-line are caught by ArgumentParser and emitted\r
-        as command-line messages.\r
-\r
-    - FileType -- A factory for defining types of files to be created. As the\r
-        example above shows, instances of FileType are typically passed as\r
-        the type= argument of add_argument() calls.\r
-\r
-    - Action -- The base class for parser actions. Typically actions are\r
-        selected by passing strings like 'store_true' or 'append_const' to\r
-        the action= argument of add_argument(). However, for greater\r
-        customization of ArgumentParser actions, subclasses of Action may\r
-        be defined and passed as the action= argument.\r
-\r
-    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,\r
-        ArgumentDefaultsHelpFormatter -- Formatter classes which\r
-        may be passed as the formatter_class= argument to the\r
-        ArgumentParser constructor. HelpFormatter is the default,\r
-        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser\r
-        not to change the formatting for help text, and\r
-        ArgumentDefaultsHelpFormatter adds information about argument defaults\r
-        to the help.\r
-\r
-All other classes in this module are considered implementation details.\r
-(Also note that HelpFormatter and RawDescriptionHelpFormatter are only\r
-considered public as object names -- the API of the formatter objects is\r
-still considered an implementation detail.)\r
-"""\r
-\r
-__version__ = '1.0.1'\r
-__all__ = [\r
-    'ArgumentParser',\r
-    'ArgumentError',\r
-    'Namespace',\r
-    'Action',\r
-    'FileType',\r
-    'HelpFormatter',\r
-    'RawDescriptionHelpFormatter',\r
-    'RawTextHelpFormatter',\r
-    'ArgumentDefaultsHelpFormatter',\r
-]\r
-\r
-\r
-import copy as _copy\r
-import os as _os\r
-import re as _re\r
-import sys as _sys\r
-import textwrap as _textwrap\r
-\r
-from gettext import gettext as _\r
-\r
-try:\r
-    _set = set\r
-except NameError:\r
-    from sets import Set as _set\r
-\r
-try:\r
-    _basestring = basestring\r
-except NameError:\r
-    _basestring = str\r
-\r
-try:\r
-    _sorted = sorted\r
-except NameError:\r
-\r
-    def _sorted(iterable, reverse=False):\r
-        result = list(iterable)\r
-        result.sort()\r
-        if reverse:\r
-            result.reverse()\r
-        return result\r
-\r
-# silence Python 2.6 buggy warnings about Exception.message\r
-if _sys.version_info[:2] == (2, 6):\r
-    import warnings\r
-    warnings.filterwarnings(\r
-        action='ignore',\r
-        message='BaseException.message has been deprecated as of Python 2.6',\r
-        category=DeprecationWarning,\r
-        module='argparse')\r
-\r
-\r
-SUPPRESS = '==SUPPRESS=='\r
-\r
-OPTIONAL = '?'\r
-ZERO_OR_MORE = '*'\r
-ONE_OR_MORE = '+'\r
-PARSER = '==PARSER=='\r
-\r
-# =============================\r
-# Utility functions and classes\r
-# =============================\r
-\r
-class _AttributeHolder(object):\r
-    """Abstract base class that provides __repr__.\r
-\r
-    The __repr__ method returns a string in the format::\r
-        ClassName(attr=name, attr=name, ...)\r
-    The attributes are determined either by a class-level attribute,\r
-    '_kwarg_names', or by inspecting the instance __dict__.\r
-    """\r
-\r
-    def __repr__(self):\r
-        type_name = type(self).__name__\r
-        arg_strings = []\r
-        for arg in self._get_args():\r
-            arg_strings.append(repr(arg))\r
-        for name, value in self._get_kwargs():\r
-            arg_strings.append('%s=%r' % (name, value))\r
-        return '%s(%s)' % (type_name, ', '.join(arg_strings))\r
-\r
-    def _get_kwargs(self):\r
-        return _sorted(self.__dict__.items())\r
-\r
-    def _get_args(self):\r
-        return []\r
-\r
-\r
-def _ensure_value(namespace, name, value):\r
-    if getattr(namespace, name, None) is None:\r
-        setattr(namespace, name, value)\r
-    return getattr(namespace, name)\r
-\r
-\r
-# ===============\r
-# Formatting Help\r
-# ===============\r
-\r
-class HelpFormatter(object):\r
-    """Formatter for generating usage messages and argument help strings.\r
-\r
-    Only the name of this class is considered a public API. All the methods\r
-    provided by the class are considered an implementation detail.\r
-    """\r
-\r
-    def __init__(self,\r
-                 prog,\r
-                 indent_increment=2,\r
-                 max_help_position=24,\r
-                 width=None):\r
-\r
-        # default setting for width\r
-        if width is None:\r
-            try:\r
-                width = int(_os.environ['COLUMNS'])\r
-            except (KeyError, ValueError):\r
-                width = 80\r
-            width -= 2\r
-\r
-        self._prog = prog\r
-        self._indent_increment = indent_increment\r
-        self._max_help_position = max_help_position\r
-        self._width = width\r
-\r
-        self._current_indent = 0\r
-        self._level = 0\r
-        self._action_max_length = 0\r
-\r
-        self._root_section = self._Section(self, None)\r
-        self._current_section = self._root_section\r
-\r
-        self._whitespace_matcher = _re.compile(r'\s+')\r
-        self._long_break_matcher = _re.compile(r'\n\n\n+')\r
-\r
-    # ===============================\r
-    # Section and indentation methods\r
-    # ===============================\r
-    def _indent(self):\r
-        self._current_indent += self._indent_increment\r
-        self._level += 1\r
-\r
-    def _dedent(self):\r
-        self._current_indent -= self._indent_increment\r
-        assert self._current_indent >= 0, 'Indent decreased below 0.'\r
-        self._level -= 1\r
-\r
-    class _Section(object):\r
-\r
-        def __init__(self, formatter, parent, heading=None):\r
-            self.formatter = formatter\r
-            self.parent = parent\r
-            self.heading = heading\r
-            self.items = []\r
-\r
-        def format_help(self):\r
-            # format the indented section\r
-            if self.parent is not None:\r
-                self.formatter._indent()\r
-            join = self.formatter._join_parts\r
-            for func, args in self.items:\r
-                func(*args)\r
-            item_help = join([func(*args) for func, args in self.items])\r
-            if self.parent is not None:\r
-                self.formatter._dedent()\r
-\r
-            # return nothing if the section was empty\r
-            if not item_help:\r
-                return ''\r
-\r
-            # add the heading if the section was non-empty\r
-            if self.heading is not SUPPRESS and self.heading is not None:\r
-                current_indent = self.formatter._current_indent\r
-                heading = '%*s%s:\n' % (current_indent, '', self.heading)\r
-            else:\r
-                heading = ''\r
-\r
-            # join the section-initial newline, the heading and the help\r
-            return join(['\n', heading, item_help, '\n'])\r
-\r
-    def _add_item(self, func, args):\r
-        self._current_section.items.append((func, args))\r
-\r
-    # ========================\r
-    # Message building methods\r
-    # ========================\r
-    def start_section(self, heading):\r
-        self._indent()\r
-        section = self._Section(self, self._current_section, heading)\r
-        self._add_item(section.format_help, [])\r
-        self._current_section = section\r
-\r
-    def end_section(self):\r
-        self._current_section = self._current_section.parent\r
-        self._dedent()\r
-\r
-    def add_text(self, text):\r
-        if text is not SUPPRESS and text is not None:\r
-            self._add_item(self._format_text, [text])\r
-\r
-    def add_usage(self, usage, actions, groups, prefix=None):\r
-        if usage is not SUPPRESS:\r
-            args = usage, actions, groups, prefix\r
-            self._add_item(self._format_usage, args)\r
-\r
-    def add_argument(self, action):\r
-        if action.help is not SUPPRESS:\r
-\r
-            # find all invocations\r
-            get_invocation = self._format_action_invocation\r
-            invocations = [get_invocation(action)]\r
-            for subaction in self._iter_indented_subactions(action):\r
-                invocations.append(get_invocation(subaction))\r
-\r
-            # update the maximum item length\r
-            invocation_length = max([len(s) for s in invocations])\r
-            action_length = invocation_length + self._current_indent\r
-            self._action_max_length = max(self._action_max_length,\r
-                                          action_length)\r
-\r
-            # add the item to the list\r
-            self._add_item(self._format_action, [action])\r
-\r
-    def add_arguments(self, actions):\r
-        for action in actions:\r
-            self.add_argument(action)\r
-\r
-    # =======================\r
-    # Help-formatting methods\r
-    # =======================\r
-    def format_help(self):\r
-        help = self._root_section.format_help()\r
-        if help:\r
-            help = self._long_break_matcher.sub('\n\n', help)\r
-            help = help.strip('\n') + '\n'\r
-        return help\r
-\r
-    def _join_parts(self, part_strings):\r
-        return ''.join([part\r
-                        for part in part_strings\r
-                        if part and part is not SUPPRESS])\r
-\r
-    def _format_usage(self, usage, actions, groups, prefix):\r
-        if prefix is None:\r
-            prefix = _('usage: ')\r
-\r
-        # if usage is specified, use that\r
-        if usage is not None:\r
-            usage = usage % dict(prog=self._prog)\r
-\r
-        # if no optionals or positionals are available, usage is just prog\r
-        elif usage is None and not actions:\r
-            usage = '%(prog)s' % dict(prog=self._prog)\r
-\r
-        # if optionals and positionals are available, calculate usage\r
-        elif usage is None:\r
-            prog = '%(prog)s' % dict(prog=self._prog)\r
-\r
-            # split optionals from positionals\r
-            optionals = []\r
-            positionals = []\r
-            for action in actions:\r
-                if action.option_strings:\r
-                    optionals.append(action)\r
-                else:\r
-                    positionals.append(action)\r
-\r
-            # build full usage string\r
-            format = self._format_actions_usage\r
-            action_usage = format(optionals + positionals, groups)\r
-            usage = ' '.join([s for s in [prog, action_usage] if s])\r
-\r
-            # wrap the usage parts if it's too long\r
-            text_width = self._width - self._current_indent\r
-            if len(prefix) + len(usage) > text_width:\r
-\r
-                # break usage into wrappable parts\r
-                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'\r
-                opt_usage = format(optionals, groups)\r
-                pos_usage = format(positionals, groups)\r
-                opt_parts = _re.findall(part_regexp, opt_usage)\r
-                pos_parts = _re.findall(part_regexp, pos_usage)\r
-                assert ' '.join(opt_parts) == opt_usage\r
-                assert ' '.join(pos_parts) == pos_usage\r
-\r
-                # helper for wrapping lines\r
-                def get_lines(parts, indent, prefix=None):\r
-                    lines = []\r
-                    line = []\r
-                    if prefix is not None:\r
-                        line_len = len(prefix) - 1\r
-                    else:\r
-                        line_len = len(indent) - 1\r
-                    for part in parts:\r
-                        if line_len + 1 + len(part) > text_width:\r
-                            lines.append(indent + ' '.join(line))\r
-                            line = []\r
-                            line_len = len(indent) - 1\r
-                        line.append(part)\r
-                        line_len += len(part) + 1\r
-                    if line:\r
-                        lines.append(indent + ' '.join(line))\r
-                    if prefix is not None:\r
-                        lines[0] = lines[0][len(indent):]\r
-                    return lines\r
-\r
-                # if prog is short, follow it with optionals or positionals\r
-                if len(prefix) + len(prog) <= 0.75 * text_width:\r
-                    indent = ' ' * (len(prefix) + len(prog) + 1)\r
-                    if opt_parts:\r
-                        lines = get_lines([prog] + opt_parts, indent, prefix)\r
-                        lines.extend(get_lines(pos_parts, indent))\r
-                    elif pos_parts:\r
-                        lines = get_lines([prog] + pos_parts, indent, prefix)\r
-                    else:\r
-                        lines = [prog]\r
-\r
-                # if prog is long, put it on its own line\r
-                else:\r
-                    indent = ' ' * len(prefix)\r
-                    parts = opt_parts + pos_parts\r
-                    lines = get_lines(parts, indent)\r
-                    if len(lines) > 1:\r
-                        lines = []\r
-                        lines.extend(get_lines(opt_parts, indent))\r
-                        lines.extend(get_lines(pos_parts, indent))\r
-                    lines = [prog] + lines\r
-\r
-                # join lines into usage\r
-                usage = '\n'.join(lines)\r
-\r
-        # prefix with 'usage:'\r
-        return '%s%s\n\n' % (prefix, usage)\r
-\r
-    def _format_actions_usage(self, actions, groups):\r
-        # find group indices and identify actions in groups\r
-        group_actions = _set()\r
-        inserts = {}\r
-        for group in groups:\r
-            try:\r
-                start = actions.index(group._group_actions[0])\r
-            except ValueError:\r
-                continue\r
-            else:\r
-                end = start + len(group._group_actions)\r
-                if actions[start:end] == group._group_actions:\r
-                    for action in group._group_actions:\r
-                        group_actions.add(action)\r
-                    if not group.required:\r
-                        inserts[start] = '['\r
-                        inserts[end] = ']'\r
-                    else:\r
-                        inserts[start] = '('\r
-                        inserts[end] = ')'\r
-                    for i in range(start + 1, end):\r
-                        inserts[i] = '|'\r
-\r
-        # collect all actions format strings\r
-        parts = []\r
-        for i, action in enumerate(actions):\r
-\r
-            # suppressed arguments are marked with None\r
-            # remove | separators for suppressed arguments\r
-            if action.help is SUPPRESS:\r
-                parts.append(None)\r
-                if inserts.get(i) == '|':\r
-                    inserts.pop(i)\r
-                elif inserts.get(i + 1) == '|':\r
-                    inserts.pop(i + 1)\r
-\r
-            # produce all arg strings\r
-            elif not action.option_strings:\r
-                part = self._format_args(action, action.dest)\r
-\r
-                # if it's in a group, strip the outer []\r
-                if action in group_actions:\r
-                    if part[0] == '[' and part[-1] == ']':\r
-                        part = part[1:-1]\r
-\r
-                # add the action string to the list\r
-                parts.append(part)\r
-\r
-            # produce the first way to invoke the option in brackets\r
-            else:\r
-                option_string = action.option_strings[0]\r
-\r
-                # if the Optional doesn't take a value, format is:\r
-                #    -s or --long\r
-                if action.nargs == 0:\r
-                    part = '%s' % option_string\r
-\r
-                # if the Optional takes a value, format is:\r
-                #    -s ARGS or --long ARGS\r
-                else:\r
-                    default = action.dest.upper()\r
-                    args_string = self._format_args(action, default)\r
-                    part = '%s %s' % (option_string, args_string)\r
-\r
-                # make it look optional if it's not required or in a group\r
-                if not action.required and action not in group_actions:\r
-                    part = '[%s]' % part\r
-\r
-                # add the action string to the list\r
-                parts.append(part)\r
-\r
-        # insert things at the necessary indices\r
-        for i in _sorted(inserts, reverse=True):\r
-            parts[i:i] = [inserts[i]]\r
-\r
-        # join all the action items with spaces\r
-        text = ' '.join([item for item in parts if item is not None])\r
-\r
-        # clean up separators for mutually exclusive groups\r
-        open = r'[\[(]'\r
-        close = r'[\])]'\r
-        text = _re.sub(r'(%s) ' % open, r'\1', text)\r
-        text = _re.sub(r' (%s)' % close, r'\1', text)\r
-        text = _re.sub(r'%s *%s' % (open, close), r'', text)\r
-        text = _re.sub(r'\(([^|]*)\)', r'\1', text)\r
-        text = text.strip()\r
-\r
-        # return the text\r
-        return text\r
-\r
-    def _format_text(self, text):\r
-        text_width = self._width - self._current_indent\r
-        indent = ' ' * self._current_indent\r
-        return self._fill_text(text, text_width, indent) + '\n\n'\r
-\r
-    def _format_action(self, action):\r
-        # determine the required width and the entry label\r
-        help_position = min(self._action_max_length + 2,\r
-                            self._max_help_position)\r
-        help_width = self._width - help_position\r
-        action_width = help_position - self._current_indent - 2\r
-        action_header = self._format_action_invocation(action)\r
-\r
-        # no help; start on same line and add a final newline\r
-        if not action.help:\r
-            tup = self._current_indent, '', action_header\r
-            action_header = '%*s%s\n' % tup\r
-\r
-        # short action name; start on the same line and pad two spaces\r
-        elif len(action_header) <= action_width:\r
-            tup = self._current_indent, '', action_width, action_header\r
-            action_header = '%*s%-*s  ' % tup\r
-            indent_first = 0\r
-\r
-        # long action name; start on the next line\r
-        else:\r
-            tup = self._current_indent, '', action_header\r
-            action_header = '%*s%s\n' % tup\r
-            indent_first = help_position\r
-\r
-        # collect the pieces of the action help\r
-        parts = [action_header]\r
-\r
-        # if there was help for the action, add lines of help text\r
-        if action.help:\r
-            help_text = self._expand_help(action)\r
-            help_lines = self._split_lines(help_text, help_width)\r
-            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))\r
-            for line in help_lines[1:]:\r
-                parts.append('%*s%s\n' % (help_position, '', line))\r
-\r
-        # or add a newline if the description doesn't end with one\r
-        elif not action_header.endswith('\n'):\r
-            parts.append('\n')\r
-\r
-        # if there are any sub-actions, add their help as well\r
-        for subaction in self._iter_indented_subactions(action):\r
-            parts.append(self._format_action(subaction))\r
-\r
-        # return a single string\r
-        return self._join_parts(parts)\r
-\r
-    def _format_action_invocation(self, action):\r
-        if not action.option_strings:\r
-            metavar, = self._metavar_formatter(action, action.dest)(1)\r
-            return metavar\r
-\r
-        else:\r
-            parts = []\r
-\r
-            # if the Optional doesn't take a value, format is:\r
-            #    -s, --long\r
-            if action.nargs == 0:\r
-                parts.extend(action.option_strings)\r
-\r
-            # if the Optional takes a value, format is:\r
-            #    -s ARGS, --long ARGS\r
-            else:\r
-                default = action.dest.upper()\r
-                args_string = self._format_args(action, default)\r
-                for option_string in action.option_strings:\r
-                    parts.append('%s %s' % (option_string, args_string))\r
-\r
-            return ', '.join(parts)\r
-\r
-    def _metavar_formatter(self, action, default_metavar):\r
-        if action.metavar is not None:\r
-            result = action.metavar\r
-        elif action.choices is not None:\r
-            choice_strs = [str(choice) for choice in action.choices]\r
-            #result = '{%s}' % ','.join(choice_strs)\r
-            result = ""\r
-        else:\r
-            result = default_metavar\r
-\r
-        def format(tuple_size):\r
-            if isinstance(result, tuple):\r
-                return result\r
-            else:\r
-                return (result, ) * tuple_size\r
-        return format\r
-\r
-    def _format_args(self, action, default_metavar):\r
-        get_metavar = self._metavar_formatter(action, default_metavar)\r
-        if action.nargs is None:\r
-            result = '%s' % get_metavar(1)\r
-        elif action.nargs == OPTIONAL:\r
-            result = '[%s]' % get_metavar(1)\r
-        elif action.nargs == ZERO_OR_MORE:\r
-            result = '[%s [%s ...]]' % get_metavar(2)\r
-        elif action.nargs == ONE_OR_MORE:\r
-            result = '%s [%s ...]' % get_metavar(2)\r
-        elif action.nargs is PARSER:\r
-            result = '%s ...' % get_metavar(1)\r
-        else:\r
-            formats = ['%s' for _ in range(action.nargs)]\r
-            result = ' '.join(formats) % get_metavar(action.nargs)\r
-        return result\r
-\r
-    def _expand_help(self, action):\r
-        params = dict(vars(action), prog=self._prog)\r
-        for name in list(params):\r
-            if params[name] is SUPPRESS:\r
-                del params[name]\r
-        if params.get('choices') is not None:\r
-            choices_str = ', '.join([str(c) for c in params['choices']])\r
-            params['choices'] = choices_str\r
-        return self._get_help_string(action) % params\r
-\r
-    def _iter_indented_subactions(self, action):\r
-        try:\r
-            get_subactions = action._get_subactions\r
-        except AttributeError:\r
-            pass\r
-        else:\r
-            self._indent()\r
-            for subaction in get_subactions():\r
-                yield subaction\r
-            self._dedent()\r
-\r
-    def _split_lines(self, text, width):\r
-        text = self._whitespace_matcher.sub(' ', text).strip()\r
-        return _textwrap.wrap(text, width)\r
-\r
-    def _fill_text(self, text, width, indent):\r
-        text = self._whitespace_matcher.sub(' ', text).strip()\r
-        return _textwrap.fill(text, width, initial_indent=indent,\r
-                                           subsequent_indent=indent)\r
-\r
-    def _get_help_string(self, action):\r
-        return action.help\r
-\r
-\r
-class RawDescriptionHelpFormatter(HelpFormatter):\r
-    """Help message formatter which retains any formatting in descriptions.\r
-\r
-    Only the name of this class is considered a public API. All the methods\r
-    provided by the class are considered an implementation detail.\r
-    """\r
-\r
-    def _fill_text(self, text, width, indent):\r
-        return ''.join([indent + line for line in text.splitlines(True)])\r
-\r
-\r
-class RawTextHelpFormatter(RawDescriptionHelpFormatter):\r
-    """Help message formatter which retains formatting of all help text.\r
-\r
-    Only the name of this class is considered a public API. All the methods\r
-    provided by the class are considered an implementation detail.\r
-    """\r
-\r
-    def _split_lines(self, text, width):\r
-        return text.splitlines()\r
-\r
-\r
-class ArgumentDefaultsHelpFormatter(HelpFormatter):\r
-    """Help message formatter which adds default values to argument help.\r
-\r
-    Only the name of this class is considered a public API. All the methods\r
-    provided by the class are considered an implementation detail.\r
-    """\r
-\r
-    def _get_help_string(self, action):\r
-        help = action.help\r
-        if '%(default)' not in action.help:\r
-            if action.default is not SUPPRESS:\r
-                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]\r
-                if action.option_strings or action.nargs in defaulting_nargs:\r
-                    help += ' (default: %(default)s)'\r
-        return help\r
-\r
-\r
-# =====================\r
-# Options and Arguments\r
-# =====================\r
-\r
-def _get_action_name(argument):\r
-    if argument is None:\r
-        return None\r
-    elif argument.option_strings:\r
-        return  '/'.join(argument.option_strings)\r
-    elif argument.metavar not in (None, SUPPRESS):\r
-        return argument.metavar\r
-    elif argument.dest not in (None, SUPPRESS):\r
-        return argument.dest\r
-    else:\r
-        return None\r
-\r
-\r
-class ArgumentError(Exception):\r
-    """An error from creating or using an argument (optional or positional).\r
-\r
-    The string value of this exception is the message, augmented with\r
-    information about the argument that caused it.\r
-    """\r
-\r
-    def __init__(self, argument, message):\r
-        self.argument_name = _get_action_name(argument)\r
-        self.message = message\r
-\r
-    def __str__(self):\r
-        if self.argument_name is None:\r
-            format = '%(message)s'\r
-        else:\r
-            format = 'argument %(argument_name)s: %(message)s'\r
-        return format % dict(message=self.message,\r
-                             argument_name=self.argument_name)\r
-\r
-# ==============\r
-# Action classes\r
-# ==============\r
-\r
-class Action(_AttributeHolder):\r
-    """Information about how to convert command line strings to Python objects.\r
-\r
-    Action objects are used by an ArgumentParser to represent the information\r
-    needed to parse a single argument from one or more strings from the\r
-    command line. The keyword arguments to the Action constructor are also\r
-    all attributes of Action instances.\r
-\r
-    Keyword Arguments:\r
-\r
-        - option_strings -- A list of command-line option strings which\r
-            should be associated with this action.\r
-\r
-        - dest -- The name of the attribute to hold the created object(s)\r
-\r
-        - nargs -- The number of command-line arguments that should be\r
-            consumed. By default, one argument will be consumed and a single\r
-            value will be produced.  Other values include:\r
-                - N (an integer) consumes N arguments (and produces a list)\r
-                - '?' consumes zero or one arguments\r
-                - '*' consumes zero or more arguments (and produces a list)\r
-                - '+' consumes one or more arguments (and produces a list)\r
-            Note that the difference between the default and nargs=1 is that\r
-            with the default, a single value will be produced, while with\r
-            nargs=1, a list containing a single value will be produced.\r
-\r
-        - const -- The value to be produced if the option is specified and the\r
-            option uses an action that takes no values.\r
-\r
-        - default -- The value to be produced if the option is not specified.\r
-\r
-        - type -- The type which the command-line arguments should be converted\r
-            to, should be one of 'string', 'int', 'float', 'complex' or a\r
-            callable object that accepts a single string argument. If None,\r
-            'string' is assumed.\r
-\r
-        - choices -- A container of values that should be allowed. If not None,\r
-            after a command-line argument has been converted to the appropriate\r
-            type, an exception will be raised if it is not a member of this\r
-            collection.\r
-\r
-        - required -- True if the action must always be specified at the\r
-            command line. This is only meaningful for optional command-line\r
-            arguments.\r
-\r
-        - help -- The help string describing the argument.\r
-\r
-        - metavar -- The name to be used for the option's argument with the\r
-            help string. If None, the 'dest' value will be used as the name.\r
-    """\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 nargs=None,\r
-                 const=None,\r
-                 default=None,\r
-                 type=None,\r
-                 choices=None,\r
-                 required=False,\r
-                 help=None,\r
-                 metavar=None):\r
-        self.option_strings = option_strings\r
-        self.dest = dest\r
-        self.nargs = nargs\r
-        self.const = const\r
-        self.default = default\r
-        self.type = type\r
-        self.choices = choices\r
-        self.required = required\r
-        self.help = help\r
-        self.metavar = metavar\r
-\r
-    def _get_kwargs(self):\r
-        names = [\r
-            'option_strings',\r
-            'dest',\r
-            'nargs',\r
-            'const',\r
-            'default',\r
-            'type',\r
-            'choices',\r
-            'help',\r
-            'metavar',\r
-        ]\r
-        return [(name, getattr(self, name)) for name in names]\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        raise NotImplementedError(_('.__call__() not defined'))\r
-\r
-\r
-class _StoreAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 nargs=None,\r
-                 const=None,\r
-                 default=None,\r
-                 type=None,\r
-                 choices=None,\r
-                 required=False,\r
-                 help=None,\r
-                 metavar=None):\r
-        if nargs == 0:\r
-            raise ValueError('nargs for store actions must be > 0; if you '\r
-                             'have nothing to store, actions such as store '\r
-                             'true or store const may be more appropriate')\r
-        if const is not None and nargs != OPTIONAL:\r
-            raise ValueError('nargs must be %r to supply const' % OPTIONAL)\r
-        super(_StoreAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=nargs,\r
-            const=const,\r
-            default=default,\r
-            type=type,\r
-            choices=choices,\r
-            required=required,\r
-            help=help,\r
-            metavar=metavar)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        setattr(namespace, self.dest, values)\r
-\r
-\r
-class _StoreConstAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 const,\r
-                 default=None,\r
-                 required=False,\r
-                 help=None,\r
-                 metavar=None):\r
-        super(_StoreConstAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=0,\r
-            const=const,\r
-            default=default,\r
-            required=required,\r
-            help=help)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        setattr(namespace, self.dest, self.const)\r
-\r
-\r
-class _StoreTrueAction(_StoreConstAction):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 default=False,\r
-                 required=False,\r
-                 help=None):\r
-        super(_StoreTrueAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            const=True,\r
-            default=default,\r
-            required=required,\r
-            help=help)\r
-\r
-\r
-class _StoreFalseAction(_StoreConstAction):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 default=True,\r
-                 required=False,\r
-                 help=None):\r
-        super(_StoreFalseAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            const=False,\r
-            default=default,\r
-            required=required,\r
-            help=help)\r
-\r
-\r
-class _AppendAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 nargs=None,\r
-                 const=None,\r
-                 default=None,\r
-                 type=None,\r
-                 choices=None,\r
-                 required=False,\r
-                 help=None,\r
-                 metavar=None):\r
-        if nargs == 0:\r
-            raise ValueError('nargs for append actions must be > 0; if arg '\r
-                             'strings are not supplying the value to append, '\r
-                             'the append const action may be more appropriate')\r
-        if const is not None and nargs != OPTIONAL:\r
-            raise ValueError('nargs must be %r to supply const' % OPTIONAL)\r
-        super(_AppendAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=nargs,\r
-            const=const,\r
-            default=default,\r
-            type=type,\r
-            choices=choices,\r
-            required=required,\r
-            help=help,\r
-            metavar=metavar)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        items = _copy.copy(_ensure_value(namespace, self.dest, []))\r
-        items.append(values)\r
-        setattr(namespace, self.dest, items)\r
-\r
-\r
-class _AppendConstAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 const,\r
-                 default=None,\r
-                 required=False,\r
-                 help=None,\r
-                 metavar=None):\r
-        super(_AppendConstAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=0,\r
-            const=const,\r
-            default=default,\r
-            required=required,\r
-            help=help,\r
-            metavar=metavar)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        items = _copy.copy(_ensure_value(namespace, self.dest, []))\r
-        items.append(self.const)\r
-        setattr(namespace, self.dest, items)\r
-\r
-\r
-class _CountAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest,\r
-                 default=None,\r
-                 required=False,\r
-                 help=None):\r
-        super(_CountAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=0,\r
-            default=default,\r
-            required=required,\r
-            help=help)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        new_count = _ensure_value(namespace, self.dest, 0) + 1\r
-        setattr(namespace, self.dest, new_count)\r
-\r
-\r
-class _HelpAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest=SUPPRESS,\r
-                 default=SUPPRESS,\r
-                 help=None):\r
-        super(_HelpAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            default=default,\r
-            nargs=0,\r
-            help=help)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        parser.print_help()\r
-        parser.exit()\r
-\r
-\r
-class _VersionAction(Action):\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 dest=SUPPRESS,\r
-                 default=SUPPRESS,\r
-                 help=None):\r
-        super(_VersionAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            default=default,\r
-            nargs=0,\r
-            help=help)\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        parser.print_version()\r
-        parser.exit()\r
-\r
-\r
-class _SubParsersAction(Action):\r
-\r
-    class _ChoicesPseudoAction(Action):\r
-\r
-        def __init__(self, name, help):\r
-            sup = super(_SubParsersAction._ChoicesPseudoAction, self)\r
-            sup.__init__(option_strings=[], dest=name, help=help)\r
-\r
-    def __init__(self,\r
-                 option_strings,\r
-                 prog,\r
-                 parser_class,\r
-                 dest=SUPPRESS,\r
-                 help=None,\r
-                 metavar=None):\r
-\r
-        self._prog_prefix = prog\r
-        self._parser_class = parser_class\r
-        self._name_parser_map = {}\r
-        self._choices_actions = []\r
-\r
-        super(_SubParsersAction, self).__init__(\r
-            option_strings=option_strings,\r
-            dest=dest,\r
-            nargs=PARSER,\r
-            choices=self._name_parser_map,\r
-            help=help,\r
-            metavar=metavar)\r
-\r
-    def add_parser(self, name, **kwargs):\r
-        # set prog from the existing prefix\r
-        if kwargs.get('prog') is None:\r
-            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)\r
-\r
-        # create a pseudo-action to hold the choice help\r
-        if 'help' in kwargs:\r
-            help = kwargs.pop('help')\r
-            choice_action = self._ChoicesPseudoAction(name, help)\r
-            self._choices_actions.append(choice_action)\r
-\r
-        # create the parser and add it to the map\r
-        parser = self._parser_class(**kwargs)\r
-        self._name_parser_map[name] = parser\r
-        return parser\r
-\r
-    def _get_subactions(self):\r
-        return self._choices_actions\r
-\r
-    def __call__(self, parser, namespace, values, option_string=None):\r
-        parser_name = values[0]\r
-        arg_strings = values[1:]\r
-\r
-        # set the parser name if requested\r
-        if self.dest is not SUPPRESS:\r
-            setattr(namespace, self.dest, parser_name)\r
-\r
-        # select the parser\r
-        try:\r
-            parser = self._name_parser_map[parser_name]\r
-        except KeyError:\r
-            tup = parser_name, ', '.join(self._name_parser_map)\r
-            msg = _('unknown parser %r (choices: %s)' % tup)\r
-            raise ArgumentError(self, msg)\r
-\r
-        # parse all the remaining options into the namespace\r
-        parser.parse_args(arg_strings, namespace)\r
-\r
-\r
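A minimal usage sketch of the subparser machinery above (the 'micng' prog name
and the 'create' subcommand are illustrative, not taken from this file):

    parser = ArgumentParser(prog='micng')
    subparsers = parser.add_subparsers(dest='command')
    create = subparsers.add_parser('create', help='create an image')
    create.add_argument('ksfile')
    args = parser.parse_args(['create', 'handset.ks'])
    # args.command == 'create', args.ksfile == 'handset.ks'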
-# ==============\r
-# Type classes\r
-# ==============\r
-\r
-class FileType(object):\r
-    """Factory for creating file object types\r
-\r
-    Instances of FileType are typically passed as type= arguments to the\r
-    ArgumentParser add_argument() method.\r
-\r
-    Keyword Arguments:\r
-        - mode -- A string indicating how the file is to be opened. Accepts the\r
-            same values as the builtin open() function.\r
-        - bufsize -- The file's desired buffer size. Accepts the same values as\r
-            the builtin open() function.\r
-    """\r
-\r
-    def __init__(self, mode='r', bufsize=None):\r
-        self._mode = mode\r
-        self._bufsize = bufsize\r
-\r
-    def __call__(self, string):\r
-        # the special argument "-" means sys.std{in,out}\r
-        if string == '-':\r
-            if 'r' in self._mode:\r
-                return _sys.stdin\r
-            elif 'w' in self._mode:\r
-                return _sys.stdout\r
-            else:\r
-                msg = _('argument "-" with mode %r' % self._mode)\r
-                raise ValueError(msg)\r
-\r
-        # all other arguments are used as file names\r
-        if self._bufsize:\r
-            return open(string, self._mode, self._bufsize)\r
-        else:\r
-            return open(string, self._mode)\r
-\r
-    def __repr__(self):\r
-        args = [self._mode, self._bufsize]\r
-        args_str = ', '.join([repr(arg) for arg in args if arg is not None])\r
-        return '%s(%s)' % (type(self).__name__, args_str)\r
-\r
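A minimal sketch of FileType as described in its docstring (file names are
illustrative):

    parser = ArgumentParser()
    parser.add_argument('infile', type=FileType('r'))
    parser.add_argument('--log', type=FileType('w'))
    args = parser.parse_args(['-'])   # '-' maps infile to sys.stdin
    # parser.parse_args(['pkgs.txt', '--log', 'build.log']) would open both files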
-# ===========================\r
-# Optional and Positional Parsing\r
-# ===========================\r
-\r
-class Namespace(_AttributeHolder):\r
-    """Simple object for storing attributes.\r
-\r
-    Implements equality by attribute names and values, and provides a simple\r
-    string representation.\r
-    """\r
-\r
-    def __init__(self, **kwargs):\r
-        for name in kwargs:\r
-            setattr(self, name, kwargs[name])\r
-\r
-    def __eq__(self, other):\r
-        return vars(self) == vars(other)\r
-\r
-    def __ne__(self, other):\r
-        return not (self == other)\r
-\r
-\r
-class _ActionsContainer(object):\r
-\r
-    def __init__(self,\r
-                 description,\r
-                 prefix_chars,\r
-                 argument_default,\r
-                 conflict_handler):\r
-        super(_ActionsContainer, self).__init__()\r
-\r
-        self.description = description\r
-        self.argument_default = argument_default\r
-        self.prefix_chars = prefix_chars\r
-        self.conflict_handler = conflict_handler\r
-\r
-        # set up registries\r
-        self._registries = {}\r
-\r
-        # register actions\r
-        self.register('action', None, _StoreAction)\r
-        self.register('action', 'store', _StoreAction)\r
-        self.register('action', 'store_const', _StoreConstAction)\r
-        self.register('action', 'store_true', _StoreTrueAction)\r
-        self.register('action', 'store_false', _StoreFalseAction)\r
-        self.register('action', 'append', _AppendAction)\r
-        self.register('action', 'append_const', _AppendConstAction)\r
-        self.register('action', 'count', _CountAction)\r
-        self.register('action', 'help', _HelpAction)\r
-        self.register('action', 'version', _VersionAction)\r
-        self.register('action', 'parsers', _SubParsersAction)\r
-\r
-        # raise an exception if the conflict handler is invalid\r
-        self._get_handler()\r
-\r
-        # action storage\r
-        self._actions = []\r
-        self._option_string_actions = {}\r
-\r
-        # groups\r
-        self._action_groups = []\r
-        self._mutually_exclusive_groups = []\r
-\r
-        # defaults storage\r
-        self._defaults = {}\r
-\r
-        # determines whether an "option" looks like a negative number\r
-        self._negative_number_matcher = _re.compile(r'^-\d+|-\d*.\d+$')\r
-\r
-        # whether or not there are any optionals that look like negative\r
-        # numbers -- uses a list so it can be shared and edited\r
-        self._has_negative_number_optionals = []\r
-\r
-    # ====================\r
-    # Registration methods\r
-    # ====================\r
-    def register(self, registry_name, value, object):\r
-        registry = self._registries.setdefault(registry_name, {})\r
-        registry[value] = object\r
-\r
-    def _registry_get(self, registry_name, value, default=None):\r
-        return self._registries[registry_name].get(value, default)\r
-\r
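The register()/_registry_get() pair above maps string names to action classes
or type converters, as the built-in 'action' registrations show. A minimal
sketch with a custom type converter (the 'hex' name is illustrative):

    parser = ArgumentParser()
    parser.register('type', 'hex', lambda s: int(s, 16))
    parser.add_argument('--addr', type='hex')
    args = parser.parse_args(['--addr', '0x1f'])   # args.addr == 31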
-    # ==================================\r
-    # Namespace default settings methods\r
-    # ==================================\r
-    def set_defaults(self, **kwargs):\r
-        self._defaults.update(kwargs)\r
-\r
-        # if these defaults match any existing arguments, replace\r
-        # the previous default on the object with the new one\r
-        for action in self._actions:\r
-            if action.dest in kwargs:\r
-                action.default = kwargs[action.dest]\r
-\r
-    # =======================\r
-    # Adding argument actions\r
-    # =======================\r
-    def add_argument(self, *args, **kwargs):\r
-        """\r
-        add_argument(dest, ..., name=value, ...)\r
-        add_argument(option_string, option_string, ..., name=value, ...)\r
-        """\r
-\r
-        # if no positional args are supplied or only one is supplied and\r
-        # it doesn't look like an option string, parse a positional\r
-        # argument\r
-        chars = self.prefix_chars\r
-        if not args or len(args) == 1 and args[0][0] not in chars:\r
-            kwargs = self._get_positional_kwargs(*args, **kwargs)\r
-\r
-        # otherwise, we're adding an optional argument\r
-        else:\r
-            kwargs = self._get_optional_kwargs(*args, **kwargs)\r
-\r
-        # if no default was supplied, use the parser-level default\r
-        if 'default' not in kwargs:\r
-            dest = kwargs['dest']\r
-            if dest in self._defaults:\r
-                kwargs['default'] = self._defaults[dest]\r
-            elif self.argument_default is not None:\r
-                kwargs['default'] = self.argument_default\r
-\r
-        # create the action object, and add it to the parser\r
-        action_class = self._pop_action_class(kwargs)\r
-        action = action_class(**kwargs)\r
-        return self._add_action(action)\r
-\r
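A minimal sketch of the two call forms documented in the docstring above
(argument names are illustrative; dest is inferred from the first long option
string):

    parser = ArgumentParser()
    parser.add_argument('ksfile')                       # positional form
    parser.add_argument('-o', '--outdir', default='.')  # optional form, dest='outdir'
    args = parser.parse_args(['handset.ks', '-o', '/tmp'])
    # args.ksfile == 'handset.ks', args.outdir == '/tmp'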
-    def add_argument_group(self, *args, **kwargs):\r
-        group = _ArgumentGroup(self, *args, **kwargs)\r
-        self._action_groups.append(group)\r
-        return group\r
-\r
-    def add_mutually_exclusive_group(self, **kwargs):\r
-        group = _MutuallyExclusiveGroup(self, **kwargs)\r
-        self._mutually_exclusive_groups.append(group)\r
-        return group\r
-\r
-    def _add_action(self, action):\r
-        # resolve any conflicts\r
-        self._check_conflict(action)\r
-\r
-        # add to actions list\r
-        self._actions.append(action)\r
-        action.container = self\r
-\r
-        # index the action by any option strings it has\r
-        for option_string in action.option_strings:\r
-            self._option_string_actions[option_string] = action\r
-\r
-        # set the flag if any option strings look like negative numbers\r
-        for option_string in action.option_strings:\r
-            if self._negative_number_matcher.match(option_string):\r
-                if not self._has_negative_number_optionals:\r
-                    self._has_negative_number_optionals.append(True)\r
-\r
-        # return the created action\r
-        return action\r
-\r
-    def _remove_action(self, action):\r
-        self._actions.remove(action)\r
-\r
-    def _add_container_actions(self, container):\r
-        # collect groups by titles\r
-        title_group_map = {}\r
-        for group in self._action_groups:\r
-            if group.title in title_group_map:\r
-                msg = _('cannot merge actions - two groups are named %r')\r
-                raise ValueError(msg % (group.title))\r
-            title_group_map[group.title] = group\r
-\r
-        # map each action to its group\r
-        group_map = {}\r
-        for group in container._action_groups:\r
-\r
-            # if a group with the title exists, use that, otherwise\r
-            # create a new group matching the container's group\r
-            if group.title not in title_group_map:\r
-                title_group_map[group.title] = self.add_argument_group(\r
-                    title=group.title,\r
-                    description=group.description,\r
-                    conflict_handler=group.conflict_handler)\r
-\r
-            # map the actions to their new group\r
-            for action in group._group_actions:\r
-                group_map[action] = title_group_map[group.title]\r
-\r
-        # add container's mutually exclusive groups\r
-        # NOTE: if add_mutually_exclusive_group ever gains title= and\r
-        # description= then this code will need to be expanded as above\r
-        for group in container._mutually_exclusive_groups:\r
-            mutex_group = self.add_mutually_exclusive_group(\r
-                required=group.required)\r
-\r
-            # map the actions to their new mutex group\r
-            for action in group._group_actions:\r
-                group_map[action] = mutex_group\r
-\r
-        # add all actions to this container or their group\r
-        for action in container._actions:\r
-            group_map.get(action, self)._add_action(action)\r
-\r
-    def _get_positional_kwargs(self, dest, **kwargs):\r
-        # make sure required is not specified\r
-        if 'required' in kwargs:\r
-            msg = _("'required' is an invalid argument for positionals")\r
-            raise TypeError(msg)\r
-\r
-        # mark positional arguments as required if at least one is\r
-        # always required\r
-        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:\r
-            kwargs['required'] = True\r
-        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:\r
-            kwargs['required'] = True\r
-\r
-        # return the keyword arguments with no option strings\r
-        return dict(kwargs, dest=dest, option_strings=[])\r
-\r
-    def _get_optional_kwargs(self, *args, **kwargs):\r
-        # determine short and long option strings\r
-        option_strings = []\r
-        long_option_strings = []\r
-        for option_string in args:\r
-            # error on one-or-fewer-character option strings\r
-            if len(option_string) < 2:\r
-                msg = _('invalid option string %r: '\r
-                        'must be at least two characters long')\r
-                raise ValueError(msg % option_string)\r
-\r
-            # error on strings that don't start with an appropriate prefix\r
-            if not option_string[0] in self.prefix_chars:\r
-                msg = _('invalid option string %r: '\r
-                        'must start with a character %r')\r
-                tup = option_string, self.prefix_chars\r
-                raise ValueError(msg % tup)\r
-\r
-            # error on strings that are all prefix characters\r
-            if not (_set(option_string) - _set(self.prefix_chars)):\r
-                msg = _('invalid option string %r: '\r
-                        'must contain characters other than %r')\r
-                tup = option_string, self.prefix_chars\r
-                raise ValueError(msg % tup)\r
-\r
-            # strings starting with two prefix characters are long options\r
-            option_strings.append(option_string)\r
-            if option_string[0] in self.prefix_chars:\r
-                if option_string[1] in self.prefix_chars:\r
-                    long_option_strings.append(option_string)\r
-\r
-        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'\r
-        dest = kwargs.pop('dest', None)\r
-        if dest is None:\r
-            if long_option_strings:\r
-                dest_option_string = long_option_strings[0]\r
-            else:\r
-                dest_option_string = option_strings[0]\r
-            dest = dest_option_string.lstrip(self.prefix_chars)\r
-            dest = dest.replace('-', '_')\r
-\r
-        # return the updated keyword arguments\r
-        return dict(kwargs, dest=dest, option_strings=option_strings)\r
-\r
-    def _pop_action_class(self, kwargs, default=None):\r
-        action = kwargs.pop('action', default)\r
-        return self._registry_get('action', action, action)\r
-\r
-    def _get_handler(self):\r
-        # determine function from conflict handler string\r
-        handler_func_name = '_handle_conflict_%s' % self.conflict_handler\r
-        try:\r
-            return getattr(self, handler_func_name)\r
-        except AttributeError:\r
-            msg = _('invalid conflict_resolution value: %r')\r
-            raise ValueError(msg % self.conflict_handler)\r
-\r
-    def _check_conflict(self, action):\r
-\r
-        # find all options that conflict with this option\r
-        confl_optionals = []\r
-        for option_string in action.option_strings:\r
-            if option_string in self._option_string_actions:\r
-                confl_optional = self._option_string_actions[option_string]\r
-                confl_optionals.append((option_string, confl_optional))\r
-\r
-        # resolve any conflicts\r
-        if confl_optionals:\r
-            conflict_handler = self._get_handler()\r
-            conflict_handler(action, confl_optionals)\r
-\r
-    def _handle_conflict_error(self, action, conflicting_actions):\r
-        message = _('conflicting option string(s): %s')\r
-        conflict_string = ', '.join([option_string\r
-                                     for option_string, action\r
-                                     in conflicting_actions])\r
-        raise ArgumentError(action, message % conflict_string)\r
-\r
-    def _handle_conflict_resolve(self, action, conflicting_actions):\r
-\r
-        # remove all conflicting options\r
-        for option_string, action in conflicting_actions:\r
-\r
-            # remove the conflicting option\r
-            action.option_strings.remove(option_string)\r
-            self._option_string_actions.pop(option_string, None)\r
-\r
-            # if the option now has no option string, remove it from the\r
-            # container holding it\r
-            if not action.option_strings:\r
-                action.container._remove_action(action)\r
-\r
-\r
-class _ArgumentGroup(_ActionsContainer):\r
-\r
-    def __init__(self, container, title=None, description=None, **kwargs):\r
-        # add any missing keyword arguments by checking the container\r
-        update = kwargs.setdefault\r
-        update('conflict_handler', container.conflict_handler)\r
-        update('prefix_chars', container.prefix_chars)\r
-        update('argument_default', container.argument_default)\r
-        super_init = super(_ArgumentGroup, self).__init__\r
-        super_init(description=description, **kwargs)\r
-\r
-        # group attributes\r
-        self.title = title\r
-        self._group_actions = []\r
-\r
-        # share most attributes with the container\r
-        self._registries = container._registries\r
-        self._actions = container._actions\r
-        self._option_string_actions = container._option_string_actions\r
-        self._defaults = container._defaults\r
-        self._has_negative_number_optionals = \\r
-            container._has_negative_number_optionals\r
-\r
-    def _add_action(self, action):\r
-        action = super(_ArgumentGroup, self)._add_action(action)\r
-        self._group_actions.append(action)\r
-        return action\r
-\r
-    def _remove_action(self, action):\r
-        super(_ArgumentGroup, self)._remove_action(action)\r
-        self._group_actions.remove(action)\r
-\r
-\r
-class _MutuallyExclusiveGroup(_ArgumentGroup):\r
-\r
-    def __init__(self, container, required=False):\r
-        super(_MutuallyExclusiveGroup, self).__init__(container)\r
-        self.required = required\r
-        self._container = container\r
-\r
-    def _add_action(self, action):\r
-        if action.required:\r
-            msg = _('mutually exclusive arguments must be optional')\r
-            raise ValueError(msg)\r
-        action = self._container._add_action(action)\r
-        self._group_actions.append(action)\r
-        return action\r
-\r
-    def _remove_action(self, action):\r
-        self._container._remove_action(action)\r
-        self._group_actions.remove(action)\r
-\r
-\r
-class ArgumentParser(_AttributeHolder, _ActionsContainer):\r
-    """Object for parsing command line strings into Python objects.\r
-\r
-    Keyword Arguments:\r
-        - prog -- The name of the program (default: sys.argv[0])\r
-        - usage -- A usage message (default: auto-generated from arguments)\r
-        - description -- A description of what the program does\r
-        - epilog -- Text following the argument descriptions\r
-        - version -- Add a -v/--version option with the given version string\r
-        - parents -- Parsers whose arguments should be copied into this one\r
-        - formatter_class -- HelpFormatter class for printing help messages\r
-        - prefix_chars -- Characters that prefix optional arguments\r
-        - fromfile_prefix_chars -- Characters that prefix files containing\r
-            additional arguments\r
-        - argument_default -- The default value for all arguments\r
-        - conflict_handler -- String indicating how to handle conflicts\r
-        - add_help -- Add a -h/-help option\r
-    """\r
-\r
-    def __init__(self,\r
-                 prog=None,\r
-                 usage=None,\r
-                 description=None,\r
-                 epilog=None,\r
-                 version=None,\r
-                 parents=[],\r
-                 formatter_class=HelpFormatter,\r
-                 prefix_chars='-',\r
-                 fromfile_prefix_chars=None,\r
-                 argument_default=None,\r
-                 conflict_handler='error',\r
-                 add_help=True):\r
-\r
-        superinit = super(ArgumentParser, self).__init__\r
-        superinit(description=description,\r
-                  prefix_chars=prefix_chars,\r
-                  argument_default=argument_default,\r
-                  conflict_handler=conflict_handler)\r
-\r
-        # default setting for prog\r
-        if prog is None:\r
-            prog = _os.path.basename(_sys.argv[0])\r
-\r
-        self.prog = prog\r
-        self.usage = usage\r
-        self.epilog = epilog\r
-        self.version = version\r
-        self.formatter_class = formatter_class\r
-        self.fromfile_prefix_chars = fromfile_prefix_chars\r
-        self.add_help = add_help\r
-\r
-        add_group = self.add_argument_group\r
-        self._positionals = add_group(_('arguments'))\r
-        self._optionals = add_group(_('options'))\r
-        self._subparsers = None\r
-\r
-        # register types\r
-        def identity(string):\r
-            return string\r
-        self.register('type', None, identity)\r
-\r
-        # add help and version arguments if necessary\r
-        # (using explicit default to override global argument_default)\r
-        if self.add_help:\r
-            self.add_argument(\r
-                '-h', '--help', action='help', default=SUPPRESS,\r
-                help=_('show this help message and exit'))\r
-        if self.version:\r
-            self.add_argument(\r
-                '-v', '--version', action='version', default=SUPPRESS,\r
-                help=_("show program's version number and exit"))\r
-\r
-        # add parent arguments and defaults\r
-        for parent in parents:\r
-            self._add_container_actions(parent)\r
-            try:\r
-                defaults = parent._defaults\r
-            except AttributeError:\r
-                pass\r
-            else:\r
-                self._defaults.update(defaults)\r
-\r
-    # =======================\r
-    # Pretty __repr__ methods\r
-    # =======================\r
-    def _get_kwargs(self):\r
-        names = [\r
-            'prog',\r
-            'usage',\r
-            'description',\r
-            'version',\r
-            'formatter_class',\r
-            'conflict_handler',\r
-            'add_help',\r
-        ]\r
-        return [(name, getattr(self, name)) for name in names]\r
-\r
-    # ==================================\r
-    # Optional/Positional adding methods\r
-    # ==================================\r
-    def add_subparsers(self, **kwargs):\r
-        if self._subparsers is not None:\r
-            self.error(_('cannot have multiple subparser arguments'))\r
-\r
-        # add the parser class to the arguments if it's not present\r
-        kwargs.setdefault('parser_class', type(self))\r
-\r
-        if 'title' in kwargs or 'description' in kwargs:\r
-            title = _(kwargs.pop('title', 'subcommands'))\r
-            description = _(kwargs.pop('description', None))\r
-            self._subparsers = self.add_argument_group(title, description)\r
-        else:\r
-            self._subparsers = self._positionals\r
-\r
-        # prog defaults to the usage message of this parser, skipping\r
-        # optional arguments and with no "usage:" prefix\r
-        if kwargs.get('prog') is None:\r
-            formatter = self._get_formatter()\r
-            positionals = self._get_positional_actions()\r
-            groups = self._mutually_exclusive_groups\r
-            formatter.add_usage(self.usage, positionals, groups, '')\r
-            kwargs['prog'] = formatter.format_help().strip()\r
-\r
-        # create the parsers action and add it to the positionals list\r
-        parsers_class = self._pop_action_class(kwargs, 'parsers')\r
-        action = parsers_class(option_strings=[], **kwargs)\r
-        self._subparsers._add_action(action)\r
-\r
-        # return the created parsers action\r
-        return action\r
-\r
-    def _add_action(self, action):\r
-        if action.option_strings:\r
-            self._optionals._add_action(action)\r
-        else:\r
-            self._positionals._add_action(action)\r
-        return action\r
-\r
-    def _get_optional_actions(self):\r
-        return [action\r
-                for action in self._actions\r
-                if action.option_strings]\r
-\r
-    def _get_positional_actions(self):\r
-        return [action\r
-                for action in self._actions\r
-                if not action.option_strings]\r
-\r
-    # =====================================\r
-    # Command line argument parsing methods\r
-    # =====================================\r
-    def parse_args(self, args=None, namespace=None):\r
-        args, argv = self.parse_known_args(args, namespace)\r
-        if argv:\r
-            msg = _('unrecognized arguments: %s')\r
-            self.error(msg % ' '.join(argv))\r
-        return args\r
-\r
-    def parse_known_args(self, args=None, namespace=None):\r
-        # args default to the system args\r
-        if args is None:\r
-            args = _sys.argv[1:]\r
-\r
-        # default Namespace built from parser defaults\r
-        if namespace is None:\r
-            namespace = Namespace()\r
-\r
-        # add any action defaults that aren't present\r
-        for action in self._actions:\r
-            if action.dest is not SUPPRESS:\r
-                if not hasattr(namespace, action.dest):\r
-                    if action.default is not SUPPRESS:\r
-                        default = action.default\r
-                        if isinstance(action.default, _basestring):\r
-                            default = self._get_value(action, default)\r
-                        setattr(namespace, action.dest, default)\r
-\r
-        # add any parser defaults that aren't present\r
-        for dest in self._defaults:\r
-            if not hasattr(namespace, dest):\r
-                setattr(namespace, dest, self._defaults[dest])\r
-\r
-        # parse the arguments and exit if there are any errors\r
-        try:\r
-            return self._parse_known_args(args, namespace)\r
-        except ArgumentError:\r
-            err = _sys.exc_info()[1]\r
-            self.error(str(err))\r
-\r
-    def _parse_known_args(self, arg_strings, namespace):\r
-        # replace arg strings that are file references\r
-        if self.fromfile_prefix_chars is not None:\r
-            arg_strings = self._read_args_from_files(arg_strings)\r
-\r
-        # map all mutually exclusive arguments to the other arguments\r
-        # they can't occur with\r
-        action_conflicts = {}\r
-        for mutex_group in self._mutually_exclusive_groups:\r
-            group_actions = mutex_group._group_actions\r
-            for i, mutex_action in enumerate(mutex_group._group_actions):\r
-                conflicts = action_conflicts.setdefault(mutex_action, [])\r
-                conflicts.extend(group_actions[:i])\r
-                conflicts.extend(group_actions[i + 1:])\r
-\r
-        # find all option indices, and determine the arg_string_pattern\r
-        # which has an 'O' if there is an option at an index,\r
-        # an 'A' if there is an argument, or a '-' if there is a '--'\r
-        option_string_indices = {}\r
-        arg_string_pattern_parts = []\r
-        arg_strings_iter = iter(arg_strings)\r
-        for i, arg_string in enumerate(arg_strings_iter):\r
-\r
-            # all args after -- are non-options\r
-            if arg_string == '--':\r
-                arg_string_pattern_parts.append('-')\r
-                for arg_string in arg_strings_iter:\r
-                    arg_string_pattern_parts.append('A')\r
-\r
-            # otherwise, add the arg to the arg strings\r
-            # and note the index if it was an option\r
-            else:\r
-                option_tuple = self._parse_optional(arg_string)\r
-                if option_tuple is None:\r
-                    pattern = 'A'\r
-                else:\r
-                    option_string_indices[i] = option_tuple\r
-                    pattern = 'O'\r
-                arg_string_pattern_parts.append(pattern)\r
-\r
-        # join the pieces together to form the pattern\r
-        arg_strings_pattern = ''.join(arg_string_pattern_parts)\r
-\r
-        # converts arg strings to the appropriate and then takes the action\r
-        seen_actions = _set()\r
-        seen_non_default_actions = _set()\r
-\r
-        def take_action(action, argument_strings, option_string=None):\r
-            seen_actions.add(action)\r
-            argument_values = self._get_values(action, argument_strings)\r
-\r
-            # error if this argument is not allowed with other previously\r
-            # seen arguments, assuming that actions that use the default\r
-            # value don't really count as "present"\r
-            if argument_values is not action.default:\r
-                seen_non_default_actions.add(action)\r
-                for conflict_action in action_conflicts.get(action, []):\r
-                    if conflict_action in seen_non_default_actions:\r
-                        msg = _('not allowed with argument %s')\r
-                        action_name = _get_action_name(conflict_action)\r
-                        raise ArgumentError(action, msg % action_name)\r
-\r
-            # take the action if we didn't receive a SUPPRESS value\r
-            # (e.g. from a default)\r
-            if argument_values is not SUPPRESS:\r
-                action(self, namespace, argument_values, option_string)\r
-\r
-        # function to convert arg_strings into an optional action\r
-        def consume_optional(start_index):\r
-\r
-            # get the optional identified at this index\r
-            option_tuple = option_string_indices[start_index]\r
-            action, option_string, explicit_arg = option_tuple\r
-\r
-            # identify additional optionals in the same arg string\r
-            # (e.g. -xyz is the same as -x -y -z if no args are required)\r
-            match_argument = self._match_argument\r
-            action_tuples = []\r
-            while True:\r
-\r
-                # if we found no optional action, skip it\r
-                if action is None:\r
-                    extras.append(arg_strings[start_index])\r
-                    return start_index + 1\r
-\r
-                # if there is an explicit argument, try to match the\r
-                # optional's string arguments to only this\r
-                if explicit_arg is not None:\r
-                    arg_count = match_argument(action, 'A')\r
-\r
-                    # if the action is a single-dash option and takes no\r
-                    # arguments, try to parse more single-dash options out\r
-                    # of the tail of the option string\r
-                    chars = self.prefix_chars\r
-                    if arg_count == 0 and option_string[1] not in chars:\r
-                        action_tuples.append((action, [], option_string))\r
-                        for char in self.prefix_chars:\r
-                            option_string = char + explicit_arg[0]\r
-                            explicit_arg = explicit_arg[1:] or None\r
-                            optionals_map = self._option_string_actions\r
-                            if option_string in optionals_map:\r
-                                action = optionals_map[option_string]\r
-                                break\r
-                        else:\r
-                            msg = _('ignored explicit argument %r')\r
-                            raise ArgumentError(action, msg % explicit_arg)\r
-\r
-                    # if the action expect exactly one argument, we've\r
-                    # successfully matched the option; exit the loop\r
-                    elif arg_count == 1:\r
-                        stop = start_index + 1\r
-                        args = [explicit_arg]\r
-                        action_tuples.append((action, args, option_string))\r
-                        break\r
-\r
-                    # error if a double-dash option did not use the\r
-                    # explicit argument\r
-                    else:\r
-                        msg = _('ignored explicit argument %r')\r
-                        raise ArgumentError(action, msg % explicit_arg)\r
-\r
-                # if there is no explicit argument, try to match the\r
-                # optional's string arguments with the following strings\r
-                # if successful, exit the loop\r
-                else:\r
-                    start = start_index + 1\r
-                    selected_patterns = arg_strings_pattern[start:]\r
-                    arg_count = match_argument(action, selected_patterns)\r
-                    stop = start + arg_count\r
-                    args = arg_strings[start:stop]\r
-                    action_tuples.append((action, args, option_string))\r
-                    break\r
-\r
-            # add the Optional to the list and return the index at which\r
-            # the Optional's string args stopped\r
-            assert action_tuples\r
-            for action, args, option_string in action_tuples:\r
-                take_action(action, args, option_string)\r
-            return stop\r
-\r
-        # the list of Positionals left to be parsed; this is modified\r
-        # by consume_positionals()\r
-        positionals = self._get_positional_actions()\r
-\r
-        # function to convert arg_strings into positional actions\r
-        def consume_positionals(start_index):\r
-            # match as many Positionals as possible\r
-            match_partial = self._match_arguments_partial\r
-            selected_pattern = arg_strings_pattern[start_index:]\r
-            arg_counts = match_partial(positionals, selected_pattern)\r
-\r
-            # slice off the appropriate arg strings for each Positional\r
-            # and add the Positional and its args to the list\r
-            for action, arg_count in zip(positionals, arg_counts):\r
-                args = arg_strings[start_index: start_index + arg_count]\r
-                start_index += arg_count\r
-                take_action(action, args)\r
-\r
-            # slice off the Positionals that we just parsed and return the\r
-            # index at which the Positionals' string args stopped\r
-            positionals[:] = positionals[len(arg_counts):]\r
-            return start_index\r
-\r
-        # consume Positionals and Optionals alternately, until we have\r
-        # passed the last option string\r
-        extras = []\r
-        start_index = 0\r
-        if option_string_indices:\r
-            max_option_string_index = max(option_string_indices)\r
-        else:\r
-            max_option_string_index = -1\r
-        while start_index <= max_option_string_index:\r
-\r
-            # consume any Positionals preceding the next option\r
-            next_option_string_index = min([\r
-                index\r
-                for index in option_string_indices\r
-                if index >= start_index])\r
-            if start_index != next_option_string_index:\r
-                positionals_end_index = consume_positionals(start_index)\r
-\r
-                # only try to parse the next optional if we didn't consume\r
-                # the option string during the positionals parsing\r
-                if positionals_end_index > start_index:\r
-                    start_index = positionals_end_index\r
-                    continue\r
-                else:\r
-                    start_index = positionals_end_index\r
-\r
-            # if we consumed all the positionals we could and we're not\r
-            # at the index of an option string, there were extra arguments\r
-            if start_index not in option_string_indices:\r
-                strings = arg_strings[start_index:next_option_string_index]\r
-                extras.extend(strings)\r
-                start_index = next_option_string_index\r
-\r
-            # consume the next optional and any arguments for it\r
-            start_index = consume_optional(start_index)\r
-\r
-        # consume any positionals following the last Optional\r
-        stop_index = consume_positionals(start_index)\r
-\r
-        # if we didn't consume all the argument strings, there were extras\r
-        extras.extend(arg_strings[stop_index:])\r
-\r
-        # if we didn't use all the Positional objects, there were too few\r
-        # arg strings supplied.\r
-        if positionals:\r
-            self.error(_('too few arguments'))\r
-\r
-        # make sure all required actions were present\r
-        for action in self._actions:\r
-            if action.required:\r
-                if action not in seen_actions:\r
-                    name = _get_action_name(action)\r
-                    self.error(_('argument %s is required') % name)\r
-\r
-        # make sure all required groups had one option present\r
-        for group in self._mutually_exclusive_groups:\r
-            if group.required:\r
-                for action in group._group_actions:\r
-                    if action in seen_non_default_actions:\r
-                        break\r
-\r
-                # if no actions were used, report the error\r
-                else:\r
-                    names = [_get_action_name(action)\r
-                             for action in group._group_actions\r
-                             if action.help is not SUPPRESS]\r
-                    msg = _('one of the arguments %s is required')\r
-                    self.error(msg % ' '.join(names))\r
-\r
-        # return the updated namespace and the extra arguments\r
-        return namespace, extras\r
-\r
-    def _read_args_from_files(self, arg_strings):\r
-        # expand arguments referencing files\r
-        new_arg_strings = []\r
-        for arg_string in arg_strings:\r
-\r
-            # for regular arguments, just add them back into the list\r
-            if arg_string[0] not in self.fromfile_prefix_chars:\r
-                new_arg_strings.append(arg_string)\r
-\r
-            # replace arguments referencing files with the file content\r
-            else:\r
-                try:\r
-                    args_file = open(arg_string[1:])\r
-                    try:\r
-                        arg_strings = args_file.read().splitlines()\r
-                        arg_strings = self._read_args_from_files(arg_strings)\r
-                        new_arg_strings.extend(arg_strings)\r
-                    finally:\r
-                        args_file.close()\r
-                except IOError:\r
-                    err = _sys.exc_info()[1]\r
-                    self.error(str(err))\r
-\r
-        # return the modified argument list\r
-        return new_arg_strings\r
-\r
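A minimal sketch of the fromfile expansion handled by _read_args_from_files
(the '@' prefix character and the file name are illustrative):

    parser = ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('--pkg', action='append')
    # if extra_pkgs.txt contains two lines, "--pkg" and "vim", then:
    args = parser.parse_args(['@extra_pkgs.txt', '--pkg', 'zsh'])
    # args.pkg == ['vim', 'zsh']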
-    def _match_argument(self, action, arg_strings_pattern):\r
-        # match the pattern for this action to the arg strings\r
-        nargs_pattern = self._get_nargs_pattern(action)\r
-        match = _re.match(nargs_pattern, arg_strings_pattern)\r
-\r
-        # raise an exception if we weren't able to find a match\r
-        if match is None:\r
-            nargs_errors = {\r
-                None: _('expected one argument'),\r
-                OPTIONAL: _('expected at most one argument'),\r
-                ONE_OR_MORE: _('expected at least one argument'),\r
-            }\r
-            default = _('expected %s argument(s)') % action.nargs\r
-            msg = nargs_errors.get(action.nargs, default)\r
-            raise ArgumentError(action, msg)\r
-\r
-        # return the number of arguments matched\r
-        return len(match.group(1))\r
-\r
-    def _match_arguments_partial(self, actions, arg_strings_pattern):\r
-        # progressively shorten the actions list by slicing off the\r
-        # final actions until we find a match\r
-        result = []\r
-        for i in range(len(actions), 0, -1):\r
-            actions_slice = actions[:i]\r
-            pattern = ''.join([self._get_nargs_pattern(action)\r
-                               for action in actions_slice])\r
-            match = _re.match(pattern, arg_strings_pattern)\r
-            if match is not None:\r
-                result.extend([len(string) for string in match.groups()])\r
-                break\r
-\r
-        # return the list of arg string counts\r
-        return result\r
-\r
-    def _parse_optional(self, arg_string):\r
-        # if it's an empty string, it was meant to be a positional\r
-        if not arg_string:\r
-            return None\r
-\r
-        # if it doesn't start with a prefix, it was meant to be positional\r
-        if not arg_string[0] in self.prefix_chars:\r
-            return None\r
-\r
-        # if it's just dashes, it was meant to be positional\r
-        if not arg_string.strip('-'):\r
-            return None\r
-\r
-        # if the option string is present in the parser, return the action\r
-        if arg_string in self._option_string_actions:\r
-            action = self._option_string_actions[arg_string]\r
-            return action, arg_string, None\r
-\r
-        # search through all possible prefixes of the option string\r
-        # and all actions in the parser for possible interpretations\r
-        option_tuples = self._get_option_tuples(arg_string)\r
-\r
-        # if multiple actions match, the option string was ambiguous\r
-        if len(option_tuples) > 1:\r
-            options = ', '.join([option_string\r
-                for action, option_string, explicit_arg in option_tuples])\r
-            tup = arg_string, options\r
-            self.error(_('ambiguous option: %s could match %s') % tup)\r
-\r
-        # if exactly one action matched, this segmentation is good,\r
-        # so return the parsed action\r
-        elif len(option_tuples) == 1:\r
-            option_tuple, = option_tuples\r
-            return option_tuple\r
-\r
-        # if it was not found as an option, but it looks like a negative\r
-        # number, it was meant to be positional\r
-        # unless there are negative-number-like options\r
-        if self._negative_number_matcher.match(arg_string):\r
-            if not self._has_negative_number_optionals:\r
-                return None\r
-\r
-        # if it contains a space, it was meant to be a positional\r
-        if ' ' in arg_string:\r
-            return None\r
-\r
-        # it was meant to be an optional but there is no such option\r
-        # in this parser (though it might be a valid option in a subparser)\r
-        return None, arg_string, None\r
-\r
-    def _get_option_tuples(self, option_string):\r
-        result = []\r
-\r
-        # option strings starting with two prefix characters are only\r
-        # split at the '='\r
-        chars = self.prefix_chars\r
-        if option_string[0] in chars and option_string[1] in chars:\r
-            if '=' in option_string:\r
-                option_prefix, explicit_arg = option_string.split('=', 1)\r
-            else:\r
-                option_prefix = option_string\r
-                explicit_arg = None\r
-            for option_string in self._option_string_actions:\r
-                if option_string.startswith(option_prefix):\r
-                    action = self._option_string_actions[option_string]\r
-                    tup = action, option_string, explicit_arg\r
-                    result.append(tup)\r
-\r
-        # single character options can be concatenated with their arguments\r
-        # but multiple character options always have to have their argument\r
-        # separate\r
-        elif option_string[0] in chars and option_string[1] not in chars:\r
-            option_prefix = option_string\r
-            explicit_arg = None\r
-            short_option_prefix = option_string[:2]\r
-            short_explicit_arg = option_string[2:]\r
-\r
-            for option_string in self._option_string_actions:\r
-                if option_string == short_option_prefix:\r
-                    action = self._option_string_actions[option_string]\r
-                    tup = action, option_string, short_explicit_arg\r
-                    result.append(tup)\r
-                elif option_string.startswith(option_prefix):\r
-                    action = self._option_string_actions[option_string]\r
-                    tup = action, option_string, explicit_arg\r
-                    result.append(tup)\r
-\r
-        # shouldn't ever get here\r
-        else:\r
-            self.error(_('unexpected option string: %s') % option_string)\r
-\r
-        # return the collected option tuples\r
-        return result\r
-\r
-    def _get_nargs_pattern(self, action):\r
-        # in all examples below, we have to allow for '--' args\r
-        # which are represented as '-' in the pattern\r
-        nargs = action.nargs\r
-\r
-        # the default (None) is assumed to be a single argument\r
-        if nargs is None:\r
-            nargs_pattern = '(-*A-*)'\r
-\r
-        # allow zero or one arguments\r
-        elif nargs == OPTIONAL:\r
-            nargs_pattern = '(-*A?-*)'\r
-\r
-        # allow zero or more arguments\r
-        elif nargs == ZERO_OR_MORE:\r
-            nargs_pattern = '(-*[A-]*)'\r
-\r
-        # allow one or more arguments\r
-        elif nargs == ONE_OR_MORE:\r
-            nargs_pattern = '(-*A[A-]*)'\r
-\r
-        # allow one argument followed by any number of options or arguments\r
-        elif nargs is PARSER:\r
-            nargs_pattern = '(-*A[-AO]*)'\r
-\r
-        # all others should be integers\r
-        else:\r
-            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)\r
-\r
-        # if this is an optional action, -- is not allowed\r
-        if action.option_strings:\r
-            nargs_pattern = nargs_pattern.replace('-*', '')\r
-            nargs_pattern = nargs_pattern.replace('-', '')\r
-\r
-        # return the pattern\r
-        return nargs_pattern\r
-\r
-    # ========================\r
-    # Value conversion methods\r
-    # ========================\r
-    def _get_values(self, action, arg_strings):\r
-        # for everything but PARSER args, strip out '--'\r
-        if action.nargs is not PARSER:\r
-            arg_strings = [s for s in arg_strings if s != '--']\r
-\r
-        # optional argument produces a default when not present\r
-        if not arg_strings and action.nargs == OPTIONAL:\r
-            if action.option_strings:\r
-                value = action.const\r
-            else:\r
-                value = action.default\r
-            if isinstance(value, _basestring):\r
-                value = self._get_value(action, value)\r
-                self._check_value(action, value)\r
-\r
-        # when nargs='*' on a positional, if there were no command-line\r
-        # args, use the default if it is anything other than None\r
-        elif (not arg_strings and action.nargs == ZERO_OR_MORE and\r
-              not action.option_strings):\r
-            if action.default is not None:\r
-                value = action.default\r
-            else:\r
-                value = arg_strings\r
-            self._check_value(action, value)\r
-\r
-        # single argument or optional argument produces a single value\r
-        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:\r
-            arg_string, = arg_strings\r
-            value = self._get_value(action, arg_string)\r
-            self._check_value(action, value)\r
-\r
-        # PARSER arguments convert all values, but check only the first\r
-        elif action.nargs is PARSER:\r
-            value = [self._get_value(action, v) for v in arg_strings]\r
-            self._check_value(action, value[0])\r
-\r
-        # all other types of nargs produce a list\r
-        else:\r
-            value = [self._get_value(action, v) for v in arg_strings]\r
-            for v in value:\r
-                self._check_value(action, v)\r
-\r
-        # return the converted value\r
-        return value\r
-\r
-    def _get_value(self, action, arg_string):\r
-        type_func = self._registry_get('type', action.type, action.type)\r
-        if not hasattr(type_func, '__call__'):\r
-            if not hasattr(type_func, '__bases__'): # classic classes\r
-                msg = _('%r is not callable')\r
-                raise ArgumentError(action, msg % type_func)\r
-\r
-        # convert the value to the appropriate type\r
-        try:\r
-            result = type_func(arg_string)\r
-\r
-        # TypeErrors or ValueErrors indicate errors\r
-        except (TypeError, ValueError):\r
-            name = getattr(action.type, '__name__', repr(action.type))\r
-            msg = _('invalid %s value: %r')\r
-            raise ArgumentError(action, msg % (name, arg_string))\r
-\r
-        # return the converted value\r
-        return result\r
-\r
-    def _check_value(self, action, value):\r
-        # converted value must be one of the choices (if specified)\r
-        if action.choices is not None and value not in action.choices:\r
-            tup = value, ', '.join(map(repr, action.choices))\r
-            msg = _('invalid choice: %r (choose from %s)') % tup\r
-            raise ArgumentError(action, msg)\r
-\r
-    # =======================\r
-    # Help-formatting methods\r
-    # =======================\r
-    def format_usage(self):\r
-        formatter = self._get_formatter()\r
-        formatter.add_usage(self.usage, self._actions,\r
-                            self._mutually_exclusive_groups)\r
-        return formatter.format_help()\r
-\r
-    def format_help(self):\r
-        formatter = self._get_formatter()\r
-\r
-        # usage\r
-        formatter.add_usage(self.usage, self._actions,\r
-                            self._mutually_exclusive_groups)\r
-\r
-        # description\r
-        formatter.add_text(self.description)\r
-\r
-        # positionals, optionals and user-defined groups\r
-        for action_group in self._action_groups:\r
-            formatter.start_section(action_group.title)\r
-            formatter.add_text(action_group.description)\r
-            formatter.add_arguments(action_group._group_actions)\r
-            formatter.end_section()\r
-\r
-        # epilog\r
-        formatter.add_text(self.epilog)\r
-\r
-        # determine help from format above\r
-        return formatter.format_help()\r
-\r
-    def format_version(self):\r
-        formatter = self._get_formatter()\r
-        formatter.add_text(self.version)\r
-        return formatter.format_help()\r
-\r
-    def _get_formatter(self):\r
-        return self.formatter_class(prog=self.prog)\r
-\r
-    # =====================\r
-    # Help-printing methods\r
-    # =====================\r
-    def print_usage(self, file=None):\r
-        self._print_message(self.format_usage(), file)\r
-\r
-    def print_help(self, file=None):\r
-        self._print_message(self.format_help(), file)\r
-\r
-    def print_version(self, file=None):\r
-        self._print_message(self.format_version(), file)\r
-\r
-    def _print_message(self, message, file=None):\r
-        if message:\r
-            if file is None:\r
-                file = _sys.stderr\r
-            file.write(message)\r
-\r
-    # ===============\r
-    # Exiting methods\r
-    # ===============\r
-    def exit(self, status=0, message=None):\r
-        if message:\r
-            _sys.stderr.write(message)\r
-        _sys.exit(status)\r
-\r
-    def error(self, message):\r
-        """error(message: string)\r
-\r
-        Prints a usage message incorporating the message to stderr and\r
-        exits.\r
-\r
-        If you override this in a subclass, it should not return -- it\r
-        should either exit or raise an exception.\r
-        """\r
-        self.print_usage(_sys.stderr)\r
-        self.exit(2, _('%s: error: %s\n') % (self.prog, message))\r
diff --git a/micng/utils/errors.py b/micng/utils/errors.py
index ba08563..3ce37f2 100644 (file)
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 
+class Usage(Exception):
+    def __init__(self, msg=None):
+        Exception.__init__(self, msg)
+
 class CreatorError(Exception):
     """An exception base class for all imgcreate errors."""
     def __init__(self, msg):
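A minimal sketch of how a command-line front end might use the new Usage
class (the main() wrapper and its message are illustrative, assuming this
hunk belongs to micng/utils/errors.py):

    import sys
    from micng.utils.errors import Usage

    def main(argv):
        try:
            if not argv:
                raise Usage("no kickstart file given")
            # ... normal processing ...
        except Usage as err:
            sys.stderr.write("%s\n" % str(err))
            return 2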
diff --git a/micng/utils/misc.py b/micng/utils/misc.py
old mode 100755 (executable)
new mode 100644 (file)
index b60590e..40b38d2
@@ -43,8 +43,8 @@ except ImportError:
     import cElementTree
 xmlparse = cElementTree.parse
 
-import errors as errors
-import fs_related as fs_related
+from errors import *
+from fs_related import *
 
 chroot_lockfd = -1
 chroot_lock = ""
@@ -1292,7 +1292,7 @@ def chroot(chrootdir, bindmounts = None, execute = "/bin/bash"):
         
         for i in range(len(fileOutput)):
             if fileOutput[i].find("ARM") > 0:
-                qemu_emulator = setup_qemu_emulator.(chrootdir, "arm")
+                qemu_emulator = setup_qemu_emulator(chrootdir, "arm")
                 architecture_found = True
                 break
             if fileOutput[i].find("Intel") > 0:
diff --git a/micng/utils/partitionedfs.py b/micng/utils/partitionedfs.py
new file mode 100644 (file)
index 0000000..c8abc0f
--- /dev/null
@@ -0,0 +1,610 @@
+#
+# partitionedfs.py: partitioned filesystem class, extends fs.py
+#
+# Copyright 2007-2008, Red Hat  Inc.
+# Copyright 2008, Daniel P. Berrange
+# Copyright 2008,  David P. Huff
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import glob
+import shutil
+import subprocess
+import logging
+import time
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+
+class PartitionedMount(Mount):
+    def __init__(self, disks, mountdir, skipformat = False):
+        Mount.__init__(self, mountdir)
+        self.disks = {}
+        for name in disks.keys():
+            self.disks[name] = { 'disk': disks[name],  # Disk object
+                                 'mapped': False, # True if kpartx mapping exists
+                                 'numpart': 0, # Number of allocated partitions
+                                 'partitions': [], # indexes to self.partitions
+                                 # Partitions with part num higher than 3 will 
+                                 # be put inside extended partition.
+                                 'extended': 0, # Size of extended partition
+                                 # Sector 0 is used by the MBR and can't be used
+                                 # as the start, so setting offset to 1.
+                                 'offset': 1 } # Offset of next partition (in sectors)
+
+        self.partitions = []
+        self.subvolumes = []
+        self.mapped = False
+        self.mountOrder = []
+        self.unmountOrder = []
+        self.parted=find_binary_path("parted")
+        self.kpartx=find_binary_path("kpartx")
+        self.mkswap=find_binary_path("mkswap")
+        self.btrfscmd=None
+        self.mountcmd=find_binary_path("mount")
+        self.umountcmd=find_binary_path("umount")
+        self.skipformat = skipformat
+        self.snapshot_created = self.skipformat
+        # Size of a sector used in calculations
+        self.sector_size = 512
+
+    def add_partition(self, size, disk, mountpoint, fstype = None, fsopts = None, boot = False):
+        # Convert size from MB to sectors for parted
+        size = size * 1024 * 1024 / self.sector_size
+
+        """ We need to handle subvolumes for btrfs """
+        if fstype == "btrfs" and fsopts and fsopts.find("subvol=") != -1:
+            self.btrfscmd=find_binary_path("btrfs")
+            subvol = None
+            opts = fsopts.split(",")
+            for opt in opts:
+                if opt.find("subvol=") != -1:
+                    subvol = opt.replace("subvol=", "").strip()
+                    break
+            if not subvol:
+                raise MountError("No subvolume: %s" % fsopts)
+            self.subvolumes.append({'size': size, # In sectors
+                                    'mountpoint': mountpoint, # Mount relative to chroot
+                                    'fstype': fstype, # Filesystem type
+                                    'fsopts': fsopts, # Filesystem mount options
+                                    'disk': disk, # physical disk name holding partition
+                                    'device': None, # kpartx device node for partition
+                                    'mount': None, # Mount object
+                                    'subvol': subvol, # Subvolume name
+                                    'boot': boot, # Bootable flag
+                                    'mounted': False # Mount flag
+                                   })
+
+        """ We still need a partition for "/" or for non-subvolume mounts """
+        if mountpoint == "/" or not fsopts or fsopts.find("subvol=") == -1:
+            """ "/" doesn't need a subvol= mount option because it will be set as the default subvolume """
+            if fsopts and fsopts.find("subvol=") != -1:
+                opts = fsopts.split(",")
+                for opt in opts:
+                    if opt.strip().startswith("subvol="):
+                        opts.remove(opt)
+                        break
+                fsopts = ",".join(opts)
+            self.partitions.append({'size': size, # In sectors
+                                    'mountpoint': mountpoint, # Mount relative to chroot
+                                    'fstype': fstype, # Filesystem type
+                                    'fsopts': fsopts, # Filesystem mount options
+                                    'disk': disk, # physical disk name holding partition
+                                    'device': None, # kpartx device node for partition
+                                    'mount': None, # Mount object
+                                    'num': None, # Partition number
+                                    'boot': boot}) # Bootable flag
+
+    def __create_part_to_image(self, device, parttype, fstype, start, size):
+        # The start sector is included in the size, so subtract one to get the end sector.
+        end = start+size-1
+        logging.debug("Added '%s' part from sector %d to %d" % (parttype,start,end))
+        part_cmd = [self.parted, "-s", device, "unit", "s", "mkpart", parttype]
+        if fstype:
+            part_cmd.extend([fstype])
+        part_cmd.extend(["%d" % start, "%d" % end])
+        logging.debug(part_cmd)
+        p1 = subprocess.Popen(part_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        (out,err) = p1.communicate()
+        logging.debug(out)
+        return p1
+
+    def __format_disks(self):
+        logging.debug("Assigning partitions to disks")
+        
+        mbr_sector_skipped = False
+        
+        for n in range(len(self.partitions)):
+            p = self.partitions[n]
+
+            if not self.disks.has_key(p['disk']):
+                raise MountError("No disk %s for partition %s" % (p['disk'], p['mountpoint']))
+            
+            if not mbr_sector_skipped:
+                # This hack is used to remove one sector from the first partition,
+                # which is the sector used by the MBR.
+                p['size'] -= 1
+                mbr_sector_skipped = True
+
+            d = self.disks[p['disk']]
+            d['numpart'] += 1
+            if d['numpart'] > 3:
+                # Increase allocation of extended partition to hold this partition
+                d['extended'] += p['size']
+                p['type'] = 'logical'
+                p['num'] = d['numpart'] + 1
+            else:
+                p['type'] = 'primary'
+                p['num'] = d['numpart']
+
+            p['start'] = d['offset']
+            d['offset'] += p['size']
+            d['partitions'].append(n)
+            logging.debug("Assigned %s to %s%d at %d with size %d" % (p['mountpoint'], p['disk'], p['num'], p['start'], p['size']))
+
+        if self.skipformat:
+            logging.debug("Skipping disk format, because skipformat flag is set.")
+            return
+            
+        for dev in self.disks.keys():
+            d = self.disks[dev]
+            logging.debug("Initializing partition table for %s" % (d['disk'].device))
+            p1 = subprocess.Popen([self.parted, "-s", d['disk'].device, "mklabel", "msdos"],
+                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+            (out,err) = p1.communicate()            
+            logging.debug(out)
+            
+            if p1.returncode != 0:
+                # NOTE: We don't throw exception when return code is not 0, because
+                # parted always fails to reload part table with loop devices.
+                # This prevents us from distinguishing real errors based on return code.
+                logging.debug("WARNING: parted returned '%s' instead of 0 when creating partition-table for disk '%s'." % (p1.returncode,d['disk'].device))
+
+        logging.debug("Creating partitions")
+
+        for p in self.partitions:
+            d = self.disks[p['disk']]
+            if p['num'] == 5:
+                self.__create_part_to_image(d['disk'].device,"extended",None,p['start'],d['extended'])
+                
+            if p['fstype'] == "swap":
+                parted_fs_type = "linux-swap"
+            elif p['fstype'] == "vfat":
+                parted_fs_type = "fat32"
+            elif p['fstype'] == "msdos":
+                parted_fs_type = "fat16"
+            else:
+                # Type for ext2/ext3/ext4/btrfs
+                parted_fs_type = "ext2"
+
+            # The boot ROM of OMAP boards requires the vfat boot partition to have an
+            # even number of sectors.
+            if p['mountpoint'] == "/boot" and p['fstype'] in ["vfat","msdos"] and p['size'] % 2:
+                logging.debug("Subtracting one sector from the '%s' partition to get an even number of sectors." % (p['mountpoint']))
+                p['size'] -= 1
+                
+            p1 = self.__create_part_to_image(d['disk'].device,p['type'], 
+                                             parted_fs_type, p['start'], 
+                                             p['size'])
+
+            if p1.returncode != 0:
+                # NOTE: We don't throw exception when return code is not 0, because
+                # parted always fails to reload part table with loop devices.
+                # This prevents us from distinguishing real errors based on return code.
+                logging.debug("WARNING: parted returned '%s' instead of 0 when creating partition '%s' for disk '%s'." % (p1.returncode,p['mountpoint'],d['disk'].device))
+
+            if p['boot']:
+                logging.debug("Setting boot flag for partition '%s' on disk '%s'." % (p['num'],d['disk'].device))
+                boot_cmd = [self.parted, "-s", d['disk'].device, "set", "%d" % p['num'], "boot", "on"]
+                logging.debug(boot_cmd)
+                p1 = subprocess.Popen(boot_cmd,
+                                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+                (out,err) = p1.communicate()            
+                logging.debug(out)
+
+                if p1.returncode != 0:
+                    # NOTE: We don't throw exception when return code is not 0, because
+                    # parted always fails to reload part table with loop devices.
+                    # This prevents us from distinguishing real errors based on return code.
+                    logging.debug("WARNING: parted returned '%s' instead of 0 when adding boot flag for partition '%s' disk '%s'." % (p1.returncode,p['num'],d['disk'].device))
+
+    def __map_partitions(self):
+        """Load the dm_snapshot module if it isn't already loaded"""
+        load_module("dm_snapshot")
+
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        for dev in self.disks.keys():
+            d = self.disks[dev]
+            if d['mapped']:
+                continue
+
+            logging.debug("Running kpartx on %s" % d['disk'].device )
+            kpartx = subprocess.Popen([self.kpartx, "-l", "-v", d['disk'].device],
+                                      stdout=subprocess.PIPE, stderr=dev_null)
+
+            kpartxOutput = kpartx.communicate()[0].strip().split("\n")
+
+            if kpartx.returncode:
+                os.close(dev_null)
+                raise MountError("Failed to query partition mapping for '%s'" %
+                                 d['disk'].device)
+
+            # Strip trailing blank and mask verbose output
+            i = 0
+            while i < len(kpartxOutput) and kpartxOutput[i][0:4] != "loop":
+                i = i + 1
+            kpartxOutput = kpartxOutput[i:]
+
+            # Quick sanity check that the number of partitions matches
+            # our expectation. If it doesn't, someone broke the code
+            # further up
+            if len(kpartxOutput) != d['numpart']:
+                os.close(dev_null)
+                raise MountError("Unexpected number of partitions from kpartx: %d != %d" %
+                                 (len(kpartxOutput), d['numpart']))
+
+            for i in range(len(kpartxOutput)):
+                line = kpartxOutput[i]
+                newdev = line.split()[0]
+                mapperdev = "/dev/mapper/" + newdev
+                loopdev = d['disk'].device + newdev[-1]
+
+                logging.debug("Dev %s: %s -> %s" % (newdev, loopdev, mapperdev))
+                pnum = d['partitions'][i]
+                self.partitions[pnum]['device'] = loopdev
+
+                # grub's install wants partitions to be named
+                # to match their parent device + partition num
+                # kpartx doesn't work like this, so we add compat
+                # symlinks to point to /dev/mapper
+                if os.path.lexists(loopdev):
+                    os.unlink(loopdev)
+                os.symlink(mapperdev, loopdev)
+
+            logging.debug("Adding partx mapping for %s" % d['disk'].device)
+            p1 = subprocess.Popen([self.kpartx, "-v", "-a", d['disk'].device],
+                                  stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+            
+            (out,err) = p1.communicate()
+            logging.debug(out)
+            
+            if p1.returncode != 0:
+                # Make sure that the device maps are also removed on error case.
+                # The d['mapped'] isn't set to True if the kpartx fails so
+                # failed mapping will not be cleaned on cleanup either.
+                subprocess.call([self.kpartx, "-d", d['disk'].device],
+                                stdout=dev_null, stderr=dev_null)
+                os.close(dev_null)
+                raise MountError("Failed to map partitions for '%s'" %
+                                 d['disk'].device)
+            d['mapped'] = True
+        os.close(dev_null)
+
+
+    def __unmap_partitions(self):
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        for dev in self.disks.keys():
+            d = self.disks[dev]
+            if not d['mapped']:
+                continue
+
+            logging.debug("Removing compat symlinks")
+            for pnum in d['partitions']:
+                if self.partitions[pnum]['device'] != None:
+                    os.unlink(self.partitions[pnum]['device'])
+                    self.partitions[pnum]['device'] = None
+
+            logging.debug("Unmapping %s" % d['disk'].device)
+            rc = subprocess.call([self.kpartx, "-d", d['disk'].device],
+                                 stdout=dev_null, stderr=dev_null)
+            if rc != 0:
+                os.close(dev_null)
+                raise MountError("Failed to unmap partitions for '%s'" %
+                                 d['disk'].device)
+
+            d['mapped'] = False
+        os.close(dev_null)
+
+
+    def __calculate_mountorder(self):
+        logging.debug("Calculating mount order")
+        for p in self.partitions:
+            self.mountOrder.append(p['mountpoint'])
+            self.unmountOrder.append(p['mountpoint'])
+
+        self.mountOrder.sort()
+        self.unmountOrder.sort()
+        self.unmountOrder.reverse()
+
+    def cleanup(self):
+        Mount.cleanup(self)
+        self.__unmap_partitions()
+        for dev in self.disks.keys():
+            d = self.disks[dev]
+            try:
+                d['disk'].cleanup()
+            except:
+                pass
+
+    def unmount(self):
+        self.__unmount_subvolumes()
+        for mp in self.unmountOrder:
+            if mp == 'swap':
+                continue
+            p = None
+            for p1 in self.partitions:
+                if p1['mountpoint'] == mp:
+                    p = p1
+                    break
+
+            if p['mount'] != None:
+                try:
+                    """ Create subvolume snapshot here """
+                    if p['fstype'] == "btrfs" and p['mountpoint'] == "/" and not self.snapshot_created:
+                        self.__create_subvolume_snapshots(p, p["mount"])
+                    p['mount'].cleanup()
+                except:
+                    pass
+                p['mount'] = None
+
+    """ Only for btrfs """
+    def __get_subvolume_id(self, rootpath, subvol):
+        if not self.btrfscmd:
+            self.btrfscmd=find_binary_path("btrfs")
+        argv = [ self.btrfscmd, "subvolume", "list", rootpath ]
+        p1 = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        (out,err) = p1.communicate()
+        logging.debug(out)
+        if p1.returncode != 0:
+            raise MountError("Failed to get subvolume id from %s, return code: %d." % (rootpath, p1.returncode))
+        subvolid = -1
+        for line in out.split("\n"):
+            if line.endswith(" path %s" % subvol):
+                subvolid = line.split(" ")[1]
+                if not subvolid.isdigit():
+                    raise MountError("Invalid subvolume id: %s" % subvolid)
+                subvolid = int(subvolid)
+                break
+        return subvolid
+
+    def __create_subvolume_metadata(self, p, pdisk):
+        if len(self.subvolumes) == 0:
+            return
+        argv = [ self.btrfscmd, "subvolume", "list", pdisk.mountdir ]
+        p1 = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        (out,err) = p1.communicate()
+        logging.debug(out)
+        if p1.returncode != 0:
+            raise MountError("Failed to get subvolume id from %s, return code: %d." % (pdisk.mountdir, p1.returncode))
+        subvolid_items = out.split("\n")
+        subvolume_metadata = ""
+        for subvol in self.subvolumes:
+            for line in subvolid_items:
+                if line.endswith(" path %s" % subvol["subvol"]):
+                    subvolid = line.split(" ")[1]
+                    if not subvolid.isdigit():
+                        raise MountError("Invalid subvolume id: %s" % subvolid)
+                    subvolid = int(subvolid)
+                    opts = subvol["fsopts"].split(",")
+                    for opt in opts:
+                        if opt.strip().startswith("subvol="):
+                            opts.remove(opt)
+                            break
+                    fsopts = ",".join(opts)
+                    subvolume_metadata += "%d\t%s\t%s\t%s\n" % (subvolid, subvol["subvol"], subvol['mountpoint'], fsopts)
+        if subvolume_metadata:
+            fd = open("%s/.subvolume_metadata" % pdisk.mountdir, "w")
+            fd.write(subvolume_metadata)
+            fd.close()
+
+    def __get_subvolume_metadata(self, p, pdisk):
+        subvolume_metadata_file = "%s/.subvolume_metadata" % pdisk.mountdir
+        if not os.path.exists(subvolume_metadata_file):
+            return
+        fd = open(subvolume_metadata_file, "r")
+        content = fd.read()
+        fd.close()
+        for line in content.split("\n"):
+            items = line.split("\t")
+            if items and len(items) == 4:
+                self.subvolumes.append({'size': 0, # In sectors
+                                        'mountpoint': items[2], # Mount relative to chroot
+                                        'fstype': "btrfs", # Filesystem type
+                                        'fsopts': items[3] + ",subvol=%s" %  items[1], # Filesystem mount options
+                                        'disk': p['disk'], # physical disk name holding partition
+                                        'device': None, # kpartx device node for partition
+                                        'mount': None, # Mount object
+                                        'subvol': items[1], # Subvolume name
+                                        'boot': False, # Bootable flag
+                                        'mounted': False # Mount flag
+                                   })
+
+    def __create_subvolumes(self, p, pdisk):
+        """ Create all the subvolumes """
+        for subvol in self.subvolumes:
+            argv = [ self.btrfscmd, "subvolume", "create", pdisk.mountdir + "/" + subvol["subvol"]]
+            p1 = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+            (out,err) = p1.communicate()
+            logging.debug(out)
+            if p1.returncode != 0:
+                raise MountError("Failed to create subvolume '%s', return code: %d." % (subvol["subvol"], p1.returncode))
+
+        """ Set default subvolume, subvolume for "/" is default """
+        subvol = None
+        for subvolume in self.subvolumes:
+            if subvolume["mountpoint"] == "/" and p["disk"] == subvolume["disk"]:
+                subvol = subvolume
+                break
+        if subvol:
+            """ Get default subvolume id """
+            subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
+            """ Set default subvolume """
+            if subvolid != -1:
+                argv = [ self.btrfscmd, "subvolume", "set-default", "%d" % subvolid, pdisk.mountdir]
+                p1 = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+                (out,err) = p1.communicate()
+                logging.debug(out)
+                if p1.returncode != 0:
+                    raise MountError("Failed to set default subvolume id: %d, return code: %d." % (subvolid, p1.returncode))
+
+        self.__create_subvolume_metadata(p, pdisk)
+
+    def __mount_subvolumes(self, p, pdisk):
+        if self.skipformat:
+            """ Get subvolume info """
+            self.__get_subvolume_metadata(p, pdisk)
+            """ Set default mount options """
+            if len(self.subvolumes) != 0:
+                for subvol in self.subvolumes:
+                    if subvol["mountpoint"] == p["mountpoint"] == "/":
+                        opts = subvol["fsopts"].split(",")
+                        for opt in opts:
+                            if opt.strip().startswith("subvol="):
+                                opts.remove(opt)
+                                break
+                        pdisk.fsopts = ",".join(opts)
+                        break
+            
+        if len(self.subvolumes) == 0:
+            """ Return directly if no subvolumes """
+            return
+
+        """ Remount to make default subvolume mounted """
+        rc = subprocess.call([self.umountcmd, pdisk.mountdir])
+        if rc != 0:
+            raise MountError("Failed to umount %s" % pdisk.mountdir)
+        rc = subprocess.call([self.mountcmd, "-o", pdisk.fsopts, pdisk.disk.device, pdisk.mountdir])
+        if rc != 0:
+            raise MountError("Failed to mount %s" % pdisk.mountdir)
+        for subvol in self.subvolumes:
+            if subvol["mountpoint"] == "/":
+                continue
+            subvolid = self.__get_subvolume_id(pdisk.mountdir, subvol["subvol"])
+            if subvolid == -1:
+                logging.debug("WARNING: invalid subvolume %s" % subvol["subvol"])
+                continue
+            """ Replace subvolume name with subvolume ID """
+            opts = subvol["fsopts"].split(",")
+            for opt in opts:
+                if opt.strip().startswith("subvol="):
+                    opts.remove(opt)
+                    break
+            #opts.append("subvolid=%d" % subvolid)
+            opts.extend(["subvolrootid=0", "subvol=%s" % subvol["subvol"]])
+            fsopts = ",".join(opts)
+            subvol['fsopts'] = fsopts
+            mountpoint = self.mountdir + subvol['mountpoint']
+            makedirs(mountpoint)
+            rc = subprocess.call([self.mountcmd, "-o", fsopts, pdisk.disk.device, mountpoint])
+            if rc != 0:
+                raise MountError("Failed to mount subvolume %s to %s" % (subvol["subvol"], mountpoint))
+            subvol["mounted"] = True
+
+    def __unmount_subvolumes(self):
+        """ It may be called multiple times, so we need to check if it is still mounted. """
+        for subvol in self.subvolumes:
+            if subvol["mountpoint"] == "/":
+                continue
+            if not subvol["mounted"]:
+                continue
+            mountpoint = self.mountdir + subvol['mountpoint']
+            rc = subprocess.call([self.umountcmd, mountpoint])
+            if rc != 0:
+                raise MountError("Failed to unmount subvolume %s from %s" % (subvol["subvol"], mountpoint))
+            subvol["mounted"] = False
+
+    def __create_subvolume_snapshots(self, p, pdisk):
+        if self.snapshot_created:
+            return
+
+        """ Remount with subvolid=0 """
+        rc = subprocess.call([self.umountcmd, pdisk.mountdir])
+        if rc != 0:
+            raise MountError("Failed to umount %s" % pdisk.mountdir)
+        if pdisk.fsopts:
+            mountopts = pdisk.fsopts + ",subvolid=0"
+        else:
+            mountopts = "subvolid=0"
+        rc = subprocess.call([self.mountcmd, "-o", mountopts, pdisk.disk.device, pdisk.mountdir])
+        if rc != 0:
+            raise MountError("Failed to mount %s" % pdisk.mountdir)
+
+        """ Create all the subvolume snapshots """
+        snapshotts = time.strftime("%Y%m%d-%H%M")
+        for subvol in self.subvolumes:
+            subvolpath = pdisk.mountdir + "/" + subvol["subvol"]
+            snapshotpath = subvolpath + "_%s-1" % snapshotts
+            argv = [ self.btrfscmd, "subvolume", "snapshot", subvolpath, snapshotpath ]
+            p1 = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+            (out,err) = p1.communicate()
+            logging.debug(out)
+            if p1.returncode != 0:
+                raise MountError("Failed to create subvolume snapshot '%s' for '%s', return code: %d." % (snapshotpath, subvolpath, p1.returncode))
+        self.snapshot_created = True
+  
+    def mount(self):
+        for dev in self.disks.keys():
+            d = self.disks[dev]
+            d['disk'].create()
+
+        self.__format_disks()
+        self.__map_partitions()
+        self.__calculate_mountorder()
+
+        for mp in self.mountOrder:
+            p = None
+            for p1 in self.partitions:
+                if p1['mountpoint'] == mp:
+                    p = p1
+                    break
+
+            if mp == 'swap':
+                subprocess.call([self.mkswap, p['device']])
+                continue
+
+            rmmountdir = False
+            if p['mountpoint'] == "/":
+                rmmountdir = True
+            if p['fstype'] == "vfat" or p['fstype'] == "msdos":
+                myDiskMount = VfatDiskMount
+            elif p['fstype'] in ("ext2", "ext3", "ext4"):
+                myDiskMount = ExtDiskMount
+            elif p['fstype'] == "btrfs":
+                myDiskMount = BtrfsDiskMount
+            else:
+                raise MountError("Unsupported filesystem: " + p['fstype'])
+
+            if p['fstype'] == "btrfs" and not p['fsopts']:
+                p['fsopts'] = "subvolid=0"
+                
+            pdisk = myDiskMount(RawDisk(p['size'] * self.sector_size, p['device']),
+                                 self.mountdir + p['mountpoint'],
+                                 p['fstype'],
+                                 4096,
+                                 p['mountpoint'],
+                                 rmmountdir,
+                                 self.skipformat,
+                                 fsopts = p['fsopts'])
+            pdisk.mount(pdisk.fsopts)
+            if p['fstype'] == "btrfs" and p['mountpoint'] == "/":
+                if not self.skipformat:
+                    self.__create_subvolumes(p, pdisk)
+                self.__mount_subvolumes(p, pdisk)
+            p['mount'] = pdisk
+
+    def resparse(self, size = None):
+        # Can't re-sparse a disk image - too hard
+        pass
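PartitionedMount drives parted and kpartx to carve one or more loopback-backed disk images into partitions and mount them under a single root. A rough sketch of the intended call sequence, under the assumption that fs_related provides a loopback-backed Disk class named SparseLoopbackDisk (as in the imgcreate lineage this module apparently derives from); the paths and sizes are made up for illustration and the whole sequence needs root privileges:

    from micng.utils.fs_related import SparseLoopbackDisk
    from micng.utils.partitionedfs import PartitionedMount

    # One sparse 4 GB backing file for a disk that will be called "sda"
    disks = {"sda": SparseLoopbackDisk("/var/tmp/test/sda.raw", 4096L * 1024 * 1024)}
    pmount = PartitionedMount(disks, "/var/tmp/test/install_root")

    # Sizes are in MB; add_partition() converts them to sectors internally.
    pmount.add_partition(64,   "sda", "/boot", "ext3", boot = True)
    pmount.add_partition(3584, "sda", "/",     "ext3")
    pmount.add_partition(128,  "sda", "swap",  "swap")

    pmount.mount()        # create partition table, format, kpartx-map and mount
    try:
        pass              # populate the tree under /var/tmp/test/install_root
    finally:
        pmount.unmount()
        pmount.cleanup()  # unmount, unmap partitions and release the loop devices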
diff --git a/micng/utils/pkgmanagers/__init__.py b/micng/utils/pkgmanagers/__init__.py
deleted file mode 100644 (file)
index c18877e..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/python
-
-import os
-from micng.utils.errors import *
-
-class pkgManager:
-    def __init__(self):
-        self.managers = {}
-        self.default_pkg_manager = None
-
-    def register_pkg_manager(self, name, manager):
-#        print "Registering package manager: %s" % name
-        if not self.managers.has_key(name):
-            self.managers[name] = manager
-        
-    def unregister_pkg_manager(self, name):
-        if self.managers.has_key(name):
-            del self.managers[name]
-
-    def set_default_pkg_manager(self, name):
-        if self.managers.has_key(name):
-            self.default_pkg_manager = self.managers[name]
-            print "Use package manager %s" % name
-
-    def get_default_pkg_manager(self):
-        if self.default_pkg_manager:
-            return self.default_pkg_manager
-        else:
-            if self.managers.has_key("zypp"):
-                print "Use package manager zypp"
-                return self.managers["zypp"]
-            elif self.managers.has_key("yum"):
-                print "Use package manager yum"
-                return self.managers["yum"]
-            else:
-                keys = self.managers.keys()
-                if keys:
-                    print "Use package manager %s" % keys[0]
-                    return self.managers[keys[0]]
-                else:
-                    return None
-
-    def load_pkg_managers(self):
-        mydir = os.path.dirname(os.path.realpath(__file__))
-        for file in os.listdir(mydir):
-            if os.path.isfile(mydir + "/" + file) and file.endswith(".py") and file != "__init__.py":
-                pkgmgrmod = file[:file.rfind(".py")]
-                try:
-                    exec("import micng.utils.pkgmanagers.%s as %s " % (pkgmgrmod, pkgmgrmod))
-                    exec("pkgmgr = %s._pkgmgr" % pkgmgrmod)
-                    self.register_pkg_manager(pkgmgr[0], pkgmgr[1])
-                except:
-                    continue
-        if not self.managers.keys():
-            raise CreatorError("No packag manager available")
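The loader above expected every module in micng/utils/pkgmanagers/ to export a module-level _pkgmgr pair of [name, backend class], which register_pkg_manager() then recorded (the yum backend removed below ends with _pkgmgr = ["yum", Yum]). A purely illustrative backend module satisfying that old contract might have looked like this; the class and its behaviour are invented, only the method names mirror the real Yum/Zypp backends:

    # fakepkgmgr.py -- hypothetical backend for the removed pkgmanagers loader
    class FakePkgManager:
        def __init__(self, creator = None, recording_pkgs = None):
            self.creator = creator

        def setup(self, confpath, installroot):
            pass                      # prepare config and install root

        def selectPackage(self, pkg):
            return None               # None means the package was found

        def runInstall(self, checksize = 0):
            return 0                  # run the install transaction

    # The pair picked up by pkgManager.load_pkg_managers()
    _pkgmgr = ["fake", FakePkgManager]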
diff --git a/micng/utils/pkgmanagers/yumpkgmgr.py b/micng/utils/pkgmanagers/yumpkgmgr.py
deleted file mode 100644 (file)
index 0f7e2ee..0000000
+++ /dev/null
@@ -1,448 +0,0 @@
-#
-# yum.py : yum utilities
-#
-# Copyright 2007, Red Hat  Inc.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; version 2 of the License.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Library General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-
-import glob
-import os
-import sys
-import logging
-
-import yum
-import rpmUtils
-import pykickstart.parser
-
-import urlparse
-import urllib2 as u2
-import tempfile
-import shutil
-import subprocess
-
-from micng.utils.errors import *
-from micng.utils.fs_related import *
-from micng.imager.BaseImageCreator import ImageCreator
-
-class MyYumRepository(yum.yumRepo.YumRepository):
-    def __init__(self, repoid):
-        yum.yumRepo.YumRepository.__init__(self, repoid)
-        self.sslverify = False
-
-    def _setupGrab(self):
-        self.sslverify = False
-        yum.yumRepo.YumRepository._setupGrab(self)
-
-    def __del__(self):
-        pass
-
-class Yum(yum.YumBase):
-    def __init__(self, creator = None, recording_pkgs=None):
-        if not isinstance(creator, ImageCreator):
-            raise CreatorError("Invalid argument: creator")
-        yum.YumBase.__init__(self)
-        
-        self.creator = creator
-        
-        if self.creator.target_arch:
-            if rpmUtils.arch.arches.has_key(self.creator.target_arch):
-                self.arch.setup_arch(self.creator.target_arch)
-            else:
-                raise CreatorError("Invalid target arch: %s" % self.creator.target_arch)
-
-        self.__recording_pkgs = recording_pkgs
-        self.__pkgs_content = {}
-
-    def doFileLogSetup(self, uid, logfile):
-        # don't do the file log for the livecd as it can lead to open fds
-        # being left and an inability to clean up after ourself
-        pass
-
-    def close(self):
-        try:
-            os.unlink(self.conf.installroot + "/yum.conf")
-        except:
-            pass
-        self.closeRpmDB()
-        yum.YumBase.close(self)
-        self._delRepos()
-        self._delSacks()
-
-        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
-            for i in range(3, os.sysconf("SC_OPEN_MAX")):
-                try:
-                    os.close(i)
-                except:
-                    pass
-
-    def __del__(self):
-        pass
-
-    def _writeConf(self, confpath, installroot):
-        conf  = "[main]\n"
-        conf += "installroot=%s\n" % installroot
-        conf += "cachedir=/var/cache/yum\n"
-        conf += "plugins=0\n"
-        conf += "reposdir=\n"
-        conf += "failovermethod=priority\n"
-        conf += "http_caching=packages\n"
-        conf += "sslverify=0\n"
-
-        f = file(confpath, "w+")
-        f.write(conf)
-        f.close()
-
-        os.chmod(confpath, 0644)
-
-    def _cleanupRpmdbLocks(self, installroot):
-        # cleans up temporary files left by bdb so that differing
-        # versions of rpm don't cause problems
-        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
-            os.unlink(f)
-
-    def setup(self, confpath, installroot):
-        self._writeConf(confpath, installroot)
-        self._cleanupRpmdbLocks(installroot)
-        self.doConfigSetup(fn = confpath, root = installroot)
-        self.conf.cache = 0
-        self.doTsSetup()
-        self.doRpmDBSetup()
-        self.doRepoSetup()
-        self.doSackSetup()
-
-    def selectPackage(self, pkg):
-        """Select a given package.  Can be specified with name.arch or name*"""
-        try:
-            self.install(pattern = pkg)
-            return None
-        except yum.Errors.InstallError, e:
-            return e
-        except yum.Errors.RepoError, e:
-            raise CreatorError("Unable to download from repo : %s" % (e,))
-        except yum.Errors.YumBaseError, e:
-            raise CreatorError("Unable to install: %s" % (e,))
-
-    def deselectPackage(self, pkg):
-        """Deselect package.  Can be specified as name.arch or name*"""
-        sp = pkg.rsplit(".", 2)
-        txmbrs = []
-        if len(sp) == 2:
-            txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1])
-
-        if len(txmbrs) == 0:
-            exact, match, unmatch = yum.packages.parsePackages(self.pkgSack.returnPackages(), [pkg], casematch=1)
-            for p in exact + match:
-                txmbrs.append(p)
-
-        if len(txmbrs) > 0:
-            for x in txmbrs:
-                self.tsInfo.remove(x.pkgtup)
-                # we also need to remove from the conditionals
-                # dict so that things don't get pulled back in as a result
-                # of them.  yes, this is ugly.  conditionals should die.
-                for req, pkgs in self.tsInfo.conditionals.iteritems():
-                    if x in pkgs:
-                        pkgs.remove(x)
-                        self.tsInfo.conditionals[req] = pkgs
-        else:
-            logging.warn("No such package %s to remove" %(pkg,))
-
-    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
-        try:
-            yum.YumBase.selectGroup(self, grp)
-            if include == pykickstart.parser.GROUP_REQUIRED:
-                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
-            elif include == pykickstart.parser.GROUP_ALL:
-                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
-            return None
-        except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
-            return e
-        except yum.Errors.RepoError, e:
-            raise CreatorError("Unable to download from repo : %s" % (e,))
-        except yum.Errors.YumBaseError, e:
-            raise CreatorError("Unable to install: %s" % (e,))
-
-    def __checkAndDownloadURL(self, u2opener, url, savepath):
-        try:
-            if u2opener:
-                f = u2opener.open(url)
-            else:
-                f = u2.urlopen(url)
-        except u2.HTTPError, httperror:
-            if httperror.code in (404, 503):
-                return None
-            else:
-                raise CreatorError(httperror)
-        except OSError, oserr:
-            if oserr.errno == 2:
-                return None
-            else:
-                raise CreatorError(oserr)
-        except IOError, oserr:
-            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
-                return None
-            else:
-                raise CreatorError(oserr)
-        except u2.URLError, err:
-            raise CreatorError(err)
-
-        # save to file
-        licf = open(savepath, "w")
-        licf.write(f.read())
-        licf.close()
-        f.close()
-
-        return savepath
-
-    def __pagerFile(self, savepath):
-        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
-            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
-        else:
-            pagers = ('less', 'more')
-
-        file_showed = None
-        for pager in pagers:
-            try:
-                subprocess.call([pager, savepath])
-            except OSError:
-                continue
-            else:
-                file_showed = True
-                break
-        if not file_showed:
-            f = open(savepath)
-            print f.read()
-            f.close()
-            raw_input('press <ENTER> to continue...')
-
-    def checkRepositoryEULA(self, name, repo):
-        """ This function is to check the LICENSE file if provided. """
-
-        # when proxy needed, make urllib2 follow it
-        proxy = repo.proxy
-        proxy_username = repo.proxy_username
-        proxy_password = repo.proxy_password
-
-        handlers = []
-        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
-        u2opener = None
-        if proxy:
-            if proxy_username:
-                proxy_netloc = urlparse.urlsplit(proxy).netloc
-                if proxy_password:
-                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
-                else:
-                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
-            else:
-                proxy_url = proxy
-
-            proxy_support = u2.ProxyHandler({'http': proxy_url,
-                                             'ftp': proxy_url})
-            handlers.append(proxy_support)
-
-        # download all remote files to one temp dir
-        baseurl = None
-        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
-
-        for url in repo.baseurl:
-            if not url.endswith('/'):
-                url += '/'
-            tmphandlers = handlers
-            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
-            if scheme not in ("http", "https", "ftp", "ftps", "file"):
-                raise CreatorError("Error: invalid url %s" % url)
-            if '@' in host:
-                try:
-                    user_pass, host = host.split('@', 1)
-                    if ':' in user_pass:
-                        user, password = user_pass.split(':', 1)
-                except ValueError, e:
-                    raise CreatorError('Bad URL: %s' % url)
-                print "adding HTTP auth: %s, %s" %(user, password)
-                auth_handler.add_password(None, host, user, password)
-                tmphandlers.append(auth_handler)
-                url = scheme + "://" + host + path + parm + query + frag
-            if len(tmphandlers) != 0:
-                u2opener = u2.build_opener(*tmphandlers)
-            # try to download
-            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
-            repo_eula_path = self.__checkAndDownloadURL(
-                                    u2opener,
-                                    repo_eula_url,
-                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
-            if repo_eula_path:
-                # found
-                baseurl = url
-                break
-
-        if not baseurl:
-            return True
-
-        # show the license file
-        print 'For the software packages in this yum repo:'
-        print '    %s: %s' % (name, baseurl)
-        print 'There is an "End User License Agreement" file that need to be checked.'
-        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
-        raw_input('press <ENTER> to continue...')
-
-        self.__pagerFile(repo_eula_path)
-
-        # Asking for the "Accept/Decline"
-        accept = True
-        while accept:
-            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
-            if input_accept.upper() in ('YES', 'Y'):
-                break
-            elif input_accept.upper() in ('NO', 'N'):
-                accept = None
-                print 'Will not install pkgs from this repo.'
-
-        if not accept:
-            #cleanup
-            shutil.rmtree(repo_lic_dir)
-            return None
-
-        # try to find support_info.html for extra infomation
-        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
-        repo_info_path = self.__checkAndDownloadURL(
-                                u2opener,
-                                repo_info_url,
-                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
-        if repo_info_path:
-            print 'There is one more file in the repo for additional support information, please read it'
-            raw_input('press <ENTER> to continue...')
-            self.__pagerFile(repo_info_path)
-
-        #cleanup
-        shutil.rmtree(repo_lic_dir)
-        return True
-
-    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
-        def _varSubstitute(option):
-            # takes a variable and substitutes like yum configs do
-            option = option.replace("$basearch", rpmUtils.arch.getBaseArch())
-            option = option.replace("$arch", rpmUtils.arch.getCanonArch())
-            return option
-
-        repo = MyYumRepository(name)
-        repo.sslverify = False
-
-        """Set proxy"""
-        repo.proxy = proxy
-        repo.proxy_username = proxy_username
-        repo.proxy_password = proxy_password
-
-        if url:
-            repo.baseurl.append(_varSubstitute(url))
-
-        # check LICENSE files
-        if not self.checkRepositoryEULA(name, repo):
-            return None
-
-        if mirrorlist:
-            repo.mirrorlist = _varSubstitute(mirrorlist)
-        conf = yum.config.RepoConf()
-        for k, v in conf.iteritems():
-            if v or not hasattr(repo, k):
-                repo.setAttribute(k, v)
-        repo.basecachedir = self.conf.cachedir
-        repo.failovermethod = "priority"
-        repo.metadata_expire = 0
-        # Enable gpg check for verifying corrupt packages
-        repo.gpgcheck = 1
-        repo.enable()
-        repo.setup(0)
-        repo.setCallback(TextProgress())
-        self.repos.add(repo)
-        return repo
-
-    def installHasFile(self, file):
-        provides_pkg = self.whatProvides(file, None, None)
-        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
-        for p in dlpkgs:
-            for q in provides_pkg:
-                if (p == q):
-                    return True
-        return False
-
-    def runInstall(self, checksize = 0):
-        os.environ["HOME"] = "/"
-        try:
-            (res, resmsg) = self.buildTransaction()
-        except yum.Errors.RepoError, e:
-            raise CreatorError("Unable to download from repo : %s" %(e,))
-        if res != 2:
-            raise CreatorError("Failed to build transaction : %s" % str.join("\n", resmsg))
-
-        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
-
-        # record the total size of installed pkgs
-        pkgs_total_size = sum(map(lambda x: int(x.size), dlpkgs))
-
-        # check needed size before actually download and install
-        if checksize and pkgs_total_size > checksize:
-            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
-
-        if self.__recording_pkgs:
-            # record all pkg and the content
-            for pkg in dlpkgs:
-                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name, pkg.printVer(), pkg.arch)
-                self.__pkgs_content[pkg_long_name] = pkg.files
-
-        total_count = len(dlpkgs)
-        cached_count = 0
-        print "Checking packages cache and packages integrity..."
-        for po in dlpkgs:
-            local = po.localPkg()
-            if not os.path.exists(local):
-                continue
-            if not self.verifyPkg(local, po, False):
-                print "Package %s is damaged: %s" % (os.path.basename(local), local)
-            else:
-                cached_count +=1
-        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
-        try:
-            self.downloadPkgs(dlpkgs)
-            # FIXME: sigcheck?
-    
-            self.initActionTs()
-            self.populateTs(keepold=0)
-            deps = self.ts.check()
-            if len(deps) != 0:
-                """ This isn't fatal, Ubuntu has this issue but it is ok. """
-                print deps
-                logging.warn("Dependency check failed!")
-            rc = self.ts.order()
-            if rc != 0:
-                raise CreatorError("ordering packages for installation failed!")
-    
-            # FIXME: callback should be refactored a little in yum
-            sys.path.append('/usr/share/yum-cli')
-            import callback
-            cb = callback.RPMInstallCallback()
-            cb.tsInfo = self.tsInfo
-            cb.filelog = False
-            ret = self.runTransaction(cb)
-            print ""
-            self._cleanupRpmdbLocks(self.conf.installroot)
-            return ret
-        except yum.Errors.RepoError, e:
-            raise CreatorError("Unable to download from repo : %s" % (e,))
-        except yum.Errors.YumBaseError, e:
-            raise CreatorError("Unable to install: %s" % (e,))
-
-    def getAllContent(self):
-        return self.__pkgs_content
-
-_pkgmgr = ["yum", Yum]
diff --git a/micng/utils/pkgmanagers/zypppkgmgr.py b/micng/utils/pkgmanagers/zypppkgmgr.py
deleted file mode 100644 (file)
index 49bc88f..0000000
+++ /dev/null
@@ -1,752 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-import glob
-import re
-import zypp
-import rpm
-import shutil
-import tempfile
-import urlparse
-import urllib2 as u2
-from micng.utils.errors import *
-from micng.imager.BaseImageCreator import ImageCreator
-import pykickstart.parser
-from micng.utils.fs_related import *
-from micng.utils.misc import *
-from micng.utils.rpmmisc import *
-
-class RepositoryStub:
-    def __init__(self):
-        self.name = None
-        self.baseurl = []
-        self.mirrorlist = None
-        self.proxy = None
-        self.proxy_username = None
-        self.proxy_password = None
-        self.includepkgs = None
-        self.includepkgs = None
-        self.exclude = None
-
-        self.enabled = True
-        self.autorefresh = True
-        self.keeppackages = True
-
-class RepoError(CreatorError):
-    pass
-
-class RpmError(CreatorError):
-    pass
-
-class Zypp:
-    def __init__(self, creator = None, recording_pkgs=None):
-        if not isinstance(creator, ImageCreator):
-            raise CreatorError("Invalid argument: creator")
-
-        self.__recording_pkgs = recording_pkgs
-        self.__pkgs_content = {}
-        self.creator = creator
-        self.repos = []
-        self.packages = []
-        self.patterns = []
-        self.localpkgs = {}
-        self.repo_manager = None
-        self.repo_manager_options = None
-        self.Z = None
-        self.ts = None
-        self.probFilterFlags = []
-        self.bin_rpm = find_binary_path("rpm")
-        self.incpkgs = []
-        self.excpkgs = []
-
-    def doFileLogSetup(self, uid, logfile):
-        # don't do the file log for the livecd as it can lead to open fds
-        # being left and an inability to clean up after ourself
-        pass
-
-    def closeRpmDB(self):
-        pass
-
-    def close(self):
-        try:
-            os.unlink(self.installroot + "/yum.conf")
-        except:
-            pass
-        self.closeRpmDB()
-        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
-            for i in range(3, os.sysconf("SC_OPEN_MAX")):
-                try:
-                    os.close(i)
-                except:
-                    pass
-        if self.ts:
-            self.ts.closeDB()
-            self.ts = None
-
-    def __del__(self):
-        self.close()
-
-    def _writeConf(self, confpath, installroot):
-        conf  = "[main]\n"
-        conf += "installroot=%s\n" % installroot
-        conf += "cachedir=/var/cache/yum\n"
-        conf += "plugins=0\n"
-        conf += "reposdir=\n"
-        conf += "failovermethod=priority\n"
-        conf += "http_caching=packages\n"
-
-        f = file(confpath, "w+")
-        f.write(conf)
-        f.close()
-
-        os.chmod(confpath, 0644)
-
-    def _cleanupRpmdbLocks(self, installroot):
-        # cleans up temporary files left by bdb so that differing
-        # versions of rpm don't cause problems
-        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
-            os.unlink(f)
-
-    def setup(self, confpath, installroot):
-        self._writeConf(confpath, installroot)
-        self._cleanupRpmdbLocks(installroot)
-        self.installroot = installroot
-
-    def selectPackage(self, pkg):
-        """ Select a given package or package pattern, can be specified with name.arch or name* or *name """
-        if not self.Z:
-            self.__initialize_zypp()
-        
-        found = False
-        startx = pkg.startswith("*")
-        endx = pkg.endswith("*")
-        ispattern = startx or endx
-        sp = pkg.rsplit(".", 2)
-        for item in self.Z.pool():
-            kind = "%s" % item.kind()
-            if kind == "package":
-                name = "%s" % item.name()
-                if not ispattern:
-                    if name in self.incpkgs or self.excpkgs:
-                        found = True
-                        break
-                    if len(sp) == 2:
-                        arch = "%s" % item.arch()
-                        if name == sp[0] and arch == sp[1]:
-                            found = True
-                            if name not in self.packages:
-                                self.packages.append(name)
-                                item.status().setToBeInstalled (zypp.ResStatus.USER)
-                            break
-                    else:
-                        if name == sp[0]:
-                            found = True
-                            if name not in self.packages:
-                                self.packages.append(name)
-                                item.status().setToBeInstalled (zypp.ResStatus.USER)
-                            break
-                else:
-                    if name in self.incpkgs or self.excpkgs:
-                        found =  True
-                        continue
-                    if startx and name.endswith(sp[0][1:]):
-                        found = True
-                        if name not in self.packages:
-                            self.packages.append(name)
-                            item.status().setToBeInstalled (zypp.ResStatus.USER)
-
-                    if endx and name.startswith(sp[0][:-1]):
-                        found = True
-                        if name not in self.packages:
-                            self.packages.append(name)
-                            item.status().setToBeInstalled (zypp.ResStatus.USER)
-        if found:
-            return None
-        else:
-            e = CreatorError("Unable to find package: %s" % (pkg,))
-            return e
-
-    def deselectPackage(self, pkg):
-        """Deselect package.  Can be specified as name.arch or name*"""
-        
-        if not self.Z:
-            self.__initialize_zypp()
-        
-        startx = pkg.startswith("*")
-        endx = pkg.endswith("*")
-        ispattern = startx or endx
-        sp = pkg.rsplit(".", 2)
-        for item in self.Z.pool():
-            kind = "%s" % item.kind()
-            if kind == "package":
-                name = "%s" % item.name()
-                if not ispattern:
-                    if len(sp) == 2:
-                        arch = "%s" % item.arch()
-                        if name == sp[0] and arch == sp[1]:
-                            if item.status().isToBeInstalled():
-                                item.status().resetTransact(zypp.ResStatus.USER)
-                            if name in self.packages:
-                                self.packages.remove(name)
-                            break
-                    else:
-                        if name == sp[0]:
-                            if item.status().isToBeInstalled():
-                                item.status().resetTransact(zypp.ResStatus.USER)
-                            if name in self.packages:
-                                self.packages.remove(name)
-                            break                             
-                else:
-                    if startx and name.endswith(sp[0][1:]):
-                        if item.status().isToBeInstalled():
-                            item.status().resetTransact(zypp.ResStatus.USER)
-                        if name in self.packages:
-                            self.packages.remove(name)
-
-                    if endx and name.startswith(sp[0][:-1]):
-                        if item.status().isToBeInstalled():
-                            item.status().resetTransact(zypp.ResStatus.USER)
-                        if name in self.packages:
-                            self.packages.remove(name)
-    
-    def __selectIncpkgs(self):        
-        found = False
-        for pkg in self.incpkgs:
-            for item in self.Z.pool():
-                kind = "%s" % item.kind()
-                if kind == "package":
-                    name = "%s" % item.name()
-                    repoalias = "%s" % item.repoInfo().alias()
-                    if name == pkg and repoalias.endswith("include"):
-                        found = True
-                        if name not in self.packages:
-                            self.packages.append(name)
-                            item.status().setToBeInstalled (zypp.ResStatus.USER)
-                        break         
-        if not found:
-            raise CreatorError("Unable to find package: %s" % (pkg,))
-    
-    def __selectExcpkgs(self):    
-        found = False        
-        for pkg in self.excpkgs:
-            for item in self.Z.pool():
-                kind = "%s" % item.kind()
-                if kind == "package":
-                    name = "%s" % item.name()
-                    repoalias = "%s" % item.repoInfo().alias()
-                    if name == pkg and not repoalias.endswith("exclude"):
-                        found = True
-                        if name not in self.packages:
-                            self.packages.append(name)
-                            item.status().setToBeInstalled (zypp.ResStatus.USER)
-                        break                     
-        if not found:
-            raise CreatorError("Unable to find package: %s" % (pkg,))
-
-        
-    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
-        if not self.Z:
-            self.__initialize_zypp()
-        found = False
-        for item in self.Z.pool():
-            kind = "%s" % item.kind()
-            if kind == "pattern":
-                summary = "%s" % item.summary()
-                name = "%s" % item.name()
-                if name == grp or summary == grp:
-                    found = True
-                    if name not in self.patterns:
-                        self.patterns.append(name)
-                        item.status().setToBeInstalled (zypp.ResStatus.USER)
-                    break
-                
-        if found:
-            if include == pykickstart.parser.GROUP_REQUIRED:
-                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
-            elif include == pykickstart.parser.GROUP_ALL:
-                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
-            return None
-        else:
-            e = CreatorError("Unable to find pattern: %s" % (grp,))
-            return e
-
-    def __checkAndDownloadURL(self, u2opener, url, savepath):
-        try:
-            if u2opener:
-                f = u2opener.open(url)
-            else:
-                f = u2.urlopen(url)
-        except u2.HTTPError, httperror:
-            if httperror.code in (404, 503):
-                return None
-            else:
-                raise CreatorError(httperror)
-        except OSError, oserr:
-            if oserr.errno == 2:
-                return None
-            else:
-                raise CreatorError(oserr)
-        except IOError, oserr:
-            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
-                return None
-            else:
-                raise CreatorError(oserr)
-        except u2.URLError, err:
-            raise CreatorError(err)
-
-        # save to file
-        licf = open(savepath, "w")
-        licf.write(f.read())
-        licf.close()
-        f.close()
-
-        return savepath
-
-    def __pagerFile(self, savepath):
-        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
-            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
-        else:
-            pagers = ('less', 'more')
-
-        file_showed = None
-        for pager in pagers:
-            try:
-                subprocess.call([pager, savepath])
-            except OSError:
-                continue
-            else:
-                file_showed = True
-                break
-        if not file_showed:
-            f = open(savepath)
-            print f.read()
-            f.close()
-            raw_input('press <ENTER> to continue...')
-
-    def checkRepositoryEULA(self, name, repo):
-        """ This function is to check the LICENSE file if provided. """
-
-        # when proxy needed, make urllib2 follow it
-        proxy = repo.proxy
-        proxy_username = repo.proxy_username
-        proxy_password = repo.proxy_password
-
-        handlers = []
-        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
-        u2opener = None
-        if proxy:
-            if proxy_username:
-                proxy_netloc = urlparse.urlsplit(proxy).netloc
-                if proxy_password:
-                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
-                else:
-                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
-            else:
-                proxy_url = proxy
-
-            proxy_support = u2.ProxyHandler({'http': proxy_url,
-                                             'ftp': proxy_url})
-            handlers.append(proxy_support)
-
-        # download all remote files to one temp dir
-        baseurl = None
-        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
-
-        for url in repo.baseurl:
-            if not url.endswith('/'):
-                url += '/'
-            tmphandlers = handlers
-            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
-            if scheme not in ("http", "https", "ftp", "ftps", "file"):
-                raise CreatorError("Error: invalid url %s" % url)
-            if '@' in host:
-                try:
-                    user_pass, host = host.split('@', 1)
-                    if ':' in user_pass:
-                        user, password = user_pass.split(':', 1)
-                except ValueError, e:
-                    raise CreatorError('Bad URL: %s' % url)
-                print "adding HTTP auth: %s, %s" %(user, password)
-                auth_handler.add_password(None, host, user, password)
-                tmphandlers.append(auth_handler)
-                url = scheme + "://" + host + path + parm + query + frag
-            if len(tmphandlers) != 0:
-                u2opener = u2.build_opener(*tmphandlers)
-            # try to download
-            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
-            repo_eula_path = self.__checkAndDownloadURL(
-                                    u2opener,
-                                    repo_eula_url,
-                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
-            if repo_eula_path:
-                # found
-                baseurl = url
-                break
-
-        if not baseurl:
-            return True
-
-        # show the license file
-        print 'For the software packages in this yum repo:'
-        print '    %s: %s' % (name, baseurl)
-        print 'There is an "End User License Agreement" file that need to be checked.'
-        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
-        raw_input('press <ENTER> to continue...')
-
-        self.__pagerFile(repo_eula_path)
-
-        # Asking for the "Accept/Decline"
-        accept = True
-        while accept:
-            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
-            if input_accept.upper() in ('YES', 'Y'):
-                break
-            elif input_accept.upper() in ('NO', 'N'):
-                accept = None
-                print 'Will not install pkgs from this repo.'
-
-        if not accept:
-            #cleanup
-            shutil.rmtree(repo_lic_dir)
-            return None
-
-        # try to find support_info.html for extra information
-        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
-        repo_info_path = self.__checkAndDownloadURL(
-                                u2opener,
-                                repo_info_url,
-                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
-        if repo_info_path:
-            print 'There is one more file in the repo for additional support information, please read it'
-            raw_input('press <ENTER> to continue...')
-            self.__pagerFile(repo_info_path)
-
-        #cleanup
-        shutil.rmtree(repo_lic_dir)
-        return True
-
-    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
-        if not self.repo_manager:
-            self.__initialize_repo_manager()
-
-        repo = RepositoryStub()
-        repo.name = name
-        repo.id = name
-        repo.proxy = proxy
-        repo.proxy_username = proxy_username
-        repo.proxy_password = proxy_password
-        repo.baseurl.append(url)
-        repo_alias = repo.id
-        if inc:
-            repo_alias = name + "include"
-            self.incpkgs = inc
-        if exc:
-            repo_alias = name + "exclude"
-            self.excpkgs = exc
-
-        # check LICENSE files
-        if not self.checkRepositoryEULA(name, repo):
-            return None
-
-        if mirrorlist:
-            repo.mirrorlist = mirrorlist
-
-        # Enable gpg check for verifying corrupt packages
-        repo.gpgcheck = 1
-        self.repos.append(repo)
-
-
-        try:
-            repo_info = zypp.RepoInfo()
-            repo_info.setAlias(repo_alias)
-            repo_info.setName(repo.name)
-            repo_info.setEnabled(repo.enabled)
-            repo_info.setAutorefresh(repo.autorefresh)
-            repo_info.setKeepPackages(repo.keeppackages)
-            repo_info.addBaseUrl(zypp.Url(repo.baseurl[0]))
-            self.repo_manager.addRepository(repo_info)
-            self.__build_repo_cache(name)
-        except RuntimeError, e:
-            raise CreatorError("%s" % (e,))
-
-        return repo
-
-    def installHasFile(self, file):
-        return False
-
-    def runInstall(self, checksize = 0):
-        if self.incpkgs:
-            self.__selectIncpkgs()
-        if self.excpkgs:
-            self.__selectExcpkgs()
-        
-        os.environ["HOME"] = "/"
-        self.buildTransaction()
-
-        todo = zypp.GetResolvablesToInsDel(self.Z.pool())
-        installed_pkgs = todo._toInstall
-        dlpkgs = []
-        for item in installed_pkgs:
-            if not zypp.isKindPattern(item):
-                dlpkgs.append(item)
-
-        # record the total size of installed pkgs
-        pkgs_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))
-
-        # check needed size before actually download and install
-        if checksize and pkgs_total_size > checksize:
-            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
-
-        if self.__recording_pkgs:
-            # record all pkg and the content
-            for pkg in dlpkgs:
-                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name(), pkg.edition(), pkg.arch())
-                self.__pkgs_content[pkg_long_name] = {} #TBD: to get file list
-
-        total_count = len(dlpkgs)
-        cached_count = 0
-        localpkgs = self.localpkgs.keys()
-        print "Checking packages cache and packages integrity..."
-        for po in dlpkgs:
-            """ Check if it is cached locally """
-            if po.name() in localpkgs:
-                cached_count += 1
-            else:
-                local = self.getLocalPkgPath(po)
-                if os.path.exists(local):
-                    if self.checkPkg(local) != 0:
-                        os.unlink(local)
-                    else:
-                        cached_count += 1
-        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
-        try:
-            print "downloading packages..."
-            self.downloadPkgs(dlpkgs)
-            self.installPkgs(dlpkgs)
-    
-        except RepoError, e:
-            raise CreatorError("Unable to download from repo : %s" % (e,))
-        except RpmError, e:
-            raise CreatorError("Unable to install: %s" % (e,))
-
-    def getAllContent(self):
-        return self.__pkgs_content
-
-    def __initialize_repo_manager(self):
-        if self.repo_manager:
-            return
-
-        """ Clean up repo metadata """
-        shutil.rmtree(self.creator.cachedir + "/var", ignore_errors = True)
-        shutil.rmtree(self.creator.cachedir + "/etc", ignore_errors = True)
-        shutil.rmtree(self.creator.cachedir + "/raw", ignore_errors = True)
-        shutil.rmtree(self.creator.cachedir + "/solv", ignore_errors = True)
-        
-        zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
-                                       | zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
-                                       | zypp.KeyRing.ACCEPT_UNKNOWNKEY
-                                       | zypp.KeyRing.TRUST_KEY_TEMPORARILY
-                                     )
-        self.repo_manager_options = zypp.RepoManagerOptions(zypp.Pathname(self.creator._instroot))
-        self.repo_manager_options.knownReposPath = zypp.Pathname(self.creator.cachedir + "/etc/zypp/repos.d")
-        self.repo_manager_options.repoCachePath = zypp.Pathname(self.creator.cachedir + "/var/cache/zypp")
-        self.repo_manager_options.repoRawCachePath = zypp.Pathname(self.creator.cachedir + "/raw")
-        self.repo_manager_options.repoSolvCachePath = zypp.Pathname(self.creator.cachedir + "/solv")
-        self.repo_manager_options.repoPackagesCachePath = zypp.Pathname(self.creator.cachedir + "/packages")
-        
-        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
-
-
-    def __build_repo_cache(self, name):
-        repos = self.repo_manager.knownRepositories()
-        for repo in repos:
-            if not repo.enabled():
-                continue
-            reponame = "%s" % repo.name()
-            if reponame != name:
-                continue
-            if self.repo_manager.isCached( repo ):
-                return
-            #print "Retrieving repo metadata from %s ..." % repo.url()
-            self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
-
-
-    def __initialize_zypp(self):
-        if self.Z:
-            return
-
-        zconfig = zypp.ZConfig_instance()
-
-        """ Set system architecture """
-        if self.creator.target_arch and self.creator.target_arch.startswith("arm"):
-            arches = ["armv7l", "armv7nhl", "armv7hl"]
-            if self.creator.target_arch not in arches:
-                raise CreatorError("Invalid architecture: %s" % self.creator.target_arch)
-            arch_map = {}
-            if self.creator.target_arch == "armv7l":
-                arch_map["armv7l"] = zypp.Arch_armv7l()
-            elif self.creator.target_arch == "armv7nhl":
-                arch_map["armv7nhl"] = zypp.Arch_armv7nhl()
-            elif self.creator.target_arch == "armv7hl":
-                arch_map["armv7hl"] = zypp.Arch_armv7hl() 
-            zconfig.setSystemArchitecture(arch_map[self.creator.target_arch])
-
-        print "zypp architecture: %s" % zconfig.systemArchitecture()
-
-        """ repoPackagesCachePath is corrected by this """
-        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
-        repos = self.repo_manager.knownRepositories()
-        for repo in repos:
-            if not repo.enabled():
-                continue
-            if not self.repo_manager.isCached( repo ):
-                print "Retrieving repo metadata from %s ..." % repo.url()
-                self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
-            else:
-                self.repo_manager.refreshMetadata(repo, zypp.RepoManager.BuildIfNeeded)
-            self.repo_manager.loadFromCache( repo );
-
-        self.Z = zypp.ZYppFactory_instance().getZYpp()
-        self.Z.initializeTarget( zypp.Pathname(self.creator._instroot) )
-        self.Z.target().load();
-
-
-    def buildTransaction(self):
-        if not self.Z.resolver().resolvePool():
-            print "Problem count: %d" % len(self.Z.resolver().problems())
-            for problem in self.Z.resolver().problems():
-                print "Problem: %s, %s" % (problem.description().decode("utf-8"), problem.details().decode("utf-8"))
-
-    def getLocalPkgPath(self, po):
-        repoinfo = po.repoInfo()
-        name = po.name()
-        cacheroot = repoinfo.packagesPath()
-        arch =  po.arch()
-        edition = po.edition()
-        version = "%s-%s" % (edition.version(), edition.release())
-        pkgpath = "%s/%s/%s-%s.%s.rpm" % (cacheroot, arch, name, version, arch)
-        return pkgpath
-
-    def installLocal(self, pkg, po=None, updateonly=False):
-        if not self.ts:
-            self.__initialize_transaction()
-        pkgname = self.__get_pkg_name(pkg)
-        self.localpkgs[pkgname] = pkg
-        self.selectPackage(pkgname)
-
-    def __get_pkg_name(self, pkgpath):
-        h = readRpmHeader(self.ts, pkgpath)
-        return h["name"]
-
-    def downloadPkgs(self, package_objects):
-        localpkgs = self.localpkgs.keys()
-        for po in package_objects:
-            if po.name() in localpkgs:
-                continue
-            filename = self.getLocalPkgPath(po)
-            if os.path.exists(filename):
-                if self.checkPkg(filename) == 0:
-                    continue
-            dir = os.path.dirname(filename)
-            if not os.path.exists(dir):
-                makedirs(dir)
-            baseurl = po.repoInfo().baseUrls()[0].__str__()
-            proxy = self.get_proxy(po.repoInfo())
-            proxies = {}
-            if proxy:
-                proxies = {str(proxy.split(":")[0]):str(proxy)}
-          
-            location = zypp.asKindPackage(po).location()
-            location = location.filename().__str__()
-            if location.startswith("./"):
-                location = location[2:]
-            url = baseurl + "/%s" % location
-            try:
-                filename = myurlgrab(url, filename, proxies)
-            except CreatorError, e:
-                self.close()
-                raise CreatorError("%s" % e)
-
-    def installPkgs(self, package_objects):
-        if not self.ts:
-            self.__initialize_transaction()
-
-        """ Set filters """
-        probfilter = 0
-        for flag in self.probFilterFlags:
-            probfilter |= flag
-        self.ts.setProbFilter(probfilter)
-
-        localpkgs = self.localpkgs.keys()
-        for po in package_objects:
-            pkgname = po.name()
-            if pkgname in localpkgs:
-                rpmpath = self.localpkgs[pkgname]
-            else:
-                rpmpath = self.getLocalPkgPath(po)
-            if not os.path.exists(rpmpath):
-                """ Maybe it is a local repo """
-                baseurl = po.repoInfo().baseUrls()[0].__str__()
-                baseurl = baseurl.strip()
-                if baseurl.startswith("file:/"):
-                    rpmpath = baseurl[5:] + "/%s/%s" % (po.arch(), os.path.basename(rpmpath))
-            if not os.path.exists(rpmpath):
-                raise RpmError("Error: %s doesn't exist" % rpmpath)
-            h = readRpmHeader(self.ts, rpmpath)
-            self.ts.addInstall(h, rpmpath, 'u')
-
-        unresolved_dependencies = self.ts.check()
-        if not unresolved_dependencies:
-            self.ts.order()
-            cb = RPMInstallCallback(self.ts)
-            self.ts.run(cb.callback, '')
-            self.ts.closeDB()
-            self.ts = None
-        else:
-            print unresolved_dependencies
-            raise RepoError("Error: Unresolved dependencies, transaction failed.")
-
-    def __initialize_transaction(self):
-        if not self.ts:
-            self.ts = rpm.TransactionSet(self.creator._instroot)
-            # Set to not verify DSA signatures.
-            self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
-
-    def checkPkg(self, pkg):
-        ret = 1
-        if not os.path.exists(pkg):
-            return ret
-        ret = checkRpmIntegrity(self.bin_rpm, pkg)
-        if ret != 0:
-            print "Package %s is damaged: %s" % (os.path.basename(pkg), pkg)
-        return ret
-
-    def zypp_install(self):
-        policy = zypp.ZYppCommitPolicy()
-        policy.downloadMode(zypp.DownloadInAdvance)
-        policy.dryRun( False )
-        policy.syncPoolAfterCommit( False )
-        result = self.Z.commit( policy )
-        print result
-
-    def _add_prob_flags(self, *flags):
-        for flag in flags:
-           if flag not in self.probFilterFlags:
-               self.probFilterFlags.append(flag)
-
-    def get_proxy(self, repoinfo):
-        proxy = None
-        reponame = "%s" % repoinfo.name()
-        for repo in self.repos:
-            if repo.name == reponame:
-                proxy = repo.proxy
-                break
-        if proxy:
-            return proxy
-        else:
-            repourl = repoinfo.baseUrls()[0].__str__()
-            return get_proxy(repourl)
-
-_pkgmgr = ["zypp", Zypp]
-
index e69de29..d75a08b 100644 (file)
@@ -0,0 +1,449 @@
+#
+# yum.py : yum utilities
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import glob
+import os
+import sys
+import logging
+
+import yum
+import rpmUtils
+import pykickstart.parser
+
+import urlparse
+import urllib2 as u2
+import tempfile
+import shutil
+import subprocess
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.pluginbase.backend_plugin import BackendPlugin
+from micng.imager.baseimager import BaseImageCreator as ImageCreator
+
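+# Thin wrapper around yum's YumRepository that keeps sslverify switched off
+# (including when the grabber is re-initialised) and makes the destructor a
+# no-op, so repository teardown cannot fail during image creation.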
+class MyYumRepository(yum.yumRepo.YumRepository):
+    def __init__(self, repoid):
+        yum.yumRepo.YumRepository.__init__(self, repoid)
+        self.sslverify = False
+
+    def _setupGrab(self):
+        self.sslverify = False
+        yum.yumRepo.YumRepository._setupGrab(self)
+
+    def __del__(self):
+        pass
+
+class Yum(BackendPlugin, yum.YumBase):
+    def __init__(self, creator = None, recording_pkgs=None):
+        if not isinstance(creator, ImageCreator):
+            raise CreatorError("Invalid argument: creator")
+        yum.YumBase.__init__(self)
+        
+        self.creator = creator
+        
+        if self.creator.target_arch:
+            if rpmUtils.arch.arches.has_key(self.creator.target_arch):
+                self.arch.setup_arch(self.creator.target_arch)
+            else:
+                raise CreatorError("Invalid target arch: %s" % self.creator.target_arch)
+
+        self.__recording_pkgs = recording_pkgs
+        self.__pkgs_content = {}
+
+    def doFileLogSetup(self, uid, logfile):
+        # don't do the file log for the livecd as it can lead to open fds
+        # being left and an inability to clean up after ourself
+        pass
+
+    def close(self):
+        try:
+            os.unlink(self.conf.installroot + "/yum.conf")
+        except:
+            pass
+        self.closeRpmDB()
+        yum.YumBase.close(self)
+        self._delRepos()
+        self._delSacks()
+
+        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
+            for i in range(3, os.sysconf("SC_OPEN_MAX")):
+                try:
+                    os.close(i)
+                except:
+                    pass
+
+    def __del__(self):
+        pass
+
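+    # Write a minimal yum.conf for the image build: packages go into the
+    # given installroot, plugins are disabled and reposdir is emptied so only
+    # repositories added through addRepository() below are consulted.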
+    def _writeConf(self, confpath, installroot):
+        conf  = "[main]\n"
+        conf += "installroot=%s\n" % installroot
+        conf += "cachedir=/var/cache/yum\n"
+        conf += "plugins=0\n"
+        conf += "reposdir=\n"
+        conf += "failovermethod=priority\n"
+        conf += "http_caching=packages\n"
+        conf += "sslverify=0\n"
+
+        f = file(confpath, "w+")
+        f.write(conf)
+        f.close()
+
+        os.chmod(confpath, 0644)
+
+    def _cleanupRpmdbLocks(self, installroot):
+        # cleans up temporary files left by bdb so that differing
+        # versions of rpm don't cause problems
+        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
+            os.unlink(f)
+
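+    # Standard yum bring-up against the generated config, in order:
+    # config -> transaction set -> rpm database -> repositories -> package sacks.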
+    def setup(self, confpath, installroot):
+        self._writeConf(confpath, installroot)
+        self._cleanupRpmdbLocks(installroot)
+        self.doConfigSetup(fn = confpath, root = installroot)
+        self.conf.cache = 0
+        self.doTsSetup()
+        self.doRpmDBSetup()
+        self.doRepoSetup()
+        self.doSackSetup()
+
+    def selectPackage(self, pkg):
+        """Select a given package.  Can be specified with name.arch or name*"""
+        try:
+            self.install(pattern = pkg)
+            return None
+        except yum.Errors.InstallError, e:
+            return e
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def deselectPackage(self, pkg):
+        """Deselect package.  Can be specified as name.arch or name*"""
+        sp = pkg.rsplit(".", 2)
+        txmbrs = []
+        if len(sp) == 2:
+            txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1])
+
+        if len(txmbrs) == 0:
+            exact, match, unmatch = yum.packages.parsePackages(self.pkgSack.returnPackages(), [pkg], casematch=1)
+            for p in exact + match:
+                txmbrs.append(p)
+
+        if len(txmbrs) > 0:
+            for x in txmbrs:
+                self.tsInfo.remove(x.pkgtup)
+                # we also need to remove from the conditionals
+                # dict so that things don't get pulled back in as a result
+                # of them.  yes, this is ugly.  conditionals should die.
+                for req, pkgs in self.tsInfo.conditionals.iteritems():
+                    if x in pkgs:
+                        pkgs.remove(x)
+                        self.tsInfo.conditionals[req] = pkgs
+        else:
+            logging.warn("No such package %s to remove" %(pkg,))
+
+    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
+        try:
+            yum.YumBase.selectGroup(self, grp)
+            if include == pykickstart.parser.GROUP_REQUIRED:
+                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
+            elif include == pykickstart.parser.GROUP_ALL:
+                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
+            return None
+        except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
+            return e
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
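+    # Fetch a single remote file (through the proxy-aware opener when one is
+    # given) and write it to savepath. Returns None when the file simply is
+    # not there (HTTP 404/503 or a local "No such file" error) so callers can
+    # treat LICENSE/support files as optional; other errors become CreatorError.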
+    def __checkAndDownloadURL(self, u2opener, url, savepath):
+        try:
+            if u2opener:
+                f = u2opener.open(url)
+            else:
+                f = u2.urlopen(url)
+        except u2.HTTPError, httperror:
+            if httperror.code in (404, 503):
+                return None
+            else:
+                raise CreatorError(httperror)
+        except OSError, oserr:
+            if oserr.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except IOError, oserr:
+            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except u2.URLError, err:
+            raise CreatorError(err)
+
+        # save to file
+        licf = open(savepath, "w")
+        licf.write(f.read())
+        licf.close()
+        f.close()
+
+        return savepath
+
+    def __pagerFile(self, savepath):
+        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
+            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
+        else:
+            pagers = ('less', 'more')
+
+        file_showed = None
+        for pager in pagers:
+            try:
+                subprocess.call([pager, savepath])
+            except OSError:
+                continue
+            else:
+                file_showed = True
+                break
+        if not file_showed:
+            f = open(savepath)
+            print f.read()
+            f.close()
+            raw_input('press <ENTER> to continue...')
+
+    def checkRepositoryEULA(self, name, repo):
+        """ This function is to check the LICENSE file if provided. """
+
+        # when proxy needed, make urllib2 follow it
+        proxy = repo.proxy
+        proxy_username = repo.proxy_username
+        proxy_password = repo.proxy_password
+
+        handlers = []
+        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
+        u2opener = None
+        if proxy:
+            if proxy_username:
+                proxy_netloc = urlparse.urlsplit(proxy).netloc
+                if proxy_password:
+                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
+                else:
+                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
+            else:
+                proxy_url = proxy
+
+            proxy_support = u2.ProxyHandler({'http': proxy_url,
+                                             'ftp': proxy_url})
+            handlers.append(proxy_support)
+
+        # download all remote files to one temp dir
+        baseurl = None
+        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
+
+        for url in repo.baseurl:
+            if not url.endswith('/'):
+                url += '/'
+            tmphandlers = handlers
+            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+            if scheme not in ("http", "https", "ftp", "ftps", "file"):
+                raise CreatorError("Error: invalid url %s" % url)
+            if '@' in host:
+                try:
+                    user_pass, host = host.split('@', 1)
+                    if ':' in user_pass:
+                        user, password = user_pass.split(':', 1)
+                except ValueError, e:
+                    raise CreatorError('Bad URL: %s' % url)
+                print "adding HTTP auth: %s, %s" %(user, password)
+                auth_handler.add_password(None, host, user, password)
+                tmphandlers.append(auth_handler)
+                url = scheme + "://" + host + path + parm + query + frag
+            if len(tmphandlers) != 0:
+                u2opener = u2.build_opener(*tmphandlers)
+            # try to download
+            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
+            repo_eula_path = self.__checkAndDownloadURL(
+                                    u2opener,
+                                    repo_eula_url,
+                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
+            if repo_eula_path:
+                # found
+                baseurl = url
+                break
+
+        if not baseurl:
+            return True
+
+        # show the license file
+        print 'For the software packages in this yum repo:'
+        print '    %s: %s' % (name, baseurl)
+        print 'There is an "End User License Agreement" file that need to be checked.'
+        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
+        raw_input('press <ENTER> to continue...')
+
+        self.__pagerFile(repo_eula_path)
+
+        # Asking for the "Accept/Decline"
+        accept = True
+        while accept:
+            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
+            if input_accept.upper() in ('YES', 'Y'):
+                break
+            elif input_accept.upper() in ('NO', 'N'):
+                accept = None
+                print 'Will not install pkgs from this repo.'
+
+        if not accept:
+            #cleanup
+            shutil.rmtree(repo_lic_dir)
+            return None
+
+        # try to find support_info.html for extra information
+        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
+        repo_info_path = self.__checkAndDownloadURL(
+                                u2opener,
+                                repo_info_url,
+                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
+        if repo_info_path:
+            print 'There is one more file in the repo for additional support information, please read it'
+            raw_input('press <ENTER> to continue...')
+            self.__pagerFile(repo_info_path)
+
+        #cleanup
+        shutil.rmtree(repo_lic_dir)
+        return True
+
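+    # Register a repository with this YumBase instance: substitute
+    # $basearch/$arch in the URL, run the repository EULA check (and skip the
+    # repo if it is declined), then apply the default RepoConf options and
+    # enable the repo with gpgcheck turned on.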
+    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
+        def _varSubstitute(option):
+            # takes a variable and substitutes like yum configs do
+            option = option.replace("$basearch", rpmUtils.arch.getBaseArch())
+            option = option.replace("$arch", rpmUtils.arch.getCanonArch())
+            return option
+
+        repo = MyYumRepository(name)
+        repo.sslverify = False
+
+        """Set proxy"""
+        repo.proxy = proxy
+        repo.proxy_username = proxy_username
+        repo.proxy_password = proxy_password
+
+        if url:
+            repo.baseurl.append(_varSubstitute(url))
+
+        # check LICENSE files
+        if not self.checkRepositoryEULA(name, repo):
+            return None
+
+        if mirrorlist:
+            repo.mirrorlist = _varSubstitute(mirrorlist)
+        conf = yum.config.RepoConf()
+        for k, v in conf.iteritems():
+            if v or not hasattr(repo, k):
+                repo.setAttribute(k, v)
+        repo.basecachedir = self.conf.cachedir
+        repo.failovermethod = "priority"
+        repo.metadata_expire = 0
+        # Enable gpg check for verifying corrupt packages
+        repo.gpgcheck = 1
+        repo.enable()
+        repo.setup(0)
+        repo.setCallback(TextProgress())
+        self.repos.add(repo)
+        return repo
+
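+    # Return True if any package already queued for installation provides the
+    # given file path.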
+    def installHasFile(self, file):
+        provides_pkg = self.whatProvides(file, None, None)
+        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
+        for p in dlpkgs:
+            for q in provides_pkg:
+                if (p == q):
+                    return True
+        return False
+
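+    # Resolve the transaction, check the total install size against the root
+    # partition size from the kickstart, reuse cached packages where their
+    # checksums verify, then download the rest and run the rpm transaction
+    # using yum's CLI install callback.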
+    def runInstall(self, checksize = 0):
+        os.environ["HOME"] = "/"
+        try:
+            (res, resmsg) = self.buildTransaction()
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" %(e,))
+        if res != 2:
+            raise CreatorError("Failed to build transaction : %s" % str.join("\n", resmsg))
+
+        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
+
+        # record the total size of installed pkgs
+        pkgs_total_size = sum(map(lambda x: int(x.size), dlpkgs))
+
+        # check needed size before actually download and install
+        if checksize and pkgs_total_size > checksize:
+            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
+
+        if self.__recording_pkgs:
+            # record all pkg and the content
+            for pkg in dlpkgs:
+                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name, pkg.printVer(), pkg.arch)
+                self.__pkgs_content[pkg_long_name] = pkg.files
+
+        total_count = len(dlpkgs)
+        cached_count = 0
+        print "Checking packages cache and packages integrity..."
+        for po in dlpkgs:
+            local = po.localPkg()
+            if not os.path.exists(local):
+                continue
+            if not self.verifyPkg(local, po, False):
+                print "Package %s is damaged: %s" % (os.path.basename(local), local)
+            else:
+                cached_count +=1
+        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
+        try:
+            self.downloadPkgs(dlpkgs)
+            # FIXME: sigcheck?
+    
+            self.initActionTs()
+            self.populateTs(keepold=0)
+            deps = self.ts.check()
+            if len(deps) != 0:
+                """ This isn't fatal, Ubuntu has this issue but it is ok. """
+                print deps
+                logging.warn("Dependency check failed!")
+            rc = self.ts.order()
+            if rc != 0:
+                raise CreatorError("ordering packages for installation failed!")
+    
+            # FIXME: callback should be refactored a little in yum
+            sys.path.append('/usr/share/yum-cli')
+            import callback
+            cb = callback.RPMInstallCallback()
+            cb.tsInfo = self.tsInfo
+            cb.filelog = False
+            ret = self.runTransaction(cb)
+            print ""
+            self._cleanupRpmdbLocks(self.conf.installroot)
+            return ret
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def getAllContent(self):
+        return self.__pkgs_content
+
+mic_plugin = ["yum", Yum]
index e69de29..b5d0b32 100644 (file)
@@ -0,0 +1,753 @@
+#!/usr/bin/python
+
+import os
+import sys
+import glob
+import re
+import zypp
+import rpm
+import shutil
+import tempfile
+import subprocess
+import urlparse
+import urllib2 as u2
+import pykickstart.parser
+from micng.utils.errors import *
+from micng.imager.baseimager import BaseImageCreator as ImageCreator
+from micng.utils.fs_related import *
+from micng.utils.misc import *
+from micng.utils.rpmmisc import *
+from micng.pluginbase.backend_plugin import BackendPlugin
+
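+# Minimal stand-in for a yum repository object, carrying just the fields
+# (baseurl, mirrorlist, proxy settings, enable/refresh flags, ...) that the
+# zypp backend and the shared EULA check below need.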
+class RepositoryStub:
+    def __init__(self):
+        self.name = None
+        self.baseurl = []
+        self.mirrorlist = None
+        self.proxy = None
+        self.proxy_username = None
+        self.proxy_password = None
+        self.includepkgs = None
+        self.exclude = None
+
+        self.enabled = True
+        self.autorefresh = True
+        self.keeppackages = True
+
+class RepoError(CreatorError):
+    pass
+
+class RpmError(CreatorError):
+    pass
+
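+# Backend plugin built on libzypp's python bindings: packages and patterns are
+# selected and resolved through the zypp pool, then downloaded from the
+# configured repositories and installed with rpm.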
+class Zypp(BackendPlugin):
+    def __init__(self, creator = None, recording_pkgs=None):
+        if not isinstance(creator, ImageCreator):
+            raise CreatorError("Invalid argument: creator")
+
+        self.__recording_pkgs = recording_pkgs
+        self.__pkgs_content = {}
+        self.creator = creator
+        self.repos = []
+        self.packages = []
+        self.patterns = []
+        self.localpkgs = {}
+        self.repo_manager = None
+        self.repo_manager_options = None
+        self.Z = None
+        self.ts = None
+        self.probFilterFlags = []
+        self.bin_rpm = find_binary_path("rpm")
+        self.incpkgs = []
+        self.excpkgs = []
+
+    def doFileLogSetup(self, uid, logfile):
+        # don't do the file log for the livecd as it can lead to open fds
+        # being left and an inability to clean up after ourself
+        pass
+
+    def closeRpmDB(self):
+        pass
+
+    def close(self):
+        try:
+            os.unlink(self.installroot + "/yum.conf")
+        except:
+            pass
+        self.closeRpmDB()
+        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
+            for i in range(3, os.sysconf("SC_OPEN_MAX")):
+                try:
+                    os.close(i)
+                except:
+                    pass
+        if self.ts:
+            self.ts.closeDB()
+            self.ts = None
+
+    def __del__(self):
+        self.close()
+
+    def _writeConf(self, confpath, installroot):
+        conf  = "[main]\n"
+        conf += "installroot=%s\n" % installroot
+        conf += "cachedir=/var/cache/yum\n"
+        conf += "plugins=0\n"
+        conf += "reposdir=\n"
+        conf += "failovermethod=priority\n"
+        conf += "http_caching=packages\n"
+
+        f = file(confpath, "w+")
+        f.write(conf)
+        f.close()
+
+        os.chmod(confpath, 0644)
+
+    def _cleanupRpmdbLocks(self, installroot):
+        # cleans up temporary files left by bdb so that differing
+        # versions of rpm don't cause problems
+        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
+            os.unlink(f)
+
+    def setup(self, confpath, installroot):
+        self._writeConf(confpath, installroot)
+        self._cleanupRpmdbLocks(installroot)
+        self.installroot = installroot
+
+    def selectPackage(self, pkg):
+        """ Select a given package or package pattern, can be specified with name.arch or name* or *name """
+        if not self.Z:
+            self.__initialize_zypp()
+        
+        found = False
+        startx = pkg.startswith("*")
+        endx = pkg.endswith("*")
+        ispattern = startx or endx
+        sp = pkg.rsplit(".", 2)
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "package":
+                name = "%s" % item.name()
+                if not ispattern:
+                    if name in self.incpkgs or name in self.excpkgs:
+                        found = True
+                        break
+                    if len(sp) == 2:
+                        arch = "%s" % item.arch()
+                        if name == sp[0] and arch == sp[1]:
+                            found = True
+                            if name not in self.packages:
+                                self.packages.append(name)
+                                item.status().setToBeInstalled (zypp.ResStatus.USER)
+                            break
+                    else:
+                        if name == sp[0]:
+                            found = True
+                            if name not in self.packages:
+                                self.packages.append(name)
+                                item.status().setToBeInstalled (zypp.ResStatus.USER)
+                            break
+                else:
+                    if name in self.incpkgs or name in self.excpkgs:
+                        found = True
+                        continue
+                    if startx and name.endswith(sp[0][1:]):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+
+                    if endx and name.startswith(sp[0][:-1]):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+        if found:
+            return None
+        else:
+            e = CreatorError("Unable to find package: %s" % (pkg,))
+            return e
+
+    def deselectPackage(self, pkg):
+        """Deselect package.  Can be specified as name.arch or name*"""
+        
+        if not self.Z:
+            self.__initialize_zypp()
+        
+        startx = pkg.startswith("*")
+        endx = pkg.endswith("*")
+        ispattern = startx or endx
+        sp = pkg.rsplit(".", 2)
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "package":
+                name = "%s" % item.name()
+                if not ispattern:
+                    if len(sp) == 2:
+                        arch = "%s" % item.arch()
+                        if name == sp[0] and arch == sp[1]:
+                            if item.status().isToBeInstalled():
+                                item.status().resetTransact(zypp.ResStatus.USER)
+                            if name in self.packages:
+                                self.packages.remove(name)
+                            break
+                    else:
+                        if name == sp[0]:
+                            if item.status().isToBeInstalled():
+                                item.status().resetTransact(zypp.ResStatus.USER)
+                            if name in self.packages:
+                                self.packages.remove(name)
+                            break                             
+                else:
+                    if startx and name.endswith(sp[0][1:]):
+                        if item.status().isToBeInstalled():
+                            item.status().resetTransact(zypp.ResStatus.USER)
+                        if name in self.packages:
+                            self.packages.remove(name)
+
+                    if endx and name.startswith(sp[0][:-1]):
+                        if item.status().isToBeInstalled():
+                            item.status().resetTransact(zypp.ResStatus.USER)
+                        if name in self.packages:
+                            self.packages.remove(name)
+    
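+    # Packages listed in incpkgs/excpkgs are tied to repositories whose alias
+    # ends with "include"/"exclude" (see addRepository); they are selected
+    # here just before the transaction is built in runInstall.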
+    def __selectIncpkgs(self):        
+        found = False
+        for pkg in self.incpkgs:
+            for item in self.Z.pool():
+                kind = "%s" % item.kind()
+                if kind == "package":
+                    name = "%s" % item.name()
+                    repoalias = "%s" % item.repoInfo().alias()
+                    if name == pkg and repoalias.endswith("include"):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+                        break         
+        if not found:
+            raise CreatorError("Unable to find package: %s" % (pkg,))
+    
+    def __selectExcpkgs(self):    
+        found = False        
+        for pkg in self.excpkgs:
+            for item in self.Z.pool():
+                kind = "%s" % item.kind()
+                if kind == "package":
+                    name = "%s" % item.name()
+                    repoalias = "%s" % item.repoInfo().alias()
+                    if name == pkg and not repoalias.endswith("exclude"):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+                        break                     
+        if not found:
+            raise CreatorError("Unable to find package: %s" % (pkg,))
+
+        
+    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
+        if not self.Z:
+            self.__initialize_zypp()
+        found = False
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "pattern":
+                summary = "%s" % item.summary()
+                name = "%s" % item.name()
+                if name == grp or summary == grp:
+                    found = True
+                    if name not in self.patterns:
+                        self.patterns.append(name)
+                        item.status().setToBeInstalled (zypp.ResStatus.USER)
+                    break
+                
+        if found:
+            if include == pykickstart.parser.GROUP_REQUIRED:
+                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
+            elif include == pykickstart.parser.GROUP_ALL:
+                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
+            return None
+        else:
+            e = CreatorError("Unable to find pattern: %s" % (grp,))
+            return e
+
+    def __checkAndDownloadURL(self, u2opener, url, savepath):
+        try:
+            if u2opener:
+                f = u2opener.open(url)
+            else:
+                f = u2.urlopen(url)
+        except u2.HTTPError, httperror:
+            if httperror.code in (404, 503):
+                return None
+            else:
+                raise CreatorError(httperror)
+        except OSError, oserr:
+            if oserr.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except IOError, oserr:
+            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except u2.URLError, err:
+            raise CreatorError(err)
+
+        # save to file
+        licf = open(savepath, "w")
+        licf.write(f.read())
+        licf.close()
+        f.close()
+
+        return savepath
+
+    def __pagerFile(self, savepath):
+        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
+            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
+        else:
+            pagers = ('less', 'more')
+
+        file_showed = None
+        for pager in pagers:
+            try:
+                subprocess.call([pager, savepath])
+            except OSError:
+                continue
+            else:
+                file_showed = True
+                break
+        if not file_showed:
+            f = open(savepath)
+            print f.read()
+            f.close()
+            raw_input('press <ENTER> to continue...')
+
+    def checkRepositoryEULA(self, name, repo):
+        """ This function is to check the LICENSE file if provided. """
+
+        # when proxy needed, make urllib2 follow it
+        proxy = repo.proxy
+        proxy_username = repo.proxy_username
+        proxy_password = repo.proxy_password
+
+        handlers = []
+        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
+        u2opener = None
+        if proxy:
+            if proxy_username:
+                proxy_netloc = urlparse.urlsplit(proxy).netloc
+                if proxy_password:
+                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
+                else:
+                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
+            else:
+                proxy_url = proxy
+
+            proxy_support = u2.ProxyHandler({'http': proxy_url,
+                                             'ftp': proxy_url})
+            handlers.append(proxy_support)
+
+        # download all remote files to one temp dir
+        baseurl = None
+        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
+
+        for url in repo.baseurl:
+            if not url.endswith('/'):
+                url += '/'
+            tmphandlers = handlers
+            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+            if scheme not in ("http", "https", "ftp", "ftps", "file"):
+                raise CreatorError("Error: invalid url %s" % url)
+            if '@' in host:
+                try:
+                    user_pass, host = host.split('@', 1)
+                    if ':' in user_pass:
+                        user, password = user_pass.split(':', 1)
+                except ValueError, e:
+                    raise CreatorError('Bad URL: %s' % url)
+                print "adding HTTP auth: %s, %s" %(user, password)
+                auth_handler.add_password(None, host, user, password)
+                tmphandlers.append(auth_handler)
+                url = scheme + "://" + host + path + parm + query + frag
+            if len(tmphandlers) != 0:
+                u2opener = u2.build_opener(*tmphandlers)
+            # try to download
+            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
+            repo_eula_path = self.__checkAndDownloadURL(
+                                    u2opener,
+                                    repo_eula_url,
+                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
+            if repo_eula_path:
+                # found
+                baseurl = url
+                break
+
+        if not baseurl:
+            return True
+
+        # show the license file
+        print 'For the software packages in this yum repo:'
+        print '    %s: %s' % (name, baseurl)
+        print 'There is an "End User License Agreement" file that need to be checked.'
+        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
+        raw_input('press <ENTER> to continue...')
+
+        self.__pagerFile(repo_eula_path)
+
+        # Asking for the "Accept/Decline"
+        accept = True
+        while accept:
+            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
+            if input_accept.upper() in ('YES', 'Y'):
+                break
+            elif input_accept.upper() in ('NO', 'N'):
+                accept = None
+                print 'Will not install pkgs from this repo.'
+
+        if not accept:
+            #cleanup
+            shutil.rmtree(repo_lic_dir)
+            return None
+
+        # try to find support_info.html for extra information
+        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
+        repo_info_path = self.__checkAndDownloadURL(
+                                u2opener,
+                                repo_info_url,
+                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
+        if repo_info_path:
+            print 'There is one more file in the repo for additional support information, please read it'
+            raw_input('press <ENTER> to continue...')
+            self.__pagerFile(repo_info_path)
+
+        #cleanup
+        shutil.rmtree(repo_lic_dir)
+        return True
+
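+    # Build a RepositoryStub, run the shared EULA check, then register the
+    # repository with zypp's RepoManager. When inc/exc package lists are
+    # given, the alias gets an "include"/"exclude" suffix so those packages
+    # can later be matched by __selectIncpkgs/__selectExcpkgs.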
+    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
+        if not self.repo_manager:
+            self.__initialize_repo_manager()
+
+        repo = RepositoryStub()
+        repo.name = name
+        repo.id = name
+        repo.proxy = proxy
+        repo.proxy_username = proxy_username
+        repo.proxy_password = proxy_password
+        repo.baseurl.append(url)
+        repo_alias = repo.id
+        if inc:
+            repo_alias = name + "include"
+            self.incpkgs = inc
+        if exc:
+            repo_alias = name + "exclude"
+            self.excpkgs = exc
+
+        # check LICENSE files
+        if not self.checkRepositoryEULA(name, repo):
+            return None
+
+        if mirrorlist:
+            repo.mirrorlist = mirrorlist
+
+        # Enable gpg check for verifying corrupt packages
+        repo.gpgcheck = 1
+        self.repos.append(repo)
+
+
+        try:
+            repo_info = zypp.RepoInfo()
+            repo_info.setAlias(repo_alias)
+            repo_info.setName(repo.name)
+            repo_info.setEnabled(repo.enabled)
+            repo_info.setAutorefresh(repo.autorefresh)
+            repo_info.setKeepPackages(repo.keeppackages)
+            repo_info.addBaseUrl(zypp.Url(repo.baseurl[0]))
+            self.repo_manager.addRepository(repo_info)
+            self.__build_repo_cache(name)
+        except RuntimeError, e:
+            raise CreatorError("%s" % (e,))
+
+        return repo
+
+    def installHasFile(self, file):
+        return False
+
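+    # Resolve the pool, collect every non-pattern resolvable marked for
+    # installation, check the total size against the root partition size from
+    # the kickstart, then download the rpms (unless cached) and install them.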
+    def runInstall(self, checksize = 0):
+        if self.incpkgs:
+            self.__selectIncpkgs()
+        if self.excpkgs:
+            self.__selectExcpkgs()
+        
+        os.environ["HOME"] = "/"
+        self.buildTransaction()
+
+        todo = zypp.GetResolvablesToInsDel(self.Z.pool())
+        installed_pkgs = todo._toInstall
+        dlpkgs = []
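+        # keep only real package resolvables; pattern resolvables are skipped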
+        for item in installed_pkgs:
+            if not zypp.isKindPattern(item):
+                dlpkgs.append(item)
+
+        # record the total size of installed pkgs
+        pkgs_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))
+
+        # check needed size before actually download and install
+        if checksize and pkgs_total_size > checksize:
+            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
+
+        if self.__recording_pkgs:
+            # record all packages and their contents
+            for pkg in dlpkgs:
+                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name(), pkg.edition(), pkg.arch())
+                self.__pkgs_content[pkg_long_name] = {} #TBD: to get file list
+
+        total_count = len(dlpkgs)
+        cached_count = 0
+        localpkgs = self.localpkgs.keys()
+        print "Checking packages cache and packages integrity..."
+        for po in dlpkgs:
+            """ Check if it is cached locally """
+            if po.name() in localpkgs:
+                cached_count += 1
+            else:
+                local = self.getLocalPkgPath(po)
+                if os.path.exists(local):
+                    if self.checkPkg(local) != 0:
+                        os.unlink(local)
+                    else:
+                        cached_count += 1
+        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
+        try:
+            print "downloading packages..."
+            self.downloadPkgs(dlpkgs)
+            self.installPkgs(dlpkgs)
+    
+        except RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except RpmError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def getAllContent(self):
+        return self.__pkgs_content
+
+    def __initialize_repo_manager(self):
+        if self.repo_manager:
+            return
+
+        """ Clean up repo metadata """
+        shutil.rmtree(self.creator.cachedir + "/var", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/etc", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/raw", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/solv", ignore_errors = True)
+        
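+        # relax keyring checks: accept unsigned files, unknown keys and failed verification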
+        zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
+                                       | zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
+                                       | zypp.KeyRing.ACCEPT_UNKNOWNKEY
+                                       | zypp.KeyRing.TRUST_KEY_TEMPORARILY
+                                     )
+        self.repo_manager_options = zypp.RepoManagerOptions(zypp.Pathname(self.creator._instroot))
+        self.repo_manager_options.knownReposPath = zypp.Pathname(self.creator.cachedir + "/etc/zypp/repos.d")
+        self.repo_manager_options.repoCachePath = zypp.Pathname(self.creator.cachedir + "/var/cache/zypp")
+        self.repo_manager_options.repoRawCachePath = zypp.Pathname(self.creator.cachedir + "/raw")
+        self.repo_manager_options.repoSolvCachePath = zypp.Pathname(self.creator.cachedir + "/solv")
+        self.repo_manager_options.repoPackagesCachePath = zypp.Pathname(self.creator.cachedir + "/packages")
+        
+        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
+
+
+    def __build_repo_cache(self, name):
+        repos = self.repo_manager.knownRepositories()
+        for repo in repos:
+            if not repo.enabled():
+                continue
+            reponame = "%s" % repo.name()
+            if reponame != name:
+                continue
+            if self.repo_manager.isCached( repo ):
+                return
+            #print "Retrieving repo metadata from %s ..." % repo.url()
+            self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
+
+
+    def __initialize_zypp(self):
+        if self.Z:
+            return
+
+        zconfig = zypp.ZConfig_instance()
+
+        """ Set system architecture """
+        if self.creator.target_arch and self.creator.target_arch.startswith("arm"):
+            arches = ["armv7l", "armv7nhl", "armv7hl"]
+            if self.creator.target_arch not in arches:
+                raise CreatorError("Invalid architecture: %s" % self.creator.target_arch)
+            arch_map = {}
+            if self.creator.target_arch == "armv7l":
+                arch_map["armv7l"] = zypp.Arch_armv7l()
+            elif self.creator.target_arch == "armv7nhl":
+                arch_map["armv7nhl"] = zypp.Arch_armv7nhl()
+            elif self.creator.target_arch == "armv7hl":
+                arch_map["armv7hl"] = zypp.Arch_armv7hl() 
+            zconfig.setSystemArchitecture(arch_map[self.creator.target_arch])
+
+        print "zypp architecture: %s" % zconfig.systemArchitecture()
+
+        """ repoPackagesCachePath is corrected by this """
+        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
+        repos = self.repo_manager.knownRepositories()
+        for repo in repos:
+            if not repo.enabled():
+                continue
+            if not self.repo_manager.isCached( repo ):
+                print "Retrieving repo metadata from %s ..." % repo.url()
+                self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
+            else:
+                self.repo_manager.refreshMetadata(repo, zypp.RepoManager.BuildIfNeeded)
+            self.repo_manager.loadFromCache( repo );
+
+        self.Z = zypp.ZYppFactory_instance().getZYpp()
+        self.Z.initializeTarget( zypp.Pathname(self.creator._instroot) )
+        self.Z.target().load();
+
+
+    def buildTransaction(self):
+        if not self.Z.resolver().resolvePool():
+            print "Problem count: %d" % len(self.Z.resolver().problems())
+            for problem in self.Z.resolver().problems():
+                print "Problem: %s, %s" % (problem.description().decode("utf-8"), problem.details().decode("utf-8"))
+
+    def getLocalPkgPath(self, po):
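+        # expected cache location: <packagesPath>/<arch>/<name>-<version>-<release>.<arch>.rpm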
+        repoinfo = po.repoInfo()
+        name = po.name()
+        cacheroot = repoinfo.packagesPath()
+        arch =  po.arch()
+        edition = po.edition()
+        version = "%s-%s" % (edition.version(), edition.release())
+        pkgpath = "%s/%s/%s-%s.%s.rpm" % (cacheroot, arch, name, version, arch)
+        return pkgpath
+
+    def installLocal(self, pkg, po=None, updateonly=False):
+        if not self.ts:
+            self.__initialize_transaction()
+        pkgname = self.__get_pkg_name(pkg)
+        self.localpkgs[pkgname] = pkg
+        self.selectPackage(pkgname)
+
+    def __get_pkg_name(self, pkgpath):
+        h = readRpmHeader(self.ts, pkgpath)
+        return h["name"]
+
+    def downloadPkgs(self, package_objects):
+        localpkgs = self.localpkgs.keys()
+        for po in package_objects:
+            if po.name() in localpkgs:
+                continue
+            filename = self.getLocalPkgPath(po)
+            if os.path.exists(filename):
+                if self.checkPkg(filename) == 0:
+                    continue
+            dir = os.path.dirname(filename)
+            if not os.path.exists(dir):
+                makedirs(dir)
+            baseurl = po.repoInfo().baseUrls()[0].__str__()
+            proxy = self.get_proxy(po.repoInfo())
+            proxies = {}
+            if proxy:
+                proxies = {str(proxy.split(":")[0]):str(proxy)}
+          
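+            # compose the download URL from the repo baseurl and the package location inside the repo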
+            location = zypp.asKindPackage(po).location()
+            location = location.filename().__str__()
+            if location.startswith("./"):
+                location = location[2:]
+            url = baseurl + "/%s" % location
+            try:
+                filename = myurlgrab(url, filename, proxies)
+            except CreatorError, e:
+                self.close()
+                raise CreatorError("%s" % e)
+
+    def installPkgs(self, package_objects):
+        if not self.ts:
+            self.__initialize_transaction()
+
+        """ Set filters """
+        probfilter = 0
+        for flag in self.probFilterFlags:
+            probfilter |= flag
+        self.ts.setProbFilter(probfilter)
+
+        localpkgs = self.localpkgs.keys()
+        for po in package_objects:
+            pkgname = po.name()
+            if pkgname in localpkgs:
+                rpmpath = self.localpkgs[pkgname]
+            else:
+                rpmpath = self.getLocalPkgPath(po)
+            if not os.path.exists(rpmpath):
+                """ Maybe it is a local repo """
+                baseurl = po.repoInfo().baseUrls()[0].__str__()
+                baseurl = baseurl.strip()
+                if baseurl.startswith("file:/"):
+                    rpmpath = baseurl[5:] + "/%s/%s" % (po.arch(), os.path.basename(rpmpath))
+            if not os.path.exists(rpmpath):
+                raise RpmError("Error: %s doesn't exist" % rpmpath)
+            h = readRpmHeader(self.ts, rpmpath)
+            self.ts.addInstall(h, rpmpath, 'u')
+
+        unresolved_dependencies = self.ts.check()
+        if not unresolved_dependencies:
+            self.ts.order()
+            cb = RPMInstallCallback(self.ts)
+            self.ts.run(cb.callback, '')
+            self.ts.closeDB()
+            self.ts = None
+        else:
+            print unresolved_dependencies
+            raise RepoError("Error: Unresolved dependencies, transaction failed.")
+
+    def __initialize_transaction(self):
+        if not self.ts:
+            self.ts = rpm.TransactionSet(self.creator._instroot)
+            # Do not verify package signatures or digests.
+            self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
+
+    def checkPkg(self, pkg):
+        ret = 1
+        if not os.path.exists(pkg):
+            return ret
+        ret = checkRpmIntegrity(self.bin_rpm, pkg)
+        if ret != 0:
+            print "Package %s is damaged: %s" % (os.path.basename(pkg), pkg)
+        return ret
+
+    def zypp_install(self):
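+        # commit the resolved transaction through libzypp, downloading all packages in advance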
+        policy = zypp.ZYppCommitPolicy()
+        policy.downloadMode(zypp.DownloadInAdvance)
+        policy.dryRun( False )
+        policy.syncPoolAfterCommit( False )
+        result = self.Z.commit( policy )
+        print result
+
+    def _add_prob_flags(self, *flags):
+        for flag in flags:
+            if flag not in self.probFilterFlags:
+                self.probFilterFlags.append(flag)
+
+    def get_proxy(self, repoinfo):
+        proxy = None
+        reponame = "%s" % repoinfo.name()
+        for repo in self.repos:
+            if repo.name == reponame:
+                proxy = repo.proxy
+                break
+        if proxy:
+            return proxy
+        else:
+            repourl = repoinfo.baseUrls()[0].__str__()
+            return get_proxy(repourl)
+
+mic_plugin = ["zypp", Zypp]
+
similarity index 100%
rename from plugins/hook/_hook.py
rename to plugins/hook/.py
index f90b94a..e40e1fd 100644 (file)
@@ -1,47 +1,83 @@
 #!/usr/bin/python
+import sys
+import subprocess
+import logging
 
 from micng.pluginbase.imager_plugin import ImagerPlugin
-from micng.imager.fs import *
+import micng.utils.cmdln as cmdln
+import micng.utils.errors as errors
 import micng.configmgr as configmgr
-try:
-    import argparse
-except:
-    import micng.utils.argparse
+import micng.pluginmgr as pluginmgr
+import micng.imager.fs as fs
+import micng.utils.misc as misc
+import micng.chroot as chroot
+
 
 class FsPlugin(ImagerPlugin):
-    """hello livecd
-    """
     @classmethod
-    def do_options(self, parser):
-        parser.add_argument('ksfile', nargs='?', help='kickstart file')
-        parser.add_argument('--release', help='fs options test')
+    @cmdln.option("--include-src", dest="include_src", help="include source pakcage")
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: create fs image
 
-    @classmethod
-    def do_create(self, args):
-        if args.release:
-            print "fs option release: ", args.release
-        if not args.ksfile:
-            print "please specify a kickstart file"
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+        if len(args) == 0:
             return
-#        print "ksfile", args.ksfile
-        self.configmgr = configmgr.getConfigMgr()
-        self.configmgr.setProperty('ksfile', args.ksfile)
-#        print "ksfile", self.configmgr.getProperty('ksfile')
-        self.ks = self.configmgr.getProperty('kickstart')
-        self.name = self.configmgr.getProperty('name')
-        fs = FsImageCreator(self.ks, self.name)
-        try:
-            fs.outdir = self.configmgr.getProperty('outdir')
-            fs.mount(None, self.configmgr.cache)
-            fs.install()
-            fs.configure(self.configmgr.repometadata)
-            fs.unmount()
-            fs.package(self.configmgr.outdir)
-            print "Finished"
-        except Exception, e:
-            print "failed to create image: %s" % e
-        finally:
-            fs.cleanup()
+        if len(args) == 1:
+            ksconf = args[0]
+        else:
+            raise errors.Usage("Extra arguments given")
 
+        cfgmgr = configmgr.getConfigMgr()
+        createopts = cfgmgr.create
+        cfgmgr.setProperty("ksconf", ksconf)
 
+        plgmgr = pluginmgr.PluginMgr()
+        plgmgr.loadPlugins()
+        pkgmgr = None
+        for (key, pcls) in plgmgr.getBackendPlugins():
+            if key == createopts['pkgmgr']:
+                pkgmgr = pcls
+        if not pkgmgr:
+            #logging.warn("Can't find backend plugin: %s" % createopts['pkgmgr'])
+            raise errors.CreatorError("Can't find backend plugin: %s" % createopts['pkgmgr'])
+            # try other backend plugin
+            #try:
+            #except:    
+            #    raise CreatorError("None backend found, please check it")
+
+        creator = fs.FsImageCreator(createopts, pkgmgr)
+        try:
+            creator.check_depend_tools()
+            creator.mount(None, createopts["cachedir"])  
+            creator.install()
+            # Download the source packages (private option)
+            if opts.include_src:
+                installed_pkgs =  creator.get_installed_packages()
+                print '--------------------------------------------------'
+                print 'Generating the image with source rpms included; the number of source packages is %d.' % len(installed_pkgs)
+                if not misc.SrcpkgsDownload(installed_pkgs, createopts["repomd"], creator._instroot, createopts["cachedir"]):
+                    print "Source packages can't be downloaded"
+    
+            creator.configure(createopts["repomd"])
+            creator.unmount()
+            creator.package(createopts["outdir"])
+            outimage = creator.outimage
+            creator.print_outimage_info()
+        except errors.CreatorError, e:
+            raise errors.CreatorError("failed to create image : %s" % e)
+        finally:
+            creator.cleanup()
+            print "Finished."
+        return 0    
+           
+    @classmethod
+    def do_chroot(self, target):#chroot.py parse opts&args
+        try:
+            chroot.chroot(target, None, "/bin/env HOME=/root /bin/bash")
+        except:
+            print >> sys.stderr, "Failed to chroot to %s." % target
+        finally:
+            chroot.cleanup_after_chroot("dir", None, None, None)
+            return 1
+            
 mic_plugin = ["fs", FsPlugin]
index 9edddb8..1ab28ce 100644 (file)
 #!/usr/bin/python
+import os.path
+import sys
+import subprocess
+import logging
+import shutil
+
 from micng.pluginbase.imager_plugin import ImagerPlugin
-import micng.imager as imager
-import micng.configmgr as cfgmgr
-import micng.utils as utils
+import micng.chroot as chroot
+import micng.utils.misc as misc
+import micng.utils.fs_related as fs_related
 import micng.utils.cmdln as cmdln
-import os, time
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+import micng.imager.livecd as livecd
+from micng.utils.errors import *
 
-class LivecdPlugin(ImagerPlugin):
-    @classmethod
-    def do_options(self, parser):
-        parser.add_argument("-vid", "--volumeid", type="string", default=None, help="Specify volume id")
-        parser.add_argument("ksfile", help="kickstart file")
+class LiveCDPlugin(ImagerPlugin):
 
     @classmethod
-    def do_create(self, args):
-        if not args.ksfile:
-            print "please specify kickstart file"
-            return
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: create livecd image
 
-        self.configmgr = cfgmgr.getConfigMgr()
-        self.configmgr.setProperty('ksfile', args.ksfile)
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+        if len(args) == 0:
+            return
+        if len(args) == 1:
+            ksconf = args[0]
+        else:
+            raise Usage("Extra arguments given")
 
-        fs_label = utils.kickstart.build_name(
-                     args.ksfile,
-                     "%s-" % self.configmgr.name,
-                     maxlen = 32,
-                     suffix = "%s-%s" %(os.uname()[4], time.strftime("%Y%m%d%H%M")))
-        
-        creator = imager.livecd.LivecdImageCreator(
-                    self.configmgr.kickstart, self.configmgr.name, fs_label)
-        
-        creator.skip_compression = False
-        creator.skip_minimize = False
-            
-        creator.tmpdir = self.configmgr.tmpdir
-        creator._alt_initrd_name = None
-        creator._recording_pkgs = None
-        creator._include_src = False
-        creator._local_pkgs_path = None
-        creator._genchecksum = False
-        creator.distro_name = self.configmgr.name
-        creator.image_format = "livecd"
-    
+        cfgmgr = configmgr.getConfigMgr()
+        cfgmgr.setProperty("ksconf", ksconf)
+        creatoropts = cfgmgr.create
+        plgmgr = pluginmgr.PluginMgr()
+        plgmgr.loadPlugins()
         
-        utils.kickstart.resolve_groups(creator, self.configmgr.repometadata, False)
-    
-        imgname = creator.name
-            
+        pkgmgr = None
+        for (key, pcls) in plgmgr.getBackendPlugins():
+            if key == creatoropts['pkgmgr']:
+                pkgmgr = pcls
+
+        if not pkgmgr:
+            raise CreatorError("Can't find backend plugin: %s" % creatoropts['pkgmgr'])
+
+        creator = livecd.LiveCDImageCreator(creatoropts, pkgmgr)
         try:
             creator.check_depend_tools()
-            creator.mount(None, self.configmgr.cache)
+            creator.mount(None, creatoropts["cachedir"])
             creator.install()
-    
-            creator.configure(self.configmgr.repometadata)
+            creator.configure(creatoropts["repomd"])
             creator.unmount()
-            creator.package(self.configmgr.outdir)
+            creator.package(creatoropts["outdir"])
             outimage = creator.outimage
-                
-            creator.package_output("livecd", self.configmgr.outdir, "none")
             creator.print_outimage_info()
             outimage = creator.outimage
-            
-        except Exception, e:
-            raise Exception("failed to create image : %s" % e)
+        except CreatorError, e:
+            raise CreatorError("failed to create image : %s" % e)
         finally:
             creator.cleanup()
-    
-        print "Finished."        
+#        if not creatoropts["image_info"]:
+            print "Finished."
+        return 0
+
+    @classmethod
+    def do_chroot(cls, target):
+        img = target
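+        # loop-mount the live image, unsquash LiveOS/squashfs.img, then mount the embedded ext3 rootfs and chroot into it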
+        imgmnt = misc.mkdtemp()
+        imgloop = fs_related.DiskMount(fs_related.LoopbackDisk(img, 0), imgmnt)
+        try:
+            imgloop.mount()
+        except MountError, e:
+            imgloop.cleanup()
+            raise CreatorError("Failed to loopback mount '%s' : %s" %(img, e))
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(imgmnt + "/squashfs.img"):
+            squashimg = imgmnt + "/squashfs.img"
+        else:
+            squashimg = imgmnt + "/LiveOS/squashfs.img"
+
+        tmpoutdir = misc.mkdtemp()
+        # unsquashfs requires that the output dir not exist
+        shutil.rmtree(tmpoutdir, ignore_errors = True)
+        misc.uncompress_squashfs(squashimg, tmpoutdir)
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(tmpoutdir + "/os.img"):
+            os_image = tmpoutdir + "/os.img"
+        else:
+            os_image = tmpoutdir + "/LiveOS/ext3fs.img"
+
+        if not os.path.exists(os_image):
+            imgloop.cleanup()
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            shutil.rmtree(imgmnt, ignore_errors = True)
+            raise CreatorError("'%s' is not a valid live CD ISO : neither "
+                               "LiveOS/ext3fs.img nor os.img exist" %img)
+
+        #unpack image to target dir
+        imgsize = misc.get_file_size(os_image) * 1024L * 1024L
+        extmnt = misc.mkdtemp()
+        tfstype = "ext3"
+        tlabel = "ext3 label"
+        MyDiskMount = fs_related.ExtDiskMount
+        #if misc.fstype_is_btrfs(os_image):
+        #    tfstype = "btrfs"
+        #    tlabel = "btrfs label"
+        #    MyDiskMount = fs_related.BtrfsDiskMount
+        extloop = MyDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize),
+                                              extmnt,
+                                              tfstype,
+                                              4096,
+                                              tlabel)
+        try:
+            extloop.mount()
+        except MountError, e:
+            extloop.cleanup()
+            shutil.rmtree(extmnt, ignore_errors = True)
+            imgloop.cleanup()
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            shutil.rmtree(imgmnt, ignore_errors = True)
+            raise CreatorError("Failed to loopback mount '%s' : %s" %(os_image, e))
+        try:
+            chroot.chroot(extmnt, None,  "/bin/env HOME=/root /bin/bash")
+        except:
+            print >> sys.stderr, "Failed to chroot to %s." % img
+        finally:
+            chroot.cleanup_after_chroot("img",extloop,None,None)
+            return 1
+        
+    def do_pack(self):              
+        def __mkinitrd(instance):
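+            # run mkliveinitrd with the creator's _chroot preexec hook so it operates inside the image root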
+            kernelver = instance._get_kernel_versions().values()[0][0]
+            args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ]
+            try:
+                subprocess.call(args, preexec_fn = instance._chroot)
+            except OSError, (err, msg):
+               raise CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg)
+                   
+        def __run_post_cleanups(instance):
+            kernelver = instance._get_kernel_versions().values()[0][0]
+            args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver]
+            try:
+                subprocess.call(args, preexec_fn = instance._chroot)
+            except OSError, (err, msg):
+               raise CreatorError("Failed to run post cleanups: %s" % msg)
+               
+        __mkinitrd(convertor)
+        convertor._create_bootconfig()
+        __run_post_cleanups(convertor)
+        convertor.unmount()
+        convertor.package()
+        convertor.print_outimage_info()
+            
+    def do_unpack(self):
+        convertoropts = configmgr.getConfigMgr().convert
+        convertor = convertoropts["convertor"](convertoropts)        #consistent with destfmt
+        srcimgsize = (misc.get_file_size(convertoropts["srcimg"])) * 1024L * 1024L
+        convertor._set_fstype("ext3")
+        convertor._set_image_size(srcimgsize)
+        base_on = convertoropts["srcimg"]
+        convertor.check_depend_tools()
+        convertor.mount(base_on, None)
+        return convertor
 
-mic_plugin = ["livecd", LivecdPlugin]
+mic_plugin = ["livecd", LiveCDPlugin]
diff --git a/plugins/imager/liveusb_plugin.py b/plugins/imager/liveusb_plugin.py
new file mode 100644 (file)
index 0000000..ecc77b5
--- /dev/null
@@ -0,0 +1,182 @@
+#!/usr/bin/python
+import os.path
+import sys
+import subprocess
+import logging
+import shutil
+
+from micng.pluginbase.imager_plugin import ImagerPlugin
+import micng.imager.liveusb as liveusb
+import micng.utils.misc as misc
+import micng.utils.fs_related as fs_related
+import micng.utils.cmdln as cmdln
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+from micng.utils.partitionedfs import PartitionedMount
+from micng.utils.errors import *
+import micng.chroot as chroot
+
+class LiveUSBPlugin(ImagerPlugin):
+    #@cmdln.option
+    @classmethod
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: create livecd image
+
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+        if len(args) == 0:
+            return
+        if len(args) == 1:
+            ksconf = args[0]
+        else:
+            raise Usage("Extra arguments given")
+
+
+        cfgmgr = configmgr.getConfigMgr()
+        creatoropts = cfgmgr.create
+        cfgmgr.setProperty("ksconf", args[0])
+        plgmgr = pluginmgr.PluginMgr()
+        plgmgr.loadPlugins()       
+
+        pkgmgr = None
+        for (key, pcls) in plgmgr.getBackendPlugins():
+            if key == creatoropts['pkgmgr']:
+                pkgmgr = pcls
+
+        if not pkgmgr:
+            raise CreatorError("Can't find backend plugin: %s" % creatoropts['pkgmgr'])
+
+        creator = liveusb.LiveUSBImageCreator(creatoropts, pkgmgr)
+        try:
+            creator.check_depend_tools()
+            creator.mount(None, creatoropts["cachedir"])
+            creator.install()
+            creator.configure(creatoropts["repomd"])
+            creator.unmount()
+            creator.package(creatoropts["outdir"])
+            outimage = creator.outimage
+            creator.print_outimage_info()
+            outimage = creator.outimage
+        except CreatorError, e:
+            logging.exception(e)
+            raise CreatorError("failed to create image : %s" % e)
+        finally:
+            creator.cleanup()
+            print "Finished."
+        return 0
+
+    @classmethod    
+    def do_chroot(cls, target):
+        img = target
+        imgsize = misc.get_file_size(img) * 1024L * 1024L
+        imgmnt = misc.mkdtemp()
+        disk = fs_related.SparseLoopbackDisk(img, imgsize)
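+        # expose the raw image as a fake '/dev/sdb' and mount its single vfat partition at '/' without reformatting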
+        imgloop = PartitionedMount({'/dev/sdb':disk}, imgmnt, skipformat = True)
+        imgloop.add_partition(imgsize/1024/1024, "/dev/sdb", "/", "vfat", boot=False)
+        try:
+            imgloop.mount()
+        except MountError, e:
+            imgloop.cleanup()
+            raise CreatorError("Failed to loopback mount '%s' : %s" %(img, e))
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(imgmnt + "/squashfs.img"):
+            squashimg = imgmnt + "/squashfs.img"
+        else:
+            squashimg = imgmnt + "/LiveOS/squashfs.img"
+
+        tmpoutdir = misc.mkdtemp()
+        # unsquashfs requires that the output dir not exist
+        shutil.rmtree(tmpoutdir, ignore_errors = True)
+        misc.uncompress_squashfs(squashimg, tmpoutdir)
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(tmpoutdir + "/os.img"):
+            os_image = tmpoutdir + "/os.img"
+        else:
+            os_image = tmpoutdir + "/LiveOS/ext3fs.img"
+
+        if not os.path.exists(os_image):
+            imgloop.cleanup()
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            shutil.rmtree(imgmnt, ignore_errors = True)
+            raise CreatorError("'%s' is not a valid live CD ISO : neither "
+                               "LiveOS/ext3fs.img nor os.img exist" %img)
+
+        #unpack image to target dir
+        imgsize = misc.get_file_size(os_image) * 1024L * 1024L
+        extmnt = misc.mkdtemp()
+        tfstype = "ext3"
+        tlabel = "ext3 label"
+        MyDiskMount = fs_related.ExtDiskMount
+        #if imgcreate.fstype_is_btrfs(os_image):
+        #    tfstype = "btrfs"
+        #    tlabel = "btrfs label"
+        #    MyDiskMount = fs_related.BtrfsDiskMount
+        extloop = MyDiskMount(fs_related.SparseLoopbackDisk(os_image, imgsize),
+                                              extmnt,
+                                              tfstype,
+                                              4096,
+                                              tlabel)
+        try:
+            extloop.mount()
+        except MountError, e:
+            extloop.cleanup()
+            shutil.rmtree(extmnt, ignore_errors = True)
+            imgloop.cleanup()
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            shutil.rmtree(imgmnt, ignore_errors = True)
+            raise CreatorError("Failed to loopback mount '%s' : %s" %(os_image, e))
+        try:
+            chroot.chroot(extmnt, None,  "/bin/env HOME=/root /bin/bash")
+        except:
+            chroot.cleanup_after_chroot("img", extloop, None, None)
+            print >> sys.stderr, "Failed to chroot to %s." % img
+            return 1
+    
+    @classmethod
+    def do_pack(cls, base_on):              
+        def __mkinitrd(instance):
+            kernelver = instance._get_kernel_versions().values()[0][0]
+            args = [ "/usr/libexec/mkliveinitrd", "/boot/initrd-%s.img" % kernelver, "%s" % kernelver ]
+            try:
+                subprocess.call(args, preexec_fn = instance._chroot)
+            except OSError, (err, msg):
+               raise CreatorError("Failed to execute /usr/libexec/mkliveinitrd: %s" % msg)
+                   
+        def __run_post_cleanups(instance):
+            kernelver = instance._get_kernel_versions().values()[0][0]
+            args = ["rm", "-f", "/boot/initrd-%s.img" % kernelver]
+            try:
+                subprocess.call(args, preexec_fn = instance._chroot)
+            except OSError, (err, msg):
+               raise CreatorError("Failed to run post cleanups: %s" % msg)
+        
+        convertoropts = configmgr.getConfigMgr().convert
+        convertoropts["ks"] = None
+        convertor = liveusb.LiveUSBImageCreator(convertoropts)        #consistent with destfmt
+        srcimgsize = (misc.get_file_size(base_on)) * 1024L * 1024L
+        convertor._set_fstype("ext3")
+        convertor._set_image_size(srcimgsize)
+        convertor._image = base_on
+        #convertor.check_depend_tools()
+        __mkinitrd(convertor)
+        convertor._create_bootconfig()
+        __run_post_cleanups(convertor)
+        convertor.unmount()
+        convertor.package()
+        #convertor.print_outimage_info()
+    
+    @classmethod        
+    def do_unpack(cls, srcimg):
+        convertoropts = configmgr.getConfigMgr().convert
+        convertoropts["ks"] = None
+        convertor = liveusb.LiveUSBImageCreator(convertoropts)        #consistent with destfmt
+        srcimgsize = (misc.get_file_size(srcimg)) * 1024L * 1024L
+        convertor._srcfmt = 'livecd'
+        convertor._set_fstype("ext3")
+        convertor._set_image_size(srcimgsize)
+        #convertor.check_depend_tools()
+        convertor.mount(srcimg, None)
+
+        return convertor._image, convertor._instroot
+
+mic_plugin = ["liveusb", LiveUSBPlugin]
+
diff --git a/plugins/imager/loop_plugin.py b/plugins/imager/loop_plugin.py
new file mode 100644 (file)
index 0000000..0db0938
--- /dev/null
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+import sys
+import subprocess
+import logging
+import shutil
+
+from micng.pluginbase.imager_plugin import ImagerPlugin
+import micng.utils.misc as misc
+import micng.utils.cmdln as cmdln
+import micng.utils.fs_related as fs_related
+from micng.utils.errors import * 
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+import micng.imager.loop as loop
+import micng.chroot as chroot
+
+class LoopPlugin(ImagerPlugin):
+    @classmethod
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: create fs image
+
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+        if len(args) == 0:
+            return
+        if len(args) == 1:
+            ksconf = args[0]
+        else:
+            raise Usage("Extra arguments given")
+
+        cfgmgr = configmgr.getConfigMgr()
+        creatoropts = cfgmgr.create
+        cfgmgr.setProperty("ksconf", ksconf)
+
+        plgmgr = pluginmgr.PluginMgr()
+        plgmgr.loadPlugins()
+        
+        pkgmgr = None
+        for (key, pcls) in plgmgr.getBackendPlugins():
+            if key == creatoropts['pkgmgr']:
+                pkgmgr = pcls
+
+        if not pkgmgr:
+            raise CreatorError("Can't find backend plugin: %s" % creatoropts['pkgmgr'])
+
+        creator = loop.LoopImageCreator(creatoropts, pkgmgr)
+        try:
+            creator.check_depend_tools()
+            creator.mount(None, creatoropts["cachedir"])  
+            creator.install()
+            creator.configure(creatoropts["repomd"])
+            creator.unmount()
+            creator.package(creatoropts["outdir"])
+        except CreatorError, e:
+            raise CreatorError("failed to create image : %s" % e)
+        finally:
+            creator.cleanup()
+        print "Finished."
+        return 0    
+           
+    @classmethod
+    def do_chroot(cls, target):#chroot.py parse opts&args
+        #import pdb
+        #pdb.set_trace()
+        img = target
+        imgsize = misc.get_file_size(img)
+        extmnt = misc.mkdtemp()
+        extloop = fs_related.ExtDiskMount(fs_related.SparseLoopbackDisk(img, imgsize),
+                                                         extmnt,
+                                                         "ext3",
+                                                         4096,
+                                                         "ext3 label")
+        try:
+            extloop.mount()
+            #os_image = img
+        except MountError, e:
+            extloop.cleanup()
+            shutil.rmtree(extmnt, ignore_errors = True)
+            raise CreatorError("Failed to loopback mount '%s' : %s" %(img, e))
+        try:
+            chroot.chroot(extmnt, None,  "/bin/env HOME=/root /bin/bash")
+        except:
+            chroot.cleanup_after_chroot("img", extloop, None, None)
+            print >> sys.stderr, "Failed to chroot to %s." % extloop
+            return 1
+        
+mic_plugin = ["loop", LoopPlugin]
+
diff --git a/plugins/imager/raw_plugin.py b/plugins/imager/raw_plugin.py
new file mode 100644 (file)
index 0000000..5b0f20a
--- /dev/null
@@ -0,0 +1,171 @@
+#!/usr/bin/python
+import os.path
+import sys
+import subprocess
+import logging
+import shutil
+import re
+
+from micng.pluginbase.imager_plugin import ImagerPlugin
+import micng.utils.misc as misc
+import micng.utils.fs_related as fs_related
+import micng.utils.cmdln as cmdln
+from micng.utils.errors import *
+from micng.utils.partitionedfs import PartitionedMount
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+import micng.imager.raw as raw
+import micng.chroot as chroot
+
+class RawPlugin(ImagerPlugin):
+
+    @classmethod
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: create fs image
+
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+        if len(args) == 0:
+            return
+        if len(args) == 1:
+            ksconf = args[0]
+        else:
+            raise Usage("Extra arguments given")
+
+        cfgmgr = configmgr.getConfigMgr()
+        creatoropts = cfgmgr.create
+        cfgmgr.setProperty("ksconf", ksconf)
+        plgmgr = pluginmgr.PluginMgr()
+        plgmgr.loadPlugins()
+        
+        pkgmgr = None
+        for (key, pcls) in plgmgr.getBackendPlugins():
+            if key == creatoropts['pkgmgr']:
+                pkgmgr = pcls
+
+        if not pkgmgr:
+            raise CreatorError("Can't find backend plugin: %s" % creatoropts['pkgmgr'])
+
+        creator = raw.RawImageCreator(creatoropts, pkgmgr)
+        try:
+            creator.check_depend_tools()
+            creator.mount(None, creatoropts["cachedir"])
+            creator.install()
+            creator.configure(creatoropts["repomd"])
+            creator.unmount()
+            creator.package(creatoropts["outdir"])
+            outimage = creator.outimage
+            creator.print_outimage_info()
+            outimage = creator.outimage
+        except CreatorError, e:
+            raise CreatorError("failed to create image : %s" % e)
+        finally:
+            creator.cleanup()
+            print "Finished."
+        return 0
+    
+    @classmethod    
+    def do_chroot(cls, target):
+        img = target
+        imgsize = misc.get_file_size(img) * 1024L * 1024L
+        partedcmd = fs_related.find_binary_path("parted")
+        disk = fs_related.SparseLoopbackDisk(img, imgsize)
+        extmnt = misc.mkdtemp()
+        tmpoutdir = misc.mkdtemp()
+        imgloop = PartitionedMount({'/dev/sdb':disk}, extmnt, skipformat = True)
+        img_fstype = "ext3"
+        extloop = None
+        
+        # Check the partitions from raw disk.
+        p1 = subprocess.Popen([partedcmd,"-s",img,"unit","B","print"],
+                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        out,err = p1.communicate()
+        lines = out.strip().split("\n")
+        
+        root_mounted = False
+        partition_mounts = 0
+
+        for line in lines:
+            line = line.strip()
+            # Lines that start with a number are the partitions; because parted
+            # output can be localized, we can't rely on any text labels.
+            if not line or not line[0].isdigit():
+                continue
+            
+            # Some fields have an extra ',' as list separator.
+            line = line.replace(",","")
+            
+            # Example of parted output lines that are handled:
+            # Number  Start        End          Size         Type     File system     Flags
+            #  1      512B         3400000511B  3400000000B  primary
+            #  2      3400531968B  3656384511B  255852544B   primary  linux-swap(v1)
+            #  3      3656384512B  3720347647B  63963136B    primary  fat16           boot, lba
+
+            partition_info = re.split("\s+",line)
+
+            size = partition_info[3].split("B")[0]
+
+            if len(partition_info) < 6 or partition_info[5] in ["boot"]:
+                # No filesystem can be found from the partition line. Assume
+                # btrfs, because that is the only MeeGo filesystem that parted
+                # does not recognize properly.
+                # TODO: Can we make a better assumption?
+                fstype = "btrfs"
+            elif partition_info[5] in ["ext2","ext3","ext4","btrfs"]:
+                fstype = partition_info[5]
+            elif partition_info[5] in ["fat16","fat32"]:
+                fstype = "vfat"
+            elif "swap" in partition_info[5]:
+                fstype = "swap"
+            else:
+                raise CreatorError("Could not recognize partition fs type '%s'." % partition_info[5])
+
+            if not root_mounted and fstype in ["ext2","ext3","ext4","btrfs"]:
+                # TODO: Check that this is actually the valid root partition from /etc/fstab
+                mountpoint = "/"
+                root_mounted = True
+            elif fstype == "swap":
+                mountpoint = "swap"
+            else:
+                # TODO: Assign better mount points for the rest of the partitions.
+                partition_mounts += 1
+                mountpoint = "/media/partition_%d" % partition_mounts
+
+            if "boot" in partition_info:
+                boot = True
+            else:
+                boot = False
+            
+            print "Size: %s Bytes, fstype: %s, mountpoint: %s, boot: %s" % ( size, fstype, mountpoint, boot )
+            # TODO: add_partition should take bytes as size parameter.
+            imgloop.add_partition((int)(size)/1024/1024, "/dev/sdb", mountpoint, fstype = fstype, boot = boot)
+        
+        try:
+            imgloop.mount()
+            os_image = img
+        except MountError, e:
+            imgloop.cleanup()
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (img, e))
+
+        try:
+            chroot.chroot(extmnt, None,  "/bin/env HOME=/root /bin/bash")
+        except:
+            chroot.cleanup_after_chroot("img", imgloop, None, None)
+            print >> sys.stderr, "Failed to chroot to %s." % img
+            return 1
+            
+    def do_unpack(self):
+        convertoropts = configmgr.getConfigMgr().convert
+        convertor = convertoropts["convertor"](convertoropts)        #consistent with destfmt
+        srcimgsize = (misc.get_file_size(convertoropts["srcimg"])) * 1024L * 1024L
+        convertor._set_fstype("ext3")
+        convertor._set_image_size(srcimgsize)
+        srcloop = raw.RawImageCreator._mount_srcimg(convertoropts["srcimg"])
+        base_on = srcloop.partitions[0]['device']
+        convertor.check_depend_tools()
+        convertor.mount(base_on, None)
+        return convertor
+
+mic_plugin = ["raw", RawPlugin]
+
index 3e1a699..157116c 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,7 @@
 #!/usr/bin/env python
 
 import os, sys
+import glob
 from distutils.core import setup
 #try:
 #    import setuptools
@@ -32,7 +33,6 @@ except IOError:
 PACKAGES = [MOD_NAME,
             MOD_NAME + '/utils',
             MOD_NAME + '/utils/kscommands',
-            MOD_NAME + '/utils/pkgmanagers',
             MOD_NAME + '/imager',
             MOD_NAME + '/pluginbase',
            ]
@@ -44,8 +44,15 @@ setup(name=MOD_NAME,
       url='https://meego.gitorious.org/meego-developer-tools/image-creator',
       scripts=[
           'tools/micng',
-          'tools/mic-image-create',
           ],
       packages = PACKAGES,
+      data_files = [("/usr/lib/micng/plugins/imager", ["plugins/imager/fs_plugin.py",
+                                                "plugins/imager/livecd_plugin.py",
+                                                "plugins/imager/liveusb_plugin.py",
+                                                "plugins/imager/loop_plugin.py",
+                                                "plugins/imager/raw_plugin.py"]),
+                    ("/usr/lib/micng/plugins/backend", ["plugins/backend/zypppkgmgr.py",
+                                                "plugins/backend/yumpkgmgr.py"]),
+                    ("/etc/micng", ["distfiles/micng.conf"])]
 )
 
diff --git a/tests/addcase.sh b/tests/addcase.sh
new file mode 100755 (executable)
index 0000000..2ea1f22
--- /dev/null
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+if [ $# -ne 2 ]; then
+  echo "Usage addcase.sh <case-name> <case-ksfile>"
+  exit 1
+fi
+
+CaseName=$1
+CaseKs=$2
+
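+# record the new case's kickstart as a patch against the base kickstart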
+diff -upN ./mic_cases/base/test.ks ${CaseKs} > ks.p
+
+cd ./mic_cases
+mkdir test-${CaseName}
+cd test-${CaseName}
+
+mv ../../ks.p .
+vi options
+vi expect
+
+echo 'Ks diff:'
+cat ks.p
diff --git a/tests/mic-test.py b/tests/mic-test.py
new file mode 100644 (file)
index 0000000..4dbba3d
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+import unittest
+import os, sys, glob, tempfile, shutil
+from testbase import *
+
+class MICTest(unittest.TestCase):
+    cases_dir = "mic_cases"
+    if os.path.isdir(cases_dir):
+        for case in glob.glob(os.path.join(cases_dir,'test-*')):
+            case = os.path.basename(case)[5:]
+            method = """
+def test_%s(self):
+    self._testTemplate("%s")
+""" % (case, case)
+            exec method in locals()
+   
+    def setUp(self):
+        self.work_env = tempfile.mkdtemp()
+    
+    def tearDown(self):
+        shutil.rmtree(self.work_env, ignore_errors = True)
+            
+    def _testTemplate(self, case):
+        """test function"""
+        PrepEnv(self.cases_dir, case, self.work_env)
+        RunandCheck(self, self.work_env)
+                               
+def MICtestsuite():
+    suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
+    alltests = unittest.TestSuite(suite)
+    return alltests       
+
+if __name__ == '__main__':
+    suite = MICtestsuite()
+    unittest.TextTestRunner(verbosity=2).run(suite)
diff --git a/tests/mic_cases/base/test.ks b/tests/mic_cases/base/test.ks
new file mode 100644 (file)
index 0000000..fc43108
--- /dev/null
@@ -0,0 +1,66 @@
+#
+# Do not Edit! Generated by:
+# kickstarter.py
+# 
+
+lang en_US.UTF-8
+keyboard us
+timezone --utc America/New_York
+auth --useshadow --enablemd5
+part / --size 3000 --ondisk sda --fstype=ext3
+rootpw meego 
+xconfig --startxonboot
+bootloader --timeout=0 --append="quiet"
+desktop --autologinuser=meego 
+user --name meego  --groups audio,video --password meego 
+
+repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
+
+%packages
+@MeeGo Core
+@MeeGo Netbook Desktop
+@X for Netbooks
+@MeeGo Compliance
+@MeeGo X Window System
+@Virtual Machine Support
+@Games
+@Printing
+@Base Double Byte IME Support
+@MeeGo Base Development
+
+kernel
+chromium 
+%end
+
+%post
+
+# save a little bit of space at least...
+rm -f /boot/initrd*
+
+# make sure there aren't core files lying around
+rm -f /core*
+
+
+
+# Prelink can reduce boot time
+if [ -x /usr/sbin/prelink ]; then
+    /usr/sbin/prelink -aRqm
+fi
+
+
+# work around for poor key import UI in PackageKit
+rm -f /var/lib/rpm/__db*
+rpm --rebuilddb
+
+if [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-meego ]; then
+    rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+fi
+
+
+%end
+
+%post --nochroot
+if [ -n "$IMG_NAME" ]; then
+    echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release
+fi
+%end
diff --git a/tests/mic_cases/test-bootstrap/expect b/tests/mic_cases/test-bootstrap/expect
new file mode 100644 (file)
index 0000000..c785ffa
--- /dev/null
@@ -0,0 +1 @@
+Please use -b | --build-bootstrap with -B | --bootstrap together
diff --git a/tests/mic_cases/test-bootstrap/options b/tests/mic_cases/test-bootstrap/options
new file mode 100644 (file)
index 0000000..013c204
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd -b
diff --git a/tests/mic_cases/test-creatprc/expect b/tests/mic_cases/test-creatprc/expect
new file mode 100644 (file)
index 0000000..4033a56
--- /dev/null
@@ -0,0 +1 @@
+Unable to set unencrypted password due to lack of /usr/sbin/chpasswd
diff --git a/tests/mic_cases/test-creatprc/ks.p b/tests/mic_cases/test-creatprc/ks.p
new file mode 100644 (file)
index 0000000..a7b200d
--- /dev/null
@@ -0,0 +1,41 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ /home/zhou/Code/min_nb.ks  2011-02-23 12:19:12.453405953 +0800
+@@ -1,4 +1,4 @@
+-#
++# 
+ # Do not Edit! Generated by:
+ # kickstarter.py
+ # 
+@@ -15,25 +15,17 @@ desktop --autologinuser=meego
+ user --name meego  --groups audio,video --password meego 
+ repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
++repo   --name=oss-source --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/source/ --source
+-%packages
+-@MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
+-@MeeGo Compliance
+-@MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
+-@MeeGo Base Development
+-
+-kernel
+-chromium 
++%packages --ignoremissing
++libacl
++alsa-lib
++libattr
++basesystem
++bash
+ %end
+ %post
+-
+ # save a little bit of space at least...
+ rm -f /boot/initrd*
diff --git a/tests/mic_cases/test-creatprc/options b/tests/mic_cases/test-creatprc/options
new file mode 100644 (file)
index 0000000..9620766
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd
diff --git a/tests/mic_cases/test-genimg/ks.p b/tests/mic_cases/test-genimg/ks.p
new file mode 100644 (file)
index 0000000..a5d57ea
--- /dev/null
@@ -0,0 +1,376 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ /home/zhou/Code/minimal.ks 2011-02-25 15:22:26.878620886 +0800
+@@ -1,66 +1,322 @@
+-#
+-# Do not Edit! Generated by:
+-# kickstarter.py
+-# 
++# kickstart file for MeeGo minimal compliance image
++# Usage: $ sudo mic-image-creator --run-mode=0 --cache=mycachedir --format=fs --config=<meego-minimal-compliance-xxxx.ks> --package=tar.bz2 --include-source
+ lang en_US.UTF-8
+ keyboard us
+-timezone --utc America/New_York
++timezone --utc America/Los_Angeles
+ auth --useshadow --enablemd5
+-part / --size 3000 --ondisk sda --fstype=ext3
+-rootpw meego 
+-xconfig --startxonboot
++part / --size 1600 --ondisk sda --fstype=ext3
++rootpw meego
+ bootloader --timeout=0 --append="quiet"
+-desktop --autologinuser=meego 
+-user --name meego  --groups audio,video --password meego 
++user --name meego  --groups audio,video --password meego
++repo   --name=non-oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/non-oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///eetc/pki/rpm-gpg/RPM-GPG-KEY-meego 
+ repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
+-%packages
+-@MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
+-@MeeGo Compliance
+-@MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
+-@MeeGo Base Development
+-
+-kernel
+-chromium 
++%packages --ignoremissing
++libacl
++alsa-lib
++libattr
++basesystem
++bash
++bluez
++bluez-libs
++buteo-mtp
++buteo-syncfw
++buteo-syncml
++buteo-sync-plugins
++bzip2-libs
++cairo
++chkconfig
++connman
++ConsoleKit
++ConsoleKit-libs
++contextkit
++coreutils
++cryptsetup-luks
++curl
++libcurl
++cyrus-sasl-lib
++db4
++db4-utils
++dbus
++dbus-libs
++dbus-x11
++dbus-glib
++desktop-file-utils
++device-mapper-libs
++dsme
++libiphb
++e2fsprogs
++e2fsprogs-libs
++libcom_err
++libss
++eggdbus
++elfutils-libelf
++exempi
++expat
++farsight2
++file-libs
++filesystem
++flac
++fontconfig
++freetype
++gamin
++libgcc
++libstdc++
++GConf-dbus
++gdbm
++giflib
++glib2
++glibc
++glibc-common
++gmime
++gnupg2
++gnutls
++grep
++gssdp
++gst-plugins-base
++gst-plugins-good
++gstreamer
++gtk2
++gupnp
++gupnp-igd
++hwdata
++libicu
++jasper
++jasper-libs
++kcalcore
++kernel>=2.6.35
++keyutils-libs
++krb5-libs
++lcms
++lcms-libs
++libaccounts-glib
++libaccounts-qt
++libarchive
++libatasmart
++libcap
++libcreds2
++libdaemon
++libdres
++ohm-plugin-resolver
++libdrm
++libdsme
++libexif
++libffi
++libfontenc
++libgcrypt
++libgdbus
++libgee
++libgnome-keyring
++libgpg-error
++libgsf
++libical
++libICE
++libidn
++libiodata
++libjpeg
++libmng
++libnice
++libnl
++libogg
++libpng
++libprolog
++libqmlog
++libqttracker
++libresource
++libresource-client
++satsolver-tools
++libsignon
++libsignon-passwordplugin
++libsignon-saslplugin
++libSM
++libsndfile
++libsoup
++libtasn1
++libtelepathy
++libthai
++libtheora
++libtiff
++libtool-ltdl
++libtrace
++libusb
++libuser
++libvisual
++libvorbis
++libX11
++libXau
++libXaw
++libxcb
++libXcomposite
++libXcursor
++libXdamage
++libXext
++libXfixes
++libXfont
++libXft
++libXi
++libXinerama
++libxkbfile
++libxml2
++libXmu
++libXpm
++libXrandr
++libXrender
++libXt
++libXtst
++libXv
++libXxf86vm
++libzypp
++/usr/lib/libGL.so.1
++/usr/lib/libEGL.so.1
++mesa-libGLUT
++mesa-libOSMesa
++mingetty
++ncurses
++ncurses-base
++ncurses-libs
++nspr
++nss
++nss-softokn-freebl
++nss-sysinit
++o3read
++obexd
++ofono
++ohm
++ohm-config
++ohm-plugin-core
++ohm-plugins-misc
++openjpeg-libs
++openobex
++openssl
++orc
++PackageKit
++PackageKit-glib
++PackageKit-gtk-module
++PackageKit-qt
++PackageKit-zypp
++pam
++pango
++passwd
++pciutils
++pcre
++libphonon4
++pixman
++pm-utils
++polkit
++poppler
++poppler-utils
++popt
++pulseaudio
++python
++python-libs
++qjson
++libqtcore4
++libqtdbus4
++libqtdeclarative4
++libqtdeclarative4-folderlistmodel
++libqtdeclarative4-gestures
++libqtdeclarative4-particles
++libqtdesigner4
++libqtgui4
++libqtnetwork4
++libqtopengl4
++libqtscript4
++libqtsql4
++libqtsql4-sqlite
++libqtsvg4
++libqttest4
++libqtxml4
++libqtxmlpatterns4
++qtcontacts-tracker
++libdeclarative-contacts
++libdeclarative-multimedia
++libdeclarative-publishsubscribe
++libdeclarative-sensors
++libdeclarative-serviceframework
++libqtcontacts1
++libqtlocation1
++libqtmessaging1
++libqtmultimediakit1
++libqtpublishsubscribe1
++libqtsensors1
++libqtserviceframework1
++libqtsysteminfo1
++libqtversit1
++qt-mobility
++servicefw
++libqtwebkit4
++libqtwebkit-qmlwebkitplugin
++readline
++rpm
++rpm-libs
++rtkit
++sed
++sensorfw
++setup
++sg3_utils-libs
++shadow-utils
++shared-mime-info
++sofia-sip
++sofia-sip-glib
++speex
++sqlite
++swi-prolog
++swi-prolog-library
++swi-prolog-library-core
++sysvinit
++sysvinit-tools
++taglib
++telepathy-farsight
++telepathy-gabble
++telepathy-glib
++telepathy-mission-control
++telepathy-qt4
++telepathy-qt4-farsight
++telepathy-ring
++telepathy-sofiasip
++telepathy-stream-engine
++timed
++tinycdb
++totem-pl-parser
++tracker
++tzdata
++libgudev1
++libudev
++udev
++udisks
++upower
++usermode
++libblkid
++libuuid
++util-linux-ng
++wpa_supplicant
++xcb-util
++xorg-x11-font-utils
++xorg-x11-server
++xorg-x11-server-common
++xorg-x11-utils
++xorg-x11-utils-xdpyinfo
++xorg-x11-utils-xdriinfo
++xorg-x11-utils-xev
++xorg-x11-utils-xfd
++xorg-x11-utils-xfontsel
++xorg-x11-utils-xlsatoms
++xorg-x11-utils-xlsclients
++xorg-x11-utils-xlsfonts
++xorg-x11-utils-xprop
++xorg-x11-utils-xrandr
++xorg-x11-utils-xvinfo
++xorg-x11-utils-xwininfo
++xorg-x11-xauth
++xorg-x11-xkb-utils
++xz-libs
++zlib
+ %end
+ %post
+-
+-# save a little bit of space at least...
+-rm -f /boot/initrd*
+-
+ # make sure there aren't core files lying around
+ rm -f /core*
+-
+-
+-
+-# Prelink can reduce boot time
+-if [ -x /usr/sbin/prelink ]; then
+-    /usr/sbin/prelink -aRqm
+-fi
+-
+-
+-# work around for poor key import UI in PackageKit
+-rm -f /var/lib/rpm/__db*
+ rpm --rebuilddb
+-
+-if [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-meego ]; then
+-    rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+-fi
+-
+-
+-%end
+-
+-%post --nochroot
+-if [ -n "$IMG_NAME" ]; then
+-    echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release
+-fi
+ %end
diff --git a/tests/mic_cases/test-genimg/options b/tests/mic_cases/test-genimg/options
new file mode 100644 (file)
index 0000000..fe7c479
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f fs
diff --git a/tests/mic_cases/test-imageformat/expect b/tests/mic_cases/test-imageformat/expect
new file mode 100644 (file)
index 0000000..c07440b
--- /dev/null
@@ -0,0 +1 @@
+Image format 'tar' isn't supported
diff --git a/tests/mic_cases/test-imageformat/ks.p b/tests/mic_cases/test-imageformat/ks.p
new file mode 100644 (file)
index 0000000..5b68493
--- /dev/null
@@ -0,0 +1,108 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ /home/zhou/Code/handset.ks 2011-02-22 18:05:32.693146002 +0800
+@@ -1,62 +1,80 @@
+-#
++# 
+ # Do not Edit! Generated by:
+ # kickstarter.py
+ # 
+ lang en_US.UTF-8
+ keyboard us
+-timezone --utc America/New_York
++timezone --utc America/Los_Angeles
+ auth --useshadow --enablemd5
+-part / --size 3000 --ondisk sda --fstype=ext3
++part / --size 1700 --ondisk sda --fstype=ext3
+ rootpw meego 
+ xconfig --startxonboot
+-bootloader --timeout=0 --append="quiet"
+-desktop --autologinuser=meego 
++bootloader --timeout=2 --append="ro pci=noearly console=tty1 console=ttyS0 console=ttyMFD2 earlyprintk=mrst loglevel=8 s0ix_latency=160"
++desktop --autologinuser=meego  --defaultdesktop=DUI --session="/usr/bin/mcompositor"
+ user --name meego  --groups audio,video --password meego 
+-repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
++repo   --name=oss     --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego
++repo   --name=non-oss  --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/non-oss/repos/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego
++repo   --name=oss-source  --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/source/ --source
++repo   --name=non-oss-source  --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/non-oss/repos/source/ --source
++
++%packages 
+-%packages
+ @MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
++@Minimal MeeGo X Window System
++@X for Handsets 
+ @MeeGo Compliance
+-@MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
++@MeeGo Handset Desktop
++@MeeGo Handset Applications
++@Moorestown Support
+ @MeeGo Base Development
+-kernel
+-chromium 
++kernel-adaptation-medfield
++#pulseaudio-modules-mfld
++
+ %end
+ %post
+-# save a little bit of space at least...
+-rm -f /boot/initrd*
++#Create Initrd if it does not exist and create symlinks for bzImage and initrd for kboot autoboot
++echo "ro pci=noearly console=tty1 console=ttyS0 console=ttyMFD2 earlyprintk=mrst loglevel=8 s0ix_latency=160" > /boot/kboot.cmdline
++
++echo "Checking for kernel......."
++Kernel_Name=`ls /boot | grep vmlinuz`
++if [ -f /boot/$Kernel_Name ]; then
++        Kernel_Ver=`echo $Kernel_Name | sed s/vmlinuz-//`
++        if [ -f /boot/initrd* ]; then
++                echo "Initrd exists" > /dev/null
++        else
++                /usr/libexec/mkmrstinitrd /boot/initrd-$Kernel_Ver.img $Kernel_Ver
++        fi
++        #Create Symlinks
++        cd /boot
++        ln -s $Kernel_Name bzImage
++        ln -s initrd-$Kernel_Ver.img initrd
++        ln -s kboot.cmdline cmdline
++else
++        echo "No Kernels were found"
++fi
+ # make sure there aren't core files lying around
+ rm -f /core*
+-
+-
+ # Prelink can reduce boot time
+ if [ -x /usr/sbin/prelink ]; then
+     /usr/sbin/prelink -aRqm
+ fi
++# open serial line console for embedded system
++echo "s0:235:respawn:/sbin/agetty -L 115200 ttyS0 vt100" >> /etc/inittab
++echo "s1:235:respawn:/sbin/agetty -L 115200 ttyMFD2 vt100" >> /etc/inittab
++echo "ttyMFD2" >> /etc/securetty
+ # work around for poor key import UI in PackageKit
+ rm -f /var/lib/rpm/__db*
+ rpm --rebuilddb
+-if [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-meego ]; then
+-    rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+-fi
+-
+-
+ %end
+ %post --nochroot
diff --git a/tests/mic_cases/test-imageformat/options b/tests/mic_cases/test-imageformat/options
new file mode 100644 (file)
index 0000000..a1c0b2b
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f tar
diff --git a/tests/mic_cases/test-invalidrepostr/expect b/tests/mic_cases/test-invalidrepostr/expect
new file mode 100644 (file)
index 0000000..3ee4fbf
--- /dev/null
@@ -0,0 +1 @@
+unable to load kickstart file
diff --git a/tests/mic_cases/test-invalidrepostr/ks.p b/tests/mic_cases/test-invalidrepostr/ks.p
new file mode 100644 (file)
index 0000000..38c6578
--- /dev/null
@@ -0,0 +1,86 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ ../tools/invalidrepo.ks    2011-02-21 17:04:40.663145997 +0800
+@@ -1,4 +1,4 @@
+-#
++# 
+ # Do not Edit! Generated by:
+ # kickstarter.py
+ # 
+@@ -7,47 +7,45 @@ lang en_US.UTF-8
+ keyboard us
+ timezone --utc America/New_York
+ auth --useshadow --enablemd5
+-part / --size 3000 --ondisk sda --fstype=ext3
++part / --size 2200 --ondisk sda --fstype=ext3
+ rootpw meego 
+ xconfig --startxonboot
+ bootloader --timeout=0 --append="quiet"
+-desktop --autologinuser=meego 
++desktop --autologinuser=meego  --defaultdesktop=X-IVI --session=/usr/bin/startivi
+ user --name meego  --groups audio,video --password meego 
+-repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
+-
+-%packages
++repo --save=0 --debuginfo --source --name=core       --baseurl=http://xxx.com --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego
++%packages 
+ @MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
+ @MeeGo Compliance
++@X for IVI
++@IVI Desktop
+ @MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
++@MeeGo IVI Applications
+ @MeeGo Base Development
++@Development Tools
++
++kernel-adaptation-intel-automotive
++
++# forced for compliance...
++mesa-libEGL
+-kernel
+-chromium 
+ %end
+ %post
++
+ # save a little bit of space at least...
+ rm -f /boot/initrd*
+ # make sure there aren't core files lying around
+ rm -f /core*
+-
+-
+ # Prelink can reduce boot time
+ if [ -x /usr/sbin/prelink ]; then
+     /usr/sbin/prelink -aRqm
+ fi
+-
+ # work around for poor key import UI in PackageKit
+ rm -f /var/lib/rpm/__db*
+ rpm --rebuilddb
+@@ -56,7 +54,6 @@ if [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-mee
+     rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+ fi
+-
+ %end
+ %post --nochroot
+@@ -64,3 +61,7 @@ if [ -n "$IMG_NAME" ]; then
+     echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release
+ fi
+ %end
++
++
++
++
diff --git a/tests/mic_cases/test-invalidrepostr/options b/tests/mic_cases/test-invalidrepostr/options
new file mode 100644 (file)
index 0000000..9620766
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd
diff --git a/tests/mic_cases/test-misspkgs/expect b/tests/mic_cases/test-misspkgs/expect
new file mode 100644 (file)
index 0000000..b0b7d7f
--- /dev/null
@@ -0,0 +1 @@
+Failed to find package 'zhou'
diff --git a/tests/mic_cases/test-misspkgs/ks.p b/tests/mic_cases/test-misspkgs/ks.p
new file mode 100644 (file)
index 0000000..9c75428
--- /dev/null
@@ -0,0 +1,41 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ losepkg.ks 2011-02-25 15:10:02.358622001 +0800
+@@ -1,4 +1,4 @@
+-#
++# 
+ # Do not Edit! Generated by:
+ # kickstarter.py
+ # 
+@@ -15,25 +15,18 @@ desktop --autologinuser=meego
+ user --name meego  --groups audio,video --password meego 
+ repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
++repo   --name=oss-source --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/source/ --source
+ %packages
+-@MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
+-@MeeGo Compliance
+-@MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
+-@MeeGo Base Development
+-
+-kernel
+-chromium 
++libacl
++alsa-lib
++libattr
++basesystem
++bash
++zhou
+ %end
+ %post
+-
+ # save a little bit of space at least...
+ rm -f /boot/initrd*
diff --git a/tests/mic_cases/test-misspkgs/options b/tests/mic_cases/test-misspkgs/options
new file mode 100644 (file)
index 0000000..9620766
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd
diff --git a/tests/mic_cases/test-norepo/expect b/tests/mic_cases/test-norepo/expect
new file mode 100644 (file)
index 0000000..636d818
--- /dev/null
@@ -0,0 +1 @@
+No repositories found
diff --git a/tests/mic_cases/test-norepo/ks.p b/tests/mic_cases/test-norepo/ks.p
new file mode 100644 (file)
index 0000000..9eff66a
--- /dev/null
@@ -0,0 +1,85 @@
+--- ./mic_cases/base/test.ks   2011-02-25 09:21:41.778621925 +0800
++++ ../tools/norepo.ks 2011-02-21 16:29:29.933146021 +0800
+@@ -1,4 +1,4 @@
+-#
++# 
+ # Do not Edit! Generated by:
+ # kickstarter.py
+ # 
+@@ -7,47 +7,44 @@ lang en_US.UTF-8
+ keyboard us
+ timezone --utc America/New_York
+ auth --useshadow --enablemd5
+-part / --size 3000 --ondisk sda --fstype=ext3
++part / --size 2200 --ondisk sda --fstype=ext3
+ rootpw meego 
+ xconfig --startxonboot
+ bootloader --timeout=0 --append="quiet"
+-desktop --autologinuser=meego 
++desktop --autologinuser=meego  --defaultdesktop=X-IVI --session=/usr/bin/startivi
+ user --name meego  --groups audio,video --password meego 
+-repo   --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo-other/trunk-daily/builds/trunk/1.1.90.3.20110214.2/oss/repos/ia32/packages/ --save  --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego 
+-
+-%packages
++%packages 
+ @MeeGo Core
+-@MeeGo Netbook Desktop
+-@X for Netbooks
+ @MeeGo Compliance
++@X for IVI
++@IVI Desktop
+ @MeeGo X Window System
+-@Virtual Machine Support
+-@Games
+-@Printing
+-@Base Double Byte IME Support
++@MeeGo IVI Applications
+ @MeeGo Base Development
++@Development Tools
++
++kernel-adaptation-intel-automotive
++
++# forced for compliance...
++mesa-libEGL
+-kernel
+-chromium 
+ %end
+ %post
++
+ # save a little bit of space at least...
+ rm -f /boot/initrd*
+ # make sure there aren't core files lying around
+ rm -f /core*
+-
+-
+ # Prelink can reduce boot time
+ if [ -x /usr/sbin/prelink ]; then
+     /usr/sbin/prelink -aRqm
+ fi
+-
+ # work around for poor key import UI in PackageKit
+ rm -f /var/lib/rpm/__db*
+ rpm --rebuilddb
+@@ -56,7 +53,6 @@ if [ -f /etc/pki/rpm-gpg/RPM-GPG-KEY-mee
+     rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+ fi
+-
+ %end
+ %post --nochroot
+@@ -64,3 +60,7 @@ if [ -n "$IMG_NAME" ]; then
+     echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release
+ fi
+ %end
++
++
++
++
diff --git a/tests/mic_cases/test-norepo/options b/tests/mic_cases/test-norepo/options
new file mode 100644 (file)
index 0000000..9620766
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd
diff --git a/tests/mic_cases/test-root/expect b/tests/mic_cases/test-root/expect
new file mode 100644 (file)
index 0000000..b21a32e
--- /dev/null
@@ -0,0 +1 @@
+You must run mic-image-creator as root
diff --git a/tests/mic_cases/test-root/options b/tests/mic_cases/test-root/options
new file mode 100644 (file)
index 0000000..b2de23a
--- /dev/null
@@ -0,0 +1 @@
+mic-image-creator -f livecd
diff --git a/tests/mic_cases/test-runmode/expect b/tests/mic_cases/test-runmode/expect
new file mode 100644 (file)
index 0000000..78355bb
--- /dev/null
@@ -0,0 +1 @@
+invalid run mode
diff --git a/tests/mic_cases/test-runmode/ks.p b/tests/mic_cases/test-runmode/ks.p
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/tests/mic_cases/test-runmode/options b/tests/mic_cases/test-runmode/options
new file mode 100644 (file)
index 0000000..ef8fa9d
--- /dev/null
@@ -0,0 +1 @@
+sudo mic-image-creator -f livecd --run-mode=2
diff --git a/tests/testbase.py b/tests/testbase.py
new file mode 100644 (file)
index 0000000..098a02c
--- /dev/null
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+import os 
+import subprocess, re, shutil, glob
+import gettext
+
+_ = gettext.lgettext
+COLOR_BLACK = "\033[00m"
+COLOR_RED = "\033[1;31m"
+
+def PrepEnv(cases_dir, case, work_env):
+    """prepare working env"""
+    for one in glob.glob(os.path.join(cases_dir, 'base', '*')):
+        shutil.copy(one, work_env)
+    for other in glob.glob(os.path.join(cases_dir, 'test-'+case, '*')):
+        shutil.copy(other, work_env)
+def ImgCheck(work_env):
+    """check whether an image file was generated in work_env"""
+    genImage = False
+    for fname in os.listdir(work_env):
+        m = re.match('^meego-.*', fname)
+        if m:
+            genImage = True
+            break
+    return genImage
+
+def RunandCheck(testcase, work_env):
+    """run the mic-image-creator command and verify the expected outcome"""
+    ret = False  
+  
+    cwd = os.getcwd()
+    os.chdir(work_env)
+    os.system("patch -s < ks.p")
+    # read the expected error message from the 'expect' file, if present
+    expect = None
+    if "expect" in os.listdir(work_env):
+        exp_f = open('expect', 'r')
+        exp = exp_f.read()
+        if len(exp) > 0:
+            expect = exp.strip()
+    # build the command line from the 'options' file
+    opt_f = open('options', 'r')
+    args = opt_f.read().strip() + ' -c test.ks'
+    
+    dev_null = os.open('/dev/null', os.O_WRONLY)
+    proc = subprocess.Popen(args, stdout=dev_null, stderr=subprocess.PIPE, shell=True)
+    os.close(dev_null)
+    errorinfo = proc.communicate()[1]
+    # check: either stderr contains the expected message, or an image was built
+    if expect:
+        if errorinfo.find(expect) != -1:  # FIXME: plain substring match may be too loose
+            ret = True
+    else:
+        proc.wait()
+        ret = ImgCheck(work_env)    
+    os.chdir(cwd)
+    
+    try:
+        testcase.assertTrue(ret)
+    except testcase.failureException:
+        raise testcase.failureException(_("%s%s%s") % (COLOR_RED, errorinfo, COLOR_BLACK))
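
For illustration only (not part of this commit): a minimal sketch of how a unittest-based driver could use the helpers above. The case name 'genimg', the cases directory path, and the temporary work directory are assumptions chosen for the example.

#!/usr/bin/python
import shutil
import tempfile
import unittest

import testbase

class TestGenImg(unittest.TestCase):
    def setUp(self):
        # every case runs in a throw-away working directory
        self.work_env = tempfile.mkdtemp(prefix='mic-test-')

    def tearDown(self):
        shutil.rmtree(self.work_env, ignore_errors=True)

    def test_genimg(self):
        # copy the base test.ks plus the per-case options/expect/ks.p files,
        # then run mic-image-creator and verify the expected outcome
        testbase.PrepEnv('./mic_cases', 'genimg', self.work_env)
        testbase.RunandCheck(self, self.work_env)

if __name__ == "__main__":
    unittest.main()
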
diff --git a/tools/mic-image-create b/tools/mic-image-create
deleted file mode 100755 (executable)
index 49bf4b4..0000000
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/python -t
-
-import sys, os, os.path, string
-import micng.utils.argparse as argparse
-import micng.configmgr as configmgr
-import micng.pluginmgr as pluginmgr
-
-class Creator(object):
-    name = 'create'
-
-    def __init__(self):
-        self.configmgr = configmgr.getConfigMgr()
-        self.pluginmgr = pluginmgr.PluginMgr()
-        self.pluginmgr.loadPlugins()
-        self.plugincmds = self.pluginmgr.getPluginByCateg('imager')
-
-    def main(self, argv=None):
-#        import pdb
-#        pdb.set_trace()
-        if os.getuid() != 0:
-            print "Please run the program as root"
-            return 0
-        prog = os.path.basename(sys.argv[0])
-        parser = argparse.ArgumentParser(
-                  usage='%s [COMMONOPT] <subcommand> [SUBOPT] ARGS' % prog,
-                  ) 
-        parser.add_argument('-k', '--cache', dest='cache', help='cache diretory')
-        parser.add_argument('-o', '--outdir', dest='outdir', help='output diretory')
-        parser.add_argument('-t', '--tmpdir', dest='tmpdir', help='temp diretory')
-
-
-        subparsers = parser.add_subparsers(title='subcommands')
-        for subcmd, klass in self.plugincmds:
-            subcmd_help = 'create ' + subcmd + ' image'
-            subcmd_parser = subparsers.add_parser(
-                              subcmd, 
-                              usage=prog+' [COMMONOPT] '+subcmd+'  [SUBOPT] ARGS',
-                              help=subcmd_help
-                              )
-            if hasattr(klass, 'do_options'):
-                add_subopt = getattr(klass, 'do_options')
-                add_subopt(subcmd_parser)
-            if hasattr(klass, 'do_create'):
-                do_create = getattr(klass, 'do_create')
-                subcmd_parser.set_defaults(func=do_create)
-
-        if not argv:
-            parser.print_help()
-            return True
-
-        args = parser.parse_args(argv)
-        if args.outdir:
-            self.configmgr.setProperty('outdir', args.outdir)
-        if args.tmpdir:
-            self.configmgr.setProperty('tmpdir', args.tmpdir)
-        if args.cache:
-            self.configmgr.setProperty('cache', args.cache)
-#        print 'outdir', self.configmgr.getProperty('outdir')
-#        print 'tmpdir', self.configmgr.getProperty('tmpdir')
-#        print 'cache', self.configmgr.getProperty('cache')
-        args.func(args)
-        return True
-
-if __name__ == "__main__":
-    create = Creator()
-    ret = create.main(sys.argv[1:])
-    sys.exit(ret)
old mode 100755 (executable)
new mode 100644 (file)
index ea98dbd..86e81c0
 #!/usr/bin/python -t
 
-import sys, os
-import subprocess
+import os
+import sys
+import logging
 import micng.utils.cmdln as cmdln
+import micng.utils.misc as misc
+import micng.utils.errors as errors
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+import micng.creator as creator
+
+class Micng(cmdln.Cmdln):
+    """Usage: micng SUBCOMMAND [OPTS] [ARGS...]
+
+    MeeGo Image Creator Tool.
+
+    ${command_list}
+    ${help_list}
+    global ${option_list}
+    For additional information, see
+    * http://www.meego.com/
+    """
+    name = 'micng'
+    version = None
 
-class Mic(cmdln.Cmdln):
-    def run_subcmd(self, subcmd, opts, args):
-        creator = "mic-image-create"
-        tools = {
-                 "cr":creator, "create":creator,
-                }
-        
-        argv = [tools[subcmd]]
-        argv.extend(args)
-        subprocess.call(argv)        
-       
     @cmdln.alias("cr")
     def do_create(self, argv):
         """${cmd_name}: create image
 
-           ${cmd_usage}
-           ${cmd_option_list}
+        ${cmd_usage}
+        ${cmd_option_list}
         """
-        self.run_subcmd("create", None, argv[1:])
-    
-    @cmdln.alias("cv")
-    def do_convert(self, argv):
-        """${cmd_name}: convert an image format to another one
+        cr = creator.Creator()
+        ret = cr.main(argv[1:])
+        return ret
+
+#    @cmdln.alias("cv")
+#    def do_convert(self, argv):
+#        """${cmd_name}: convert an image format to another one
+#        """
+#        pass
+
+    @cmdln.option("-d", "--debug", action="store_true", help="debug message")
+    @cmdln.option("-v", "--verbose", action="store_true", help="verbose infomation")
+    @cmdln.alias("ch")
+    def do_chroot(self, subcmd, opts, *args):
+        """${cmd_name}: chroot an image
+
+        usage:
+            micng chroot <imagefile>
+
+        ${cmd_option_list}
         """
+        if len(args) == 0:
+            self.emptyline()
+            # print help
+            return
+        if len(args) == 1:
+            targetimage = args[0]
+        else:
+            raise errors.Usage("Extra argument given")
+
+        if os.geteuid() != 0:
+            raise errors.Usage("You must run as root")
+
+        # FIXME: a dedicated sub-logger should be used here
+        if opts.verbose:
+            logging.getLogger().setLevel(logging.INFO)
+        if opts.debug:
+            logging.getLogger().setLevel(logging.DEBUG)
+
+        imagetype = misc.get_image_type(targetimage)
+        if not imagetype:
+            imagetype = "fs"
+        if imagetype == "ext3fsimg":
+            imagetype = "loop"
+
+        pkgmgr = pluginmgr.PluginMgr()
+        pkgmgr.loadPlugins()
+
+        chrootclass = None
+        for (pname, pcls) in pkgmgr.getImagerPlugins():
+            if pname == imagetype and hasattr(pcls, "do_chroot"):
+                chrootclass = pcls
+                break
+
+        if not chrootclass:
+            raise errors.CreatorError("Unsupported image type: %s" % imagetype)
+
+        chrootclass.do_chroot(targetimage)
 
 if __name__ == "__main__":
-       mic = Mic()
-       ret = mic.main()
-       sys.exit(ret)
+    logging.getLogger().setLevel(logging.ERROR)
+    micng = Micng()
+    try:
+        ret = micng.main()
+    except errors.CreatorError, msg:
+        ret = 2
+        print >> sys.stderr, msg
+    except errors.Usage, msg:
+        ret = 2
+        print >> sys.stderr, msg
+    sys.exit(ret)
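
For illustration only (not part of this commit): a rough sketch of the imager-plugin hook that the chroot dispatch above looks for. The base-class import path, the class name ImagerPlugin, and the plugin name attribute are assumptions; the actual plugins are registered under plugins/imager/.

# hypothetical example; the ImagerPlugin base class name is an assumption
from micng.pluginbase.imager_plugin import ImagerPlugin

class FsImagerPlugin(ImagerPlugin):
    name = "fs"

    @classmethod
    def do_chroot(cls, target):
        # 'target' is the image file or rootfs directory given on the
        # command line; a real plugin would mount it and exec a shell inside
        print "chroot into %s" % target
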
diff --git a/tools/micng.ref b/tools/micng.ref
deleted file mode 100755 (executable)
index 5d2ad7b..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/python
-
-# Copyright (C) 2010 Intel Inc.  All rights reserved.
-# This program is free software; it may be used, copied, modified
-# and distributed under the terms of the GNU General Public Licence,
-# either version 2, or version 3 (at your option).
-
-import sys
-import mic3.cmdln as cmdln
-import optparse as _optparse
-
-try:
-    import mic3.__version__
-    VERSION = mic3.__version__.version
-except:
-    VERSION = 'unknown'
-
-class MIC3(cmdln.Cmdln):
-    """Usage: mic [GLOBALOPTS] SUBCOMMAND [OPTS] [ARGS...]
-    or: mic help SUBCOMMAND
-
-    MeeGo Image Tool.
-    Type 'mic help <subcommand>' for help on a specific subcommand.
-
-    ${command_list}
-    ${help_list}
-    global ${option_list}
-    For additional information, see
-    * http://www.meego.com/
-    """
-
-    name = 'mic'
-    version = VERSION
-
-    @cmdln.option("-v", "--verbose", action="store_true",
-                           help="print extra information")
-
-    def get_cmd_help(self, cmdname):
-        doc = self._get_cmd_handler(cmdname).__doc__
-        doc = self._help_reindent(doc)
-        doc = self._help_preprocess(doc, cmdname)
-        doc = doc.rstrip() + '\n' # trim down trailing space
-        return self._str(doc)
-
-    """ create image """
-    @cmdln.alias('cr')
-    @cmdln.option("-c", "--config", type="string", dest="config",
-                    help="Path to kickstart config file")
-
-    @cmdln.option("-f", "--format", type="string", dest="format",
-                    help="Image format, you can specify as fs, livecd, liveusb, loop, raw, nand, mrstnand, ubi, jffs2, vdi or vmdk")
-
-    @cmdln.option("-t", "--tmpdir", type="string",
-                      dest="tmpdir",
-                      help="Temporary directory to use (default: /var/tmp)")
-    @cmdln.option("-k", "--cache", type="string",
-                      dest="cachedir", default=None,
-                      help="Cache directory to use (default: private cache)")
-    @cmdln.option("-o", "--outdir", type="string",
-                      dest="outdir", default=None,
-                      help="Output directory to use (default: current work dir)")
-    @cmdln.option("", "--release", type="string",
-                      dest="release", default=None,
-                      help="Generate a MeeGo release with all necessary files for publishing.")
-    @cmdln.option("", "--genchecksum", action="store_true",
-                      dest="genchecksum", default=False,
-                      help="Generate checksum for image file if this option is provided")
-    @cmdln.option("-P", "--prefix", type="string",
-                      dest="prefix", default=None,
-                      help="Image name prefix (default: meego)")
-    @cmdln.option("-S", "--suffix", type="string",
-                      dest="suffix", default=None,
-                      help="Image name suffix (default: date stamp)")
-    @cmdln.option("-a", "--arch", type="string",
-                      dest="arch", default=None,
-                      help="Specify target arch of image, for example: arm")
-    @cmdln.option("", "--use-comps", action="store_true",
-                      dest="use_comps", default=False,
-                      help="Use comps instead of patterns if comps exists")
-    @cmdln.option("", "--record-pkgs", type="string",
-                      dest="record_pkgs", default=None,
-                      help="Record the installed packages, valid values: name, content")
-    @cmdln.option("", "--fstype", type="string",
-                      dest="fstype", default="vfat",
-                      help="File system type for live USB file image, ext3 or vfat, the default is vfat.")
-    @cmdln.option("", "--overlay-size-mb", type="int", default=64,
-                      help="Overlay size in MB as unit, it means how size changes you can save in your live USB disk.")
-    @cmdln.option('-d', '--debug', action='store_true',
-                      help='Output debugging information')
-    @cmdln.option('-v', '--verbose', dest='verbose', action='store_true',
-                      help='Output verbose information')
-    @cmdln.option('', '--logfile', type="string", dest="file",
-                      help='Save debug information to FILE')
-    @cmdln.option("", "--save-kernel", action="store_true",
-                      dest="save_kernel", default=False,
-                      help="Save kernel image file into outdir")
-    @cmdln.option("", "--pkgmgr", type="string",
-                      help="Specify the package manager, the available package managers have zypper and yum currently.")
-    @cmdln.option("", "--volumeid", type="string", default=None,
-                      help="Specify volume id, valid only for livecd")
-    def do_create(self, subcmd, opts, *args):
-        """${cmd_name}: Create an image
-
-        This command is used to create various images, including
-        live CD, live USB, loop, raw/KVM/QEMU, VMWare/vmdk,
-        VirtualBox/vdi, Moorestown/mrstnand, jffs2 and ubi.
-
-        Examples:
-           mic create                         # create an image according to the default config
-           mic create --format=liveusb        # create a live USB image
-
-        ${cmd_usage}
-        ${cmd_option_list}
-        """
-
-        print subcmd, opts, args
-
-if __name__ == "__main__":
-    mic = MIC3()
-    sys.exit(mic.main(sys.argv))