initial import code into git
author     JF Ding <Jian-feng.Ding@intel.com>
           Wed, 20 Jul 2011 22:07:16 +0000 (06:07 +0800)
committer  JF Ding <Jian-feng.Ding@intel.com>
           Wed, 20 Jul 2011 22:07:16 +0000 (06:07 +0800)
46 files changed:
Makefile [new file with mode: 0644]
VERSION [new file with mode: 0644]
distfiles/micng.conf [new file with mode: 0644]
micng/__init__.py [new file with mode: 0644]
micng/__version__.py [new file with mode: 0644]
micng/chroot.py [new file with mode: 0644]
micng/configmgr.py [new file with mode: 0644]
micng/convertor.py [new file with mode: 0644]
micng/imager/BaseImageCreator.py [new file with mode: 0644]
micng/imager/__init__.py [new file with mode: 0644]
micng/imager/fs.py [new file with mode: 0644]
micng/imager/livecd.py [new file with mode: 0644]
micng/pluginbase/__init__.py [new file with mode: 0644]
micng/pluginbase/backend_plugin.py [new file with mode: 0644]
micng/pluginbase/base_plugin.py [new file with mode: 0644]
micng/pluginbase/hook_plugin.py [new file with mode: 0644]
micng/pluginbase/imager_plugin.py [new file with mode: 0644]
micng/pluginmgr.py [new file with mode: 0644]
micng/utils/__init__.py [new file with mode: 0644]
micng/utils/argparse.py [new file with mode: 0644]
micng/utils/cmdln.py [new file with mode: 0644]
micng/utils/error.py [new file with mode: 0644]
micng/utils/errors.py [new file with mode: 0644]
micng/utils/fs_related.py [new file with mode: 0644]
micng/utils/kickstart.py [new file with mode: 0644]
micng/utils/kscommands/__init__.py [new file with mode: 0644]
micng/utils/kscommands/desktop.py [new file with mode: 0644]
micng/utils/kscommands/micboot.py [new file with mode: 0644]
micng/utils/kscommands/moblinrepo.py [new file with mode: 0644]
micng/utils/logger.py [new file with mode: 0644]
micng/utils/misc.py [new file with mode: 0644]
micng/utils/pkgmanagers/__init__.py [new file with mode: 0644]
micng/utils/pkgmanagers/yumpkgmgr.py [new file with mode: 0644]
micng/utils/pkgmanagers/zypppkgmgr.py [new file with mode: 0644]
micng/utils/rpmmisc.py [new file with mode: 0644]
plugins/backend/yumpkgmgr.py [new file with mode: 0644]
plugins/backend/zypppkgmgr.py [new file with mode: 0644]
plugins/hook/_hook.py [new file with mode: 0644]
plugins/imager/fs_plugin.py [new file with mode: 0644]
plugins/imager/livecd_plugin.py [new file with mode: 0644]
setup.py [new file with mode: 0644]
tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks [new file with mode: 0644]
tests/micng.conf [new file with mode: 0644]
tools/mic-image-create [new file with mode: 0755]
tools/micng [new file with mode: 0755]
tools/micng.ref [new file with mode: 0755]

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..23a4b5c
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,35 @@
+PYTHON ?= python
+VERSION = $(shell cat VERSION)
+TAGVER = $(shell cat VERSION | sed -e "s/\([0-9\.]*\).*/\1/")
+
+PKGNAME = micng
+
+ifeq ($(VERSION), $(TAGVER))
+       TAG = $(TAGVER)
+else
+       TAG = "HEAD"
+endif
+
+
+all:
+       $(PYTHON) setup.py build
+
+dist-bz2:
+       git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(TAG) | \
+               bzip2  > $(PKGNAME)-$(VERSION).tar.bz2
+
+dist-gz:
+       git archive --format=tar --prefix=$(PKGNAME)-$(VERSION)/ $(TAG) | \
+               gzip  > $(PKGNAME)-$(VERSION).tar.gz
+
+install: all
+       $(PYTHON) setup.py install --root=${DESTDIR}
+
+develop: all
+       $(PYTHON) setup.py develop
+
+clean:
+       rm -f tools/*.py[co]
+       rm -rf *.egg-info
+       rm -rf build/
+       rm -rf dist/
diff --git a/VERSION b/VERSION
new file mode 100644 (file)
index 0000000..9cf9d12
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+0.1git
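
For reference, the Makefile above derives TAGVER from this VERSION string by stripping everything after the leading digits and dots, and tags HEAD when the two differ. A rough Python equivalent of that sed/ifeq logic (illustrative only, not part of the commit):

    import re

    version = "0.1git"                                  # contents of VERSION
    tagver = re.match(r"[0-9.]*", version).group(0)     # "0.1", like sed 's/\([0-9\.]*\).*/\1/'
    tag = tagver if version == tagver else "HEAD"       # mirrors the ifeq block in the Makefile
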
diff --git a/distfiles/micng.conf b/distfiles/micng.conf
new file mode 100644 (file)
index 0000000..0bc4a3f
--- /dev/null
+++ b/distfiles/micng.conf
@@ -0,0 +1,13 @@
+[main]
+cachedir= /var/tmp/cache
+tmpdir= /var/tmp
+outdir= .
+distro_name=MeeGo
+#proxy=http://proxy.yourcompany.com:8080/
+#no_proxy=localhost,127.0.0.0/8,.yourcompany.com
+format=livecd
+default_ks=default.ks
+use_comps=1
+
+#run mode: 0 - legacy, 1 - bootstrap
+run_mode=0
diff --git a/micng/__init__.py b/micng/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/__version__.py b/micng/__version__.py
new file mode 100644 (file)
index 0000000..aa9bee7
--- /dev/null
+++ b/micng/__version__.py
@@ -0,0 +1 @@
+VERSION = "0.1git"
diff --git a/micng/chroot.py b/micng/chroot.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/configmgr.py b/micng/configmgr.py
new file mode 100644 (file)
index 0000000..6a35c21
--- /dev/null
+++ b/micng/configmgr.py
@@ -0,0 +1,87 @@
+#!/usr/bin/python -t
+
+import os
+import micng.utils as utils
+
+DEFAULT_OUTDIR='.'
+DEFAULT_TMPDIR='/tmp'
+DEFAULT_CACHE='/var/tmp'
+DEFAULT_GSITECONF='/etc/micng/micng.conf'
+DEFAULT_USITECONF='~/.micng.conf'
+
+class ConfigMgr(object):
+    def __init__(self, siteconf=None, ksfile=None):
+        self.outdir = DEFAULT_OUTDIR
+        self.tmpdir = DEFAULT_TMPDIR
+        self.cache = DEFAULT_CACHE
+        self.siteconf = siteconf
+        self.name = 'meego'
+        self.ksfile = ksfile
+        self.kickstart = None
+        self.ksrepos = None
+        self.repometadata = None
+        self.init_siteconf(self.siteconf)
+        self.init_kickstart(self.ksfile)
+
+    def init_siteconf(self, siteconf = None):
+        from ConfigParser import SafeConfigParser
+        siteconf_parser = SafeConfigParser()
+        siteconf_files = [DEFAULT_GSITECONF, DEFAULT_USITECONF]
+
+        if siteconf:
+            self.siteconf = siteconf
+            siteconf_files = [self.siteconf]
+        siteconf_parser.read(siteconf_files)
+
+        for option in siteconf_parser.options('main'):
+            value = siteconf_parser.get('main', option)
+            setattr(self, option, value)
+
+    def init_kickstart(self, ksfile=None):
+        if not ksfile:
+            return
+        self.ksfile = ksfile
+        try:
+            self.kickstart = utils.kickstart.read_kickstart(self.ksfile)
+            self.ksrepos = utils.misc.get_repostrs_from_ks(self.kickstart)
+            print "retrieving repo metadata..."
+            self.repometadata = utils.misc.get_metadata_from_repos(self.ksrepos, self.cache)
+        except OSError, e:
+            raise Exception("failed to create image: %s" % e)
+        except Exception, e:
+            raise Exception("unable to load kickstart file '%s': %s" % (self.ksfile, e))
+
+
+    def setProperty(self, name, value):
+        if not hasattr(self, name):
+            return None
+        #print ">>", name, value
+        if name == 'ksfile':
+            self.init_kickstart(value)
+            return True
+        if name == 'siteconf':
+            self.init_siteconf(value)
+            return True
+        return setattr(self, name, value)
+
+    def getProperty(self, name):
+        if not hasattr(self, name):
+            return None
+        return getattr(self, name)
+
+configmgr = ConfigMgr()
+
+def getConfigMgr():
+    return configmgr
+
+def setProperty(cinfo, name, value):
+    if not isinstance(cinfo, ConfigMgr):
+        return None
+    # delegate to the instance method, which validates the attribute name
+    return cinfo.setProperty(name, value)
+
+def getProperty(cinfo, name):
+    if not isinstance(cinfo, ConfigMgr):
+        return None
+    # delegate to the instance method, which returns None for unknown names
+    return cinfo.getProperty(name)
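
A minimal usage sketch for the ConfigMgr singleton defined above (illustrative only; it assumes a readable site config with a [main] section, and note that setting 'ksfile' triggers kickstart parsing plus repository metadata retrieval):

    from micng.configmgr import getConfigMgr

    cfg = getConfigMgr()                                   # module-level singleton created at import time
    cfg.setProperty('siteconf', '/etc/micng/micng.conf')   # re-read a specific site configuration
    cfg.setProperty('ksfile', 'tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks')  # parse a kickstart
    print cfg.getProperty('outdir'), cfg.getProperty('cache')
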
diff --git a/micng/convertor.py b/micng/convertor.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/imager/BaseImageCreator.py b/micng/imager/BaseImageCreator.py
new file mode 100644 (file)
index 0000000..a5862c6
--- /dev/null
+++ b/micng/imager/BaseImageCreator.py
@@ -0,0 +1,1603 @@
+#
+# creator.py : ImageCreator and LoopImageCreator base classes
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import stat
+import sys
+import tempfile
+import shutil
+import logging
+import subprocess
+import re
+import tarfile
+import glob
+
+import rpm
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.utils import kickstart
+from micng.utils import pkgmanagers
+from micng.utils.rpmmisc import *
+from micng.utils.misc import *
+
+FSLABEL_MAXLEN = 32
+"""The maximum string length supported for LoopImageCreator.fslabel."""
+
+class ImageCreator(object):
+    """Installs a system to a chroot directory.
+
+    ImageCreator is the simplest creator class available; it will install and
+    configure a system image according to the supplied kickstart file.
+
+    e.g.
+
+      import micng.imgcreate as imgcreate
+      ks = imgcreate.read_kickstart("foo.ks")
+      imgcreate.ImageCreator(ks, "foo").create()
+
+    """
+
+    def __init__(self, ks, name):
+        """Initialize an ImageCreator instance.
+
+        ks -- a pykickstart.KickstartParser instance; this instance will be
+              used to drive the install by e.g. providing the list of packages
+              to be installed, the system configuration and %post scripts
+
+        name -- a name for the image; used for e.g. image filenames or
+                filesystem labels
+
+        """
+
+        """ Initialize package managers """
+        # package plugin manager
+        self.pkgmgr = pkgmanagers.pkgManager()
+        self.pkgmgr.load_pkg_managers()
+
+        self.ks = ks
+        """A pykickstart.KickstartParser instance."""
+
+        self.name = name
+        """A name for the image."""
+
+        self.distro_name = "MeeGo"
+
+        """Output image file names"""
+        self.outimage = []
+
+        """A flag to generate checksum"""
+        self._genchecksum = False
+
+        self.tmpdir = "/var/tmp"
+        """The directory in which all temporary files will be created."""
+
+        self.cachedir = None
+
+        self._alt_initrd_name = None
+
+        self.__builddir = None
+        self.__bindmounts = []
+
+        """ Contains the compression method that is used to compress
+        the disk image after creation, e.g., bz2.
+        This value is set with the compress_disk_image() method. """
+        self.__img_compression_method = None
+
+        # dependent commands to check
+        self._dep_checks = ["ls", "bash", "cp", "echo", "modprobe", "passwd"]
+
+        self._recording_pkgs = None
+
+        self._include_src = None
+
+        self._local_pkgs_path = None
+
+        # available size in root fs, init to 0
+        self._root_fs_avail = 0
+
+        # target arch for non-x86 image
+        self.target_arch = None
+
+        """ Name of the disk image file that is created. """
+        self._img_name = None
+
+        """ Image format """
+        self.image_format = None
+
+        """ Save the qemu emulator file name so that it can be cleaned up at the end """
+        self.qemu_emulator = None
+
+        """ No ks provided when called by convertor, so skip the dependency check """
+        if self.ks:
+            """ If we have a btrfs partition we need to check that we have the tools for it """
+            for part in self.ks.handler.partition.partitions:
+                if part.fstype and part.fstype == "btrfs":
+                    self._dep_checks.append("mkfs.btrfs")
+                    break
+
+    def set_target_arch(self, arch):
+        if arch not in arches.keys():
+            return False
+        self.target_arch = arch
+        if self.target_arch.startswith("arm"):
+            for dep in self._dep_checks:
+                if dep == "extlinux":
+                    self._dep_checks.remove(dep)
+
+            if not os.path.exists("/usr/bin/qemu-arm") or not is_statically_linked("/usr/bin/qemu-arm"):
+                self._dep_checks.append("qemu-arm-static")
+                
+            if os.path.exists("/proc/sys/vm/vdso_enabled"):
+                vdso_fh = open("/proc/sys/vm/vdso_enabled","r")
+                vdso_value = vdso_fh.read().strip()
+                vdso_fh.close()
+                if (int)(vdso_value) == 1:
+                    print "\n= WARNING ="
+                    print "vdso is enabled on your host, which might cause problems with ARM emulation."
+                    print "You can disable vdso with the following command before starting the image build:"
+                    print "echo 0 | sudo tee /proc/sys/vm/vdso_enabled"
+                    print "= WARNING =\n"
+
+        return True
+
+
+    def __del__(self):
+        self.cleanup()
+
+    #
+    # Properties
+    #
+    def __get_instroot(self):
+        if self.__builddir is None:
+            raise CreatorError("_instroot is not valid before calling mount()")
+        return self.__builddir + "/install_root"
+    _instroot = property(__get_instroot)
+    """The location of the install root directory.
+
+    This is the directory into which the system is installed. Subclasses may
+    mount a filesystem image here or copy files to/from here.
+
+    Note, this directory does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    def __get_outdir(self):
+        if self.__builddir is None:
+            raise CreatorError("_outdir is not valid before calling mount()")
+        return self.__builddir + "/out"
+    _outdir = property(__get_outdir)
+    """The staging location for the final image.
+
+    This is where subclasses should stage any files that are part of the final
+    image. ImageCreator.package() will copy any files found here into the
+    requested destination directory.
+
+    Note, this directory does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    #
+    # Hooks for subclasses
+    #
+    def _mount_instroot(self, base_on = None):
+        """Mount or prepare the install root directory.
+
+        This is the hook where subclasses may prepare the install root by e.g.
+        mounting creating and loopback mounting a filesystem image to
+        _instroot.
+
+        There is no default implementation.
+
+        base_on -- this is the value passed to mount() and can be interpreted
+                   as the subclass wishes; it might e.g. be the location of
+                   a previously created ISO containing a system image.
+
+        """
+        pass
+
+    def _unmount_instroot(self):
+        """Undo anything performed in _mount_instroot().
+
+        This is the hook where subclasses must undo anything which was done
+        in _mount_instroot(). For example, if a filesystem image was mounted
+        onto _instroot, it should be unmounted here.
+
+        There is no default implementation.
+
+        """
+        pass
+
+    def _create_bootconfig(self):
+        """Configure the image so that it's bootable.
+
+        This is the hook where subclasses may prepare the image for booting by
+        e.g. creating an initramfs and bootloader configuration.
+
+        This hook is called while the install root is still mounted, after the
+        packages have been installed and the kickstart configuration has been
+        applied, but before the %post scripts have been executed.
+
+        There is no default implementation.
+
+        """
+        pass
+
+    def _stage_final_image(self):
+        """Stage the final system image in _outdir.
+
+        This is the hook where subclasses should place the image in _outdir
+        so that package() can copy it to the requested destination directory.
+
+        By default, this moves the install root into _outdir.
+
+        """
+        shutil.move(self._instroot, self._outdir + "/" + self.name)
+
+    def get_installed_packages(self):
+        return self._pkgs_content.keys()
+
+    def _save_recording_pkgs(self, destdir):
+        """Save the list or content of installed packages to file.
+        """
+        if self._recording_pkgs not in ('content', 'name'):
+            return
+
+        pkgs = self._pkgs_content.keys()
+        pkgs.sort() # inplace op
+
+        # save package name list anyhow
+        if not os.path.exists(destdir):
+            makedirs(destdir)
+
+        namefile = os.path.join(destdir, self.name + '-pkgs.txt')
+        f = open(namefile, "w")
+        content = '\n'.join(pkgs)
+        f.write(content)
+        f.close()
+        self.outimage.append(namefile);
+
+        # if 'content', save more details
+        if self._recording_pkgs == 'content':
+            contfile = os.path.join(destdir, self.name + '-pkgs-content.txt')
+            f = open(contfile, "w")
+
+            for pkg in pkgs:
+                content = pkg + '\n'
+
+                pkgcont = self._pkgs_content[pkg]
+                items = []
+                if pkgcont.has_key('dir'):
+                    items = map(lambda x:x+'/', pkgcont['dir'])
+                if pkgcont.has_key('file'):
+                    items.extend(pkgcont['file'])
+
+                if items:
+                    content += '    '
+                    content += '\n    '.join(items)
+                    content += '\n'
+
+                content += '\n'
+                f.write(content)
+            f.close()
+            self.outimage.append(contfile)
+
+    def _get_required_packages(self):
+        """Return a list of required packages.
+
+        This is the hook where subclasses may specify a set of packages which
+        they require to be installed.
+
+        This returns an empty list by default.
+
+        Note, subclasses should usually chain up to the base class
+        implementation of this hook.
+
+        """
+        return []
+
+    def _get_excluded_packages(self):
+        """Return a list of excluded packages.
+
+        This is the hook where subclasses may specify a set of packages which
+        they require _not_ to be installed.
+
+        This returns an empty list by default.
+
+        Note, subclasses should usually chain up to the base class
+        implementation of this hook.
+
+        """
+        excluded_packages = []
+        for rpm_path in self._get_local_packages():
+            rpm_name = os.path.basename(rpm_path)
+            package_name = splitFilename(rpm_name)[0]
+            excluded_packages += [package_name]
+        return excluded_packages
+
+    def _get_local_packages(self):
+        """Return a list of rpm paths to be installed locally.
+
+        This is the hook where subclasses may specify a set of rpms which
+        they require to be installed locally.
+
+        This returns an empty list by default.
+
+        Note, subclasses should usually chain up to the base class
+        implementation of this hook.
+
+        """
+        if self._local_pkgs_path:
+            if os.path.isdir(self._local_pkgs_path):
+                return glob.glob(
+                        os.path.join(self._local_pkgs_path, '*.rpm'))
+            elif os.path.splitext(self._local_pkgs_path)[-1] == '.rpm':
+                return [self._local_pkgs_path]
+
+        return []
+
+    def _get_fstab(self):
+        """Return the desired contents of /etc/fstab.
+
+        This is the hook where subclasses may specify the contents of
+        /etc/fstab by returning a string containing the desired contents.
+
+        A sensible default implementation is provided.
+
+        """
+        s =  "/dev/root  /         %s    %s 0 0\n" % (self._fstype, "defaults,noatime" if not self._fsopts else self._fsopts)
+        s += self._get_fstab_special()
+        return s
+
+    def _get_fstab_special(self):
+        s = "devpts     /dev/pts  devpts  gid=5,mode=620   0 0\n"
+        s += "tmpfs      /dev/shm  tmpfs   defaults         0 0\n"
+        s += "proc       /proc     proc    defaults         0 0\n"
+        s += "sysfs      /sys      sysfs   defaults         0 0\n"
+        return s
+
+    def _get_post_scripts_env(self, in_chroot):
+        """Return an environment dict for %post scripts.
+
+        This is the hook where subclasses may specify some environment
+        variables for %post scripts by returning a dict containing the desired
+        environment.
+
+        By default, this returns an empty dict.
+
+        in_chroot -- whether this %post script is to be executed chroot()ed
+                     into _instroot.
+
+        """
+        return {}
+
+    def __get_imgname(self):
+        return self.name
+    _name = property(__get_imgname)
+    """The name of the image file.
+
+    """
+
+    def _get_kernel_versions(self):
+        """Return a dict detailing the available kernel types/versions.
+
+        This is the hook where subclasses may override what kernel types and
+        versions should be available for e.g. creating the bootloader
+        configuration.
+
+        A dict should be returned mapping the available kernel types to a list
+        of the available versions for those kernels.
+
+        The default implementation uses rpm to iterate over everything
+        providing 'kernel', finds /boot/vmlinuz-* and returns the version
+        obtained from the vmlinuz filename. (This can differ from the kernel
+        RPM's n-v-r in the case of e.g. xen)
+
+        """
+        def get_version(header):
+            version = None
+            for f in header['filenames']:
+                if f.startswith('/boot/vmlinuz-'):
+                    version = f[14:]
+            return version
+
+        ts = rpm.TransactionSet(self._instroot)
+
+        ret = {}
+        for header in ts.dbMatch('provides', 'kernel'):
+            version = get_version(header)
+            if version is None:
+                continue
+
+            name = header['name']
+            if not name in ret:
+                ret[name] = [version]
+            elif not version in ret[name]:
+                ret[name].append(version)
+
+        return ret
+
+    #
+    # Helpers for subclasses
+    #
+    def _do_bindmounts(self):
+        """Mount various system directories onto _instroot.
+
+        This method is called by mount(), but may also be used by subclasses
+        in order to re-mount the bindmounts after modifying the underlying
+        filesystem.
+
+        """
+        for b in self.__bindmounts:
+            b.mount()
+
+    def _undo_bindmounts(self):
+        """Unmount the bind-mounted system directories from _instroot.
+
+        This method is usually only called by unmount(), but may also be used
+        by subclasses in order to gain access to the filesystem obscured by
+        the bindmounts - e.g. in order to create device nodes on the image
+        filesystem.
+
+        """
+        self.__bindmounts.reverse()
+        for b in self.__bindmounts:
+            b.unmount()
+
+    def _chroot(self):
+        """Chroot into the install root.
+
+        This method may be used by subclasses when executing programs inside
+        the install root e.g.
+
+          subprocess.call(["/bin/ls"], preexec_fn = self._chroot)
+
+        """
+        os.chroot(self._instroot)
+        os.chdir("/")
+
+    def _mkdtemp(self, prefix = "tmp-"):
+        """Create a temporary directory.
+
+        This method may be used by subclasses to create a temporary directory
+        for use in building the final image - e.g. a subclass might create
+        a temporary directory in order to bundle a set of files into a package.
+
+        The subclass may delete this directory if it wishes, but it will be
+        automatically deleted by cleanup().
+
+        The absolute path to the temporary directory is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the directory;
+                  defaults to "tmp-".
+
+        """
+        self.__ensure_builddir()
+        return tempfile.mkdtemp(dir = self.__builddir, prefix = prefix)
+
+    def _mkstemp(self, prefix = "tmp-"):
+        """Create a temporary file.
+
+        This method may be used by subclasses to create a temporary file
+        for use in building the final image - e.g. a subclass might need
+        a temporary location to unpack a compressed file.
+
+        The subclass may delete this file if it wishes, but it will be
+        automatically deleted by cleanup().
+
+        A tuple containing a file descriptor (as returned by os.open()) and the
+        absolute path to the temporary file is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the file;
+                  defaults to "tmp-".
+
+        """
+        self.__ensure_builddir()
+        return tempfile.mkstemp(dir = self.__builddir, prefix = prefix)
+
+    def _mktemp(self, prefix = "tmp-"):
+        """Create a temporary file.
+
+        This method simply calls _mkstemp() and closes the returned file
+        descriptor.
+
+        The absolute path to the temporary file is returned.
+
+        Note, this method should only be called after mount() has been called.
+
+        prefix -- a prefix which should be used when creating the file;
+                  defaults to "tmp-".
+
+        """
+
+        (f, path) = self._mkstemp(prefix)
+        os.close(f)
+        return path
+
+    #
+    # Actual implementation
+    #
+    def __ensure_builddir(self):
+        if not self.__builddir is None:
+            return
+
+        try:
+            self.__builddir = tempfile.mkdtemp(dir = self.tmpdir,
+                                               prefix = "imgcreate-")
+        except OSError, (err, msg):
+            raise CreatorError("Failed to create build directory in %s: %s" %
+                               (self.tmpdir, msg))
+
+    def get_cachedir(self, cachedir = None):
+        if self.cachedir:
+            return self.cachedir
+
+        self.__ensure_builddir()
+        if cachedir:
+            self.cachedir = cachedir
+        else:
+            self.cachedir = self.__builddir + "/yum-cache"
+        makedirs(self.cachedir)
+        return self.cachedir
+
+    def __sanity_check(self):
+        """Ensure that the config we've been given is sane."""
+        if not (kickstart.get_packages(self.ks) or
+                kickstart.get_groups(self.ks)):
+            raise CreatorError("No packages or groups specified")
+
+        kickstart.convert_method_to_repo(self.ks)
+
+        if not kickstart.get_repos(self.ks):
+            raise CreatorError("No repositories specified")
+
+    def __write_fstab(self):
+        fstab = open(self._instroot + "/etc/fstab", "w")
+        fstab.write(self._get_fstab())
+        fstab.close()
+
+    def __create_minimal_dev(self):
+        """Create a minimal /dev so that we don't corrupt the host /dev"""
+        origumask = os.umask(0000)
+        devices = (('null',   1, 3, 0666),
+                   ('urandom',1, 9, 0666),
+                   ('random', 1, 8, 0666),
+                   ('full',   1, 7, 0666),
+                   ('ptmx',   5, 2, 0666),
+                   ('tty',    5, 0, 0666),
+                   ('zero',   1, 5, 0666))
+        links = (("/proc/self/fd", "/dev/fd"),
+                 ("/proc/self/fd/0", "/dev/stdin"),
+                 ("/proc/self/fd/1", "/dev/stdout"),
+                 ("/proc/self/fd/2", "/dev/stderr"))
+
+        for (node, major, minor, perm) in devices:
+            if not os.path.exists(self._instroot + "/dev/" + node):
+                os.mknod(self._instroot + "/dev/" + node, perm | stat.S_IFCHR, os.makedev(major,minor))
+        for (src, dest) in links:
+            if not os.path.exists(self._instroot + dest):
+                os.symlink(src, self._instroot + dest)
+        os.umask(origumask)
+
+
+    def mount(self, base_on = None, cachedir = None):
+        """Setup the target filesystem in preparation for an install.
+
+        This function sets up the filesystem which the ImageCreator will
+        install into and configure. The ImageCreator class merely creates an
+        install root directory, bind mounts some system directories (e.g. /dev)
+        and writes out /etc/fstab. Other subclasses may also e.g. create a
+        sparse file, format it and loopback mount it to the install root.
+
+        base_on -- a previous install on which to base this install; defaults
+                   to None, causing a new image to be created
+
+        cachedir -- a directory in which to store the Yum cache; defaults to
+                    None, causing a new cache to be created; by setting this
+                    to another directory, the same cache can be reused across
+                    multiple installs.
+
+        """
+        self.__ensure_builddir()
+
+        makedirs(self._instroot)
+        makedirs(self._outdir)
+
+        self._mount_instroot(base_on)
+
+        for d in ("/dev/pts", "/etc", "/boot", "/var/log", "/var/cache/yum", "/sys", "/proc", "/usr/bin"):
+            makedirs(self._instroot + d)
+
+        if self.target_arch and self.target_arch.startswith("arm"):
+            self.qemu_emulator = setup_qemu_emulator(self._instroot, self.target_arch)
+
+        self.get_cachedir(cachedir)
+
+        # bind mount system directories into _instroot
+        for (f, dest) in [("/sys", None), ("/proc", None), ("/proc/sys/fs/binfmt_misc", None),
+                          ("/dev/pts", None),
+                          (self.get_cachedir(), "/var/cache/yum")]:
+            self.__bindmounts.append(BindChrootMount(f, self._instroot, dest))
+
+
+        self._do_bindmounts()
+
+        self.__create_minimal_dev()
+
+        if os.path.exists(self._instroot + "/etc/mtab"):
+            os.unlink(self._instroot + "/etc/mtab")
+        os.symlink("../proc/mounts", self._instroot + "/etc/mtab")
+
+        self.__write_fstab()
+
+        # get size of available space in 'instroot' fs
+        self._root_fs_avail = get_filesystem_avail(self._instroot)
+
+    def unmount(self):
+        """Unmounts the target filesystem.
+
+        The ImageCreator class detaches the system from the install root, but
+        other subclasses may also detach the loopback mounted filesystem image
+        from the install root.
+
+        """
+        try:
+            os.unlink(self._instroot + "/etc/mtab")
+            if self.qemu_emulator:
+                os.unlink(self._instroot + self.qemu_emulator)
+            """ Clean up yum garbage """
+            instroot_pdir = os.path.dirname(self._instroot + self._instroot)
+            if os.path.exists(instroot_pdir):
+                shutil.rmtree(instroot_pdir, ignore_errors = True)
+        except OSError:
+            pass
+
+
+        self._undo_bindmounts()
+
+        self._unmount_instroot()
+
+    def cleanup(self):
+        """Unmounts the target filesystem and deletes temporary files.
+
+        This method calls unmount() and then deletes any temporary files and
+        directories that were created on the host system while building the
+        image.
+
+        Note, make sure to call this method once finished with the creator
+        instance in order to ensure no stale files are left on the host e.g.:
+
+          creator = ImageCreator(ks, name)
+          try:
+              creator.create()
+          finally:
+              creator.cleanup()
+
+        """
+        if not self.__builddir:
+            return
+
+        self.unmount()
+
+        shutil.rmtree(self.__builddir, ignore_errors = True)
+        self.__builddir = None
+
+    def __is_excluded_pkg(self, pkg):
+        if pkg in self._excluded_pkgs:
+            self._excluded_pkgs.remove(pkg)
+            return True
+
+        for xpkg in self._excluded_pkgs:
+            if xpkg.endswith('*'):
+                if pkg.startswith(xpkg[:-1]):
+                    return True
+            elif xpkg.startswith('*'):
+                if pkg.endswith(xpkg[1:]):
+                    return True
+
+        return None
+
+    def __select_packages(self, pkg_manager):
+        skipped_pkgs = []
+        for pkg in self._required_pkgs:
+            e = pkg_manager.selectPackage(pkg)
+            if e:
+                if kickstart.ignore_missing(self.ks):
+                    skipped_pkgs.append(pkg)
+                elif self.__is_excluded_pkg(pkg):
+                    skipped_pkgs.append(pkg)
+                else:
+                    raise CreatorError("Failed to find package '%s' : %s" %
+                                       (pkg, e))
+
+        for pkg in skipped_pkgs:
+            logging.warn("Skipping missing package '%s'" % (pkg,))
+
+    def __select_groups(self, pkg_manager):
+        skipped_groups = []
+        for group in self._required_groups:
+            e = pkg_manager.selectGroup(group.name, group.include)
+            if e:
+                if kickstart.ignore_missing(self.ks):
+                    skipped_groups.append(group)
+                else:
+                    raise CreatorError("Failed to find group '%s' : %s" %
+                                       (group.name, e))
+
+        for group in skipped_groups:
+            logging.warn("Skipping missing group '%s'" % (group.name,))
+
+    def __deselect_packages(self, pkg_manager):
+        for pkg in self._excluded_pkgs:
+            pkg_manager.deselectPackage(pkg)
+
+    def __localinst_packages(self, pkg_manager):
+        for rpm_path in self._get_local_packages():
+            pkg_manager.installLocal(rpm_path)
+
+    def install(self, repo_urls = {}):
+        """Install packages into the install root.
+
+        This function installs the packages listed in the supplied kickstart
+        into the install root. By default, the packages are installed from the
+        repository URLs specified in the kickstart.
+
+        repo_urls -- a dict which maps a repository name to a repository URL;
+                     if supplied, this causes any repository URLs specified in
+                     the kickstart to be overridden.
+
+        """
+
+
+        # initialize pkg list to install
+        #import pdb
+        #pdb.set_trace()
+        if self.ks:
+            self.__sanity_check()
+
+            self._required_pkgs = \
+                kickstart.get_packages(self.ks, self._get_required_packages())
+            self._excluded_pkgs = \
+                kickstart.get_excluded(self.ks, self._get_excluded_packages())
+            self._required_groups = kickstart.get_groups(self.ks)
+        else:
+            self._required_pkgs = None
+            self._excluded_pkgs = None
+            self._required_groups = None
+
+        yum_conf = self._mktemp(prefix = "yum.conf-")
+
+        keep_record = None
+        if self._include_src:
+            keep_record = 'include_src'
+        if self._recording_pkgs in ('name', 'content'):
+            keep_record = self._recording_pkgs
+
+        pkg_manager = self.get_pkg_manager(keep_record)
+        pkg_manager.setup(yum_conf, self._instroot)
+
+        for repo in kickstart.get_repos(self.ks, repo_urls):
+            (name, baseurl, mirrorlist, inc, exc, proxy, proxy_username, proxy_password, debuginfo, source, gpgkey, disable) = repo
+
+            yr = pkg_manager.addRepository(name, baseurl, mirrorlist, proxy, proxy_username, proxy_password, inc, exc)
+        
+        if kickstart.exclude_docs(self.ks):
+            rpm.addMacro("_excludedocs", "1")
+        rpm.addMacro("__file_context_path", "%{nil}")
+        if kickstart.inst_langs(self.ks) != None:
+            rpm.addMacro("_install_langs", kickstart.inst_langs(self.ks))
+
+        try:
+            try:
+                #import pdb
+                #pdb.set_trace()
+                self.__select_packages(pkg_manager)
+                self.__select_groups(pkg_manager)
+                self.__deselect_packages(pkg_manager)
+                self.__localinst_packages(pkg_manager)
+
+                BOOT_SAFEGUARD = 256L * 1024 * 1024 # 256M
+                checksize = self._root_fs_avail
+                if checksize:
+                    checksize -= BOOT_SAFEGUARD
+                if self.target_arch:
+                    pkg_manager._add_prob_flags(rpm.RPMPROB_FILTER_IGNOREARCH)
+                pkg_manager.runInstall(checksize)
+            except CreatorError, e:
+                raise CreatorError("%s" % (e,))
+        finally:
+            if keep_record:
+                self._pkgs_content = pkg_manager.getAllContent()
+
+            pkg_manager.closeRpmDB()
+            pkg_manager.close()
+            os.unlink(yum_conf)
+
+        # do some clean up to avoid lvm info leakage.  this sucks.
+        for subdir in ("cache", "backup", "archive"):
+            lvmdir = self._instroot + "/etc/lvm/" + subdir
+            try:
+                for f in os.listdir(lvmdir):
+                    os.unlink(lvmdir + "/" + f)
+            except:
+                pass
+
+    def __run_post_scripts(self):
+        print "Running scripts"
+        for s in kickstart.get_post_scripts(self.ks):
+            (fd, path) = tempfile.mkstemp(prefix = "ks-script-",
+                                          dir = self._instroot + "/tmp")
+
+            s.script = s.script.replace("\r", "")
+            os.write(fd, s.script)
+            os.close(fd)
+            os.chmod(path, 0700)
+
+            env = self._get_post_scripts_env(s.inChroot)
+
+            if not s.inChroot:
+                env["INSTALL_ROOT"] = self._instroot
+                env["IMG_NAME"] = self._name
+                preexec = None
+                script = path
+            else:
+                preexec = self._chroot
+                script = "/tmp/" + os.path.basename(path)
+
+            try:
+                try:
+                    subprocess.call([s.interp, script],
+                                    preexec_fn = preexec, env = env, stdout = sys.stdout, stderr = sys.stderr)
+                except OSError, (err, msg):
+                    raise CreatorError("Failed to execute %%post script "
+                                       "with '%s' : %s" % (s.interp, msg))
+            finally:
+                os.unlink(path)
+
+    def __save_repo_keys(self, repodata):
+        if not repodata:
+            return None
+        gpgkeydir = "/etc/pki/rpm-gpg"
+        makedirs(self._instroot + gpgkeydir)
+        for repo in repodata:
+            if repo["repokey"]:
+                repokey = gpgkeydir + "/RPM-GPG-KEY-%s" %  repo["name"]
+                shutil.copy(repo["repokey"], self._instroot + repokey)
+
+    def configure(self, repodata = None):
+        """Configure the system image according to the kickstart.
+
+        This method applies the (e.g. keyboard or network) configuration
+        specified in the kickstart and executes the kickstart %post scripts.
+
+        If necessary, it also prepares the image to be bootable by e.g.
+        creating an initrd and bootloader configuration.
+
+        """
+        ksh = self.ks.handler
+
+        try:
+            kickstart.LanguageConfig(self._instroot).apply(ksh.lang)
+            kickstart.KeyboardConfig(self._instroot).apply(ksh.keyboard)
+            kickstart.TimezoneConfig(self._instroot).apply(ksh.timezone)
+            #kickstart.AuthConfig(self._instroot).apply(ksh.authconfig)
+            kickstart.FirewallConfig(self._instroot).apply(ksh.firewall)
+            kickstart.RootPasswordConfig(self._instroot).apply(ksh.rootpw)
+            kickstart.UserConfig(self._instroot).apply(ksh.user)
+            kickstart.ServicesConfig(self._instroot).apply(ksh.services)
+            kickstart.XConfig(self._instroot).apply(ksh.xconfig)
+            kickstart.NetworkConfig(self._instroot).apply(ksh.network)
+            kickstart.RPMMacroConfig(self._instroot).apply(self.ks)
+            kickstart.DesktopConfig(self._instroot).apply(ksh.desktop)
+            self.__save_repo_keys(repodata)
+            kickstart.MoblinRepoConfig(self._instroot).apply(ksh.repo, repodata)
+        except:
+            print "Failed to apply configuration to image"
+            raise
+
+        self._create_bootconfig()
+        self.__run_post_scripts()
+
+    def launch_shell(self, launch):
+        """Launch a shell in the install root.
+
+        This method launches a bash shell chroot()ed into the install root;
+        this can be useful for debugging.
+
+        """
+        if launch:
+            print "Launching shell. Exit to continue."
+            print "----------------------------------"
+            subprocess.call(["/bin/bash"], preexec_fn = self._chroot)
+
+    def do_genchecksum(self, image_name):
+        if not self._genchecksum:
+            return
+
+        """ Generate md5sum if /usr/bin/md5sum is available """
+        if os.path.exists("/usr/bin/md5sum"):
+            p = subprocess.Popen(["/usr/bin/md5sum", "-b", image_name],
+                                 stdout=subprocess.PIPE)
+            (md5sum, errorstr) = p.communicate()
+            if p.returncode != 0:
+                logging.warning("Can't generate md5sum for image %s" % image_name)
+            else:
+                pattern = re.compile("\*.*$")
+                md5sum = pattern.sub("*" + os.path.basename(image_name), md5sum)
+                fd = open(image_name + ".md5sum", "w")
+                fd.write(md5sum)
+                fd.close()
+                self.outimage.append(image_name+".md5sum")
+
+    def package(self, destdir = "."):
+        """Prepares the created image for final delivery.
+
+        In its simplest form, this method merely copies the install root to the
+        supplied destination directory; other subclasses may choose to package
+        the image by e.g. creating a bootable ISO containing the image and
+        bootloader configuration.
+
+        destdir -- the directory into which the final image should be moved;
+                   this defaults to the current directory.
+
+        """
+        self._stage_final_image()
+
+        if self.__img_compression_method:
+            if not self._img_name:
+                raise CreatorError("Image name not set.")
+            rc = None
+            img_location = os.path.join(self._outdir,self._img_name)
+            if self.__img_compression_method == "bz2":
+                bzip2 = find_binary_path('bzip2')
+                print "Compressing %s with bzip2. Please wait..." % img_location
+                rc = subprocess.call([bzip2, "-f", img_location])
+                if rc:
+                    raise CreatorError("Failed to compress image %s with %s." % (img_location, self.__img_compression_method))
+                for bootimg in glob.glob(os.path.dirname(img_location) + "/*-boot.bin"):
+                    print "Compressing %s with bzip2. Please wait..." % bootimg
+                    rc = subprocess.call([bzip2, "-f", bootimg])
+                    if rc:
+                        raise CreatorError("Failed to compress image %s with %s." % (bootimg, self.__img_compression_method))
+
+        if self._recording_pkgs:
+            self._save_recording_pkgs(destdir)
+
+        """ For image formats with two or more image files, it is better to put them under a directory """
+        if self.image_format in ("raw", "vmdk", "vdi", "nand", "mrstnand"):
+            destdir = os.path.join(destdir, "%s-%s" % (self.name, self.image_format))
+            logging.debug("creating destination dir: %s" % destdir)
+            makedirs(destdir)
+
+        # Ensure all data is flushed to _outdir
+        synccmd = find_binary_path("sync")
+        subprocess.call([synccmd])
+
+        for f in os.listdir(self._outdir):
+            shutil.move(os.path.join(self._outdir, f),
+                        os.path.join(destdir, f))
+            self.outimage.append(os.path.join(destdir, f))
+            self.do_genchecksum(os.path.join(destdir, f))
+
+    def create(self):
+        """Install, configure and package an image.
+
+        This method is a utility method which creates an image by calling some
+        of the other methods in the following order - mount(), install(),
+        configure(), unmount() and package().
+
+        """
+        self.mount()
+        self.install()
+        self.configure()
+        self.unmount()
+        self.package()
+
+    def print_outimage_info(self):
+        print "Your new image can be found here:"
+        self.outimage.sort()
+        for file in self.outimage:
+            print os.path.abspath(file)
+
+    def check_depend_tools(self):
+        for tool in self._dep_checks:
+            find_binary_path(tool)
+
+    def package_output(self, image_format, destdir = ".", package="none"):
+        if not package or package == "none":
+            return
+
+        destdir = os.path.abspath(os.path.expanduser(destdir))
+        (pkg, comp) = os.path.splitext(package)
+        if comp:
+            comp=comp.lstrip(".")
+
+        if pkg == "tar":
+            if comp:
+                dst = "%s/%s-%s.tar.%s" % (destdir, self.name, image_format, comp)
+            else:
+                dst = "%s/%s-%s.tar" % (destdir, self.name, image_format)
+            print "creating %s" % dst
+            tar = tarfile.open(dst, "w:" + comp)
+
+            for file in self.outimage:
+                print "adding %s to %s" % (file, dst)
+                tar.add(file, arcname=os.path.join("%s-%s" % (self.name, image_format), os.path.basename(file)))
+                if os.path.isdir(file):
+                    shutil.rmtree(file, ignore_errors = True)
+                else:
+                    os.remove(file)
+
+
+            tar.close()
+
+            '''All the files in outimage have been packaged into the tar.* file'''
+            self.outimage = [dst]
+
+    def release_output(self, config, destdir, name, release):
+        self.outimage = create_release(config, destdir, name, self.outimage, release)
+
+    def save_kernel(self, destdir):
+        if not os.path.exists(destdir):
+            makedirs(destdir)
+        for kernel in glob.glob("%s/boot/vmlinuz-*" % self._instroot):
+            kernelfilename = "%s/%s-%s" % (destdir, self.name, os.path.basename(kernel))
+            shutil.copy(kernel, kernelfilename)
+            self.outimage.append(kernelfilename)
+
+    def compress_disk_image(self, compression_method):
+        """
+        With this you can set the method that is used to compress the disk
+        image after it is created.
+        """
+
+        if compression_method not in ('bz2',):
+            raise CreatorError("Given disk image compression method ('%s') is not valid." % (compression_method))
+
+        self.__img_compression_method = compression_method
+
+    def set_pkg_manager(self, name):
+        self.pkgmgr.set_default_pkg_manager(name)
+
+    def get_pkg_manager(self, recording_pkgs=None):
+        pkgmgr_instance = self.pkgmgr.get_default_pkg_manager()
+        if not pkgmgr_instance:
+            raise CreatorError("No package manager available")
+        return pkgmgr_instance(creator = self, recording_pkgs = recording_pkgs)
+
+class LoopImageCreator(ImageCreator):
+    """Installs a system into a loopback-mountable filesystem image.
+
+    LoopImageCreator is a straightforward ImageCreator subclass; the system
+    is installed into an ext3 filesystem on a sparse file which can be
+    subsequently loopback-mounted.
+
+    """
+
+    def __init__(self, ks, name, fslabel = None):
+        """Initialize a LoopImageCreator instance.
+
+        This method takes the same arguments as ImageCreator.__init__() with
+        the addition of:
+
+        fslabel -- A string used as a label for any filesystems created.
+
+        """
+        ImageCreator.__init__(self, ks, name)
+
+        self.__fslabel = None
+        self.fslabel = fslabel
+
+        self.__minsize_KB = 0
+        self.__blocksize = 4096
+        if self.ks:
+            self.__fstype = kickstart.get_image_fstype(self.ks, "ext3")
+            self.__fsopts = kickstart.get_image_fsopts(self.ks, "defaults,noatime")
+        else:
+            self.__fstype = None
+            self.__fsopts = None
+
+        self.__instloop = None
+        self.__imgdir = None
+
+        if self.ks:
+            self.__image_size = kickstart.get_image_size(self.ks,
+                                                         4096L * 1024 * 1024)
+        else:
+            self.__image_size = 0
+
+        self._img_name = self.name + ".img"
+
+    def _set_fstype(self, fstype):
+        self.__fstype = fstype
+
+    def _set_image_size(self, imgsize):
+        self.__image_size = imgsize
+
+    #
+    # Properties
+    #
+    def __get_fslabel(self):
+        if self.__fslabel is None:
+            return self.name
+        else:
+            return self.__fslabel
+    def __set_fslabel(self, val):
+        if val is None:
+            self.__fslabel = None
+        else:
+            self.__fslabel = val[:FSLABEL_MAXLEN]
+    fslabel = property(__get_fslabel, __set_fslabel)
+    """A string used to label any filesystems created.
+
+    Some filesystems impose a constraint on the maximum allowed size of the
+    filesystem label. In the case of ext3 it's 16 characters, but in the case
+    of ISO9660 it's 32 characters.
+
+    mke2fs silently truncates the label, but mkisofs aborts if the label is too
+    long. So, for convenience sake, any string assigned to this attribute is
+    silently truncated to FSLABEL_MAXLEN (32) characters.
+
+    """
+
+    def __get_image(self):
+        if self.__imgdir is None:
+            raise CreatorError("_image is not valid before calling mount()")
+        return self.__imgdir + "/meego.img"
+    _image = property(__get_image)
+    """The location of the image file.
+
+    This is the path to the filesystem image. Subclasses may use this path
+    in order to package the image in _stage_final_image().
+
+    Note, this directory does not exist before ImageCreator.mount() is called.
+
+    Note also, this is a read-only attribute.
+
+    """
+
+    def __get_blocksize(self):
+        return self.__blocksize
+    def __set_blocksize(self, val):
+        if self.__instloop:
+            raise CreatorError("_blocksize must be set before calling mount()")
+        try:
+            self.__blocksize = int(val)
+        except ValueError:
+            raise CreatorError("'%s' is not a valid integer value "
+                               "for _blocksize" % val)
+    _blocksize = property(__get_blocksize, __set_blocksize)
+    """The block size used by the image's filesystem.
+
+    This is the block size used when creating the filesystem image. Subclasses
+    may change this if they wish to use something other than a 4k block size.
+
+    Note, this attribute may only be set before calling mount().
+
+    """
+
+    def __get_fstype(self):
+        return self.__fstype
+    def __set_fstype(self, val):
+        if val != "ext2" and val != "ext3":
+            raise CreatorError("Unknown _fstype '%s' supplied" % val)
+        self.__fstype = val
+    _fstype = property(__get_fstype, __set_fstype)
+    """The type of filesystem used for the image.
+
+    This is the filesystem type used when creating the filesystem image.
+    Subclasses may change this if they wish to use something other than ext3.
+
+    Note, only ext2 and ext3 are currently supported.
+
+    Note also, this attribute may only be set before calling mount().
+
+    """
+
+    def __get_fsopts(self):
+        return self.__fsopts
+    def __set_fsopts(self, val):
+        self.__fsopts = val
+    _fsopts = property(__get_fsopts, __set_fsopts)
+    """Mount options of filesystem used for the image.
+
+    This can be specified by --fsoptions="xxx,yyy" in part command in
+    kickstart file.
+    """
+
+    #
+    # Helpers for subclasses
+    #
+    def _resparse(self, size = None):
+        """Rebuild the filesystem image to be as sparse as possible.
+
+        This method should be used by subclasses when staging the final image
+        in order to reduce the actual space taken up by the sparse image file
+        to be as little as possible.
+
+        This is done by resizing the filesystem to the minimal size (thereby
+        eliminating any space taken up by deleted files) and then resizing it
+        back to the supplied size.
+
+        size -- the size, in bytes, to which the filesystem image should be
+                resized to after it has been minimized; this defaults to None,
+                causing the original size specified by the kickstart file to
+                be used (or 4GiB if not specified in the kickstart).
+
+        """
+        return self.__instloop.resparse(size)
+
+    def _base_on(self, base_on):
+        shutil.copyfile(base_on, self._image)
+
+    #
+    # Actual implementation
+    #
+    def _mount_instroot(self, base_on = None):
+        self.__imgdir = self._mkdtemp()
+
+        if not base_on is None:
+            self._base_on(base_on)
+
+        if self.__fstype in ("ext2", "ext3", "ext4"):
+            MyDiskMount = ExtDiskMount
+        elif self.__fstype == "btrfs":
+            MyDiskMount = BtrfsDiskMount
+        else:
+            raise CreatorError("Unknown filesystem type '%s' for the image" % self.__fstype)
+
+        self.__instloop = MyDiskMount(SparseLoopbackDisk(self._image, self.__image_size),
+                                       self._instroot,
+                                       self.__fstype,
+                                       self.__blocksize,
+                                       self.fslabel)
+
+        try:
+            self.__instloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (self._image, e))
+
+    def _unmount_instroot(self):
+        if not self.__instloop is None:
+            self.__instloop.cleanup()
+
+    def _stage_final_image(self):
+        self._resparse()
+        shutil.move(self._image, self._outdir + "/" + self._img_name)
+
+class LiveImageCreatorBase(LoopImageCreator):
+    """A base class for LiveCD image creators.
+
+    This class serves as a base class for the architecture-specific LiveCD
+    image creator subclass, LiveImageCreator.
+
+    LiveImageCreator creates a bootable ISO containing the system image,
+    bootloader, bootloader configuration, kernel and initramfs.
+
+    """
+
+    def __init__(self, *args):
+        """Initialise a LiveImageCreator instance.
+
+        This method takes the same arguments as ImageCreator.__init__().
+
+        """
+        LoopImageCreator.__init__(self, *args)
+
+        self.skip_compression = False
+        """Controls whether to use squashfs to compress the image."""
+
+        self.skip_minimize = False
+        """Controls whether an image minimizing snapshot should be created.
+
+        This snapshot can be used when copying the system image from the ISO in
+        order to minimize the amount of data that needs to be copied; simply,
+        it makes it possible to create a version of the image's filesystem with
+        no spare space.
+
+        """
+
+        self.actasconvertor = False
+        """A flag which indicates that this creator acts as a convertor"""
+
+        if self.ks:
+            self._timeout = kickstart.get_timeout(self.ks, 10)
+        else:
+            self._timeout = 10
+        """The bootloader timeout from kickstart."""
+
+        if self.ks:
+            self._default_kernel = kickstart.get_default_kernel(self.ks, "kernel")
+        else:
+            self._default_kernel = None
+        """The default kernel type from kickstart."""
+
+        self.__isodir = None
+
+        self.__modules = ["=ata", "sym53c8xx", "aic7xxx", "=usb", "=firewire", "=mmc", "=pcmcia", "mptsas"]
+        if self.ks:
+            self.__modules.extend(kickstart.get_modules(self.ks))
+
+        self._dep_checks.extend(["isohybrid", "unsquashfs", "mksquashfs", "dd", "genisoimage"])
+
+    #
+    # Hooks for subclasses
+    #
+    def _configure_bootloader(self, isodir):
+        """Create the architecture-specific bootloader configuration.
+
+        This is the hook where subclasses must create the bootloader
+        configuration in order to allow a bootable ISO to be built.
+
+        isodir -- the directory where the contents of the ISO are to be staged
+
+        """
+        raise CreatorError("Bootloader configuration is arch-specific, "
+                           "but not implemented for this arch!")
+    def _get_menu_options(self):
+        """Return a menu options string for syslinux configuration.
+
+        """
+        r = kickstart.get_menu_args(self.ks)
+        return r
+
+    def _get_kernel_options(self):
+        """Return a kernel options string for bootloader configuration.
+
+        This is the hook where subclasses may specify a set of kernel options
+        which should be included in the image's bootloader configuration.
+
+        A sensible default implementation is provided.
+
+        """
+        r = kickstart.get_kernel_args(self.ks)
+        if os.path.exists(self._instroot + "/usr/bin/rhgb") or \
+           os.path.exists(self._instroot + "/usr/bin/plymouth"):
+            r += " rhgb"
+        return r
+
+    def _get_mkisofs_options(self, isodir):
+        """Return the architecture specific mkisosfs options.
+
+        This is the hook where subclasses may specify additional arguments to
+        mkisofs, e.g. to enable a bootable ISO to be built.
+
+        By default, an empty list is returned.
+
+        """
+        return []
+
+    #
+    # Helpers for subclasses
+    #
+    def _has_checkisomd5(self):
+        """Check whether checkisomd5 is available in the install root."""
+        def exists(instroot, path):
+            return os.path.exists(instroot + path)
+
+        if (exists(self._instroot, "/usr/lib/moblin-installer-runtime/checkisomd5") or
+            exists(self._instroot, "/usr/bin/checkisomd5")):
+            if (os.path.exists("/usr/bin/implantisomd5") or
+               os.path.exists("/usr/lib/anaconda-runtime/implantisomd5")):
+                return True
+
+        return False
+
+    def _uncompress_squashfs(self, squashfsimg, outdir):
+        """Uncompress file system from squshfs image"""
+        unsquashfs = find_binary_path("unsquashfs")
+        args = [unsquashfs, "-d", outdir, squashfsimg ]
+        rc = subprocess.call(args)
+        if (rc != 0):
+            raise CreatorError("Failed to uncompress %s." % squashfsimg)
+    #
+    # Actual implementation
+    #
+    def _base_on(self, base_on):
+        """Support Image Convertor"""
+        if self.actasconvertor:
+            if os.path.exists(base_on) and not os.path.isfile(base_on):
+                ddcmd = find_binary_path("dd")
+                args = [ ddcmd, "if=%s" % base_on, "of=%s" % self._image ]
+                print "dd %s -> %s" % (base_on, self._image)
+                rc = subprocess.call(args)
+                if rc != 0:
+                    raise CreatorError("Failed to dd from %s to %s" % (base_on, self._image))
+                self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
+            if os.path.isfile(base_on):
+                print "Copying file system..."
+                shutil.copyfile(base_on, self._image)
+                self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
+            return
+
+        """helper function to extract ext3 file system from a live CD ISO"""
+        isoloop = DiskMount(LoopbackDisk(base_on, 0), self._mkdtemp())
+
+        try:
+            isoloop.mount()
+        except MountError, e:
+            raise CreatorError("Failed to loopback mount '%s' : %s" %
+                               (base_on, e))
+
+        # legacy LiveOS filesystem layout support, remove for F9 or F10
+        if os.path.exists(isoloop.mountdir + "/squashfs.img"):
+            squashimg = isoloop.mountdir + "/squashfs.img"
+        else:
+            squashimg = isoloop.mountdir + "/LiveOS/squashfs.img"
+
+        tmpoutdir = self._mkdtemp()
+        # unsquashfs requires that the output directory not exist
+        shutil.rmtree(tmpoutdir, ignore_errors = True)
+        self._uncompress_squashfs(squashimg, tmpoutdir)
+
+        try:
+            # legacy LiveOS filesystem layout support, remove for F9 or F10
+            if os.path.exists(tmpoutdir + "/os.img"):
+                os_image = tmpoutdir + "/os.img"
+            else:
+                os_image = tmpoutdir + "/LiveOS/ext3fs.img"
+
+            if not os.path.exists(os_image):
+                raise CreatorError("'%s' is not a valid live CD ISO : neither "
+                                   "LiveOS/ext3fs.img nor os.img exist" %
+                                   base_on)
+
+            print "Copying file system..."
+            shutil.copyfile(os_image, self._image)
+            self._set_image_size(get_file_size(self._image) * 1024L * 1024L)
+        finally:
+            shutil.rmtree(tmpoutdir, ignore_errors = True)
+            isoloop.cleanup()
+
+    def _mount_instroot(self, base_on = None):
+        LoopImageCreator._mount_instroot(self, base_on)
+        self.__write_initrd_conf(self._instroot + "/etc/sysconfig/mkinitrd")
+
+    def _unmount_instroot(self):
+        try:
+            os.unlink(self._instroot + "/etc/sysconfig/mkinitrd")
+        except:
+            pass
+        LoopImageCreator._unmount_instroot(self)
+
+    def __ensure_isodir(self):
+        if self.__isodir is None:
+            self.__isodir = self._mkdtemp("iso-")
+        return self.__isodir
+
+    def _get_isodir(self):
+        return self.__ensure_isodir()
+
+    def _set_isodir(self, isodir = None):
+        self.__isodir = isodir
+
+    def _create_bootconfig(self):
+        """Configure the image so that it's bootable."""
+        self._configure_bootloader(self.__ensure_isodir())
+
+    def _get_post_scripts_env(self, in_chroot):
+        env = LoopImageCreator._get_post_scripts_env(self, in_chroot)
+
+        if not in_chroot:
+            env["LIVE_ROOT"] = self.__ensure_isodir()
+
+        return env
+
+    def __write_initrd_conf(self, path):
+        content = ""
+        if not os.path.exists(os.path.dirname(path)):
+            makedirs(os.path.dirname(path))
+        f = open(path, "w")
+
+        content += 'LIVEOS="yes"\n'
+        content += 'PROBE="no"\n'
+        content += 'MODULES+="squashfs ext3 ext2 vfat msdos "\n'
+        content += 'MODULES+="sr_mod sd_mod ide-cd cdrom "\n'
+
+        for module in self.__modules:
+            if module == "=usb":
+                content += 'MODULES+="ehci_hcd uhci_hcd ohci_hcd "\n'
+                content += 'MODULES+="usb_storage usbhid "\n'
+            elif module == "=firewire":
+                content += 'MODULES+="firewire-sbp2 firewire-ohci "\n'
+                content += 'MODULES+="sbp2 ohci1394 ieee1394 "\n'
+            elif module == "=mmc":
+                content += 'MODULES+="mmc_block sdhci sdhci-pci "\n'
+            elif module == "=pcmcia":
+                content += 'MODULES+="pata_pcmcia  "\n'
+            else:
+                content += 'MODULES+="' + module + ' "\n'
+        f.write(content)
+        f.close()
+
+    def __create_iso(self, isodir):
+        iso = self._outdir + "/" + self.name + ".iso"
+        genisoimage = find_binary_path("genisoimage")
+        args = [genisoimage,
+                "-J", "-r",
+                "-hide-rr-moved", "-hide-joliet-trans-tbl",
+                "-V", self.fslabel,
+                "-o", iso]
+
+        args.extend(self._get_mkisofs_options(isodir))
+
+        args.append(isodir)
+
+        if subprocess.call(args) != 0:
+            raise CreatorError("ISO creation failed!")
+
+        """ It should be ok still even if you haven't isohybrid """
+        isohybrid = None
+        try:
+            isohybrid = find_binary_path("isohybrid")
+        except:
+            pass
+
+        if isohybrid:
+            args = [isohybrid, "-partok", iso ]
+            if subprocess.call(args) != 0:
+                raise CreatorError("Hybrid ISO creation failed!")
+
+        self.__implant_md5sum(iso)
+
+    def __implant_md5sum(self, iso):
+        """Implant an isomd5sum."""
+        if os.path.exists("/usr/bin/implantisomd5"):
+            implantisomd5 = "/usr/bin/implantisomd5"
+        elif os.path.exists("/usr/lib/anaconda-runtime/implantisomd5"):
+            implantisomd5 = "/usr/lib/anaconda-runtime/implantisomd5"
+        else:
+            logging.warn("isomd5sum not installed; not setting up mediacheck")
+            implantisomd5 = ""
+            return
+
+        subprocess.call([implantisomd5, iso], stdout=sys.stdout, stderr=sys.stderr)
+
+    def _stage_final_image(self):
+        try:
+            makedirs(self.__ensure_isodir() + "/LiveOS")
+
+            minimal_size = self._resparse()
+
+            if not self.skip_minimize:
+                create_image_minimizer(self.__isodir + "/LiveOS/osmin.img",
+                                       self._image, minimal_size)
+
+            if self.skip_compression:
+                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
+            else:
+                makedirs(os.path.join(os.path.dirname(self._image), "LiveOS"))
+                shutil.move(self._image,
+                            os.path.join(os.path.dirname(self._image),
+                                         "LiveOS", "ext3fs.img"))
+                mksquashfs(os.path.dirname(self._image),
+                           self.__isodir + "/LiveOS/squashfs.img")
+
+            self.__create_iso(self.__isodir)
+        finally:
+            shutil.rmtree(self.__isodir, ignore_errors = True)
+            self.__isodir = None
+
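The "Hooks for subclasses" block above is the contract an architecture-specific creator has to fill in. A minimal sketch of such a subclass follows; it is illustrative only (the class name, the isolinux paths and the trivial isolinux.cfg body are assumptions, not part of this commit), and the real implementation is LivecdImageCreator in micng/imager/livecd.py further down.

    import os
    from micng.imager.BaseImageCreator import LiveImageCreatorBase

    class ExampleLiveCreator(LiveImageCreatorBase):
        """Illustrative subclass showing which hooks have to be provided."""

        def _get_mkisofs_options(self, isodir):
            # Extra genisoimage arguments that make the ISO BIOS-bootable.
            return ["-b", "isolinux/isolinux.bin",
                    "-c", "isolinux/boot.cat",
                    "-no-emul-boot", "-boot-info-table",
                    "-boot-load-size", "4"]

        def _configure_bootloader(self, isodir):
            # Stage the bootloader configuration into the ISO staging tree.
            os.makedirs(os.path.join(isodir, "isolinux"))
            cfg = open(os.path.join(isodir, "isolinux", "isolinux.cfg"), "w")
            cfg.write("default linux0\ntimeout %d\n" % (self._timeout * 10))
            cfg.close()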
diff --git a/micng/imager/__init__.py b/micng/imager/__init__.py
new file mode 100644 (file)
index 0000000..9f9e0e3
--- /dev/null
@@ -0,0 +1,2 @@
+import BaseImageCreator
+import livecd
diff --git a/micng/imager/fs.py b/micng/imager/fs.py
new file mode 100644 (file)
index 0000000..b46c594
--- /dev/null
@@ -0,0 +1,76 @@
+#
+# fs.py : FsImageCreator class for creating fs images
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import stat
+import sys
+import tempfile
+import shutil
+import logging
+import subprocess
+import re
+import tarfile
+import glob
+
+import rpm
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.utils import kickstart
+from micng.utils import pkgmanagers
+from micng.utils.rpmmisc import *
+from micng.utils.misc import *
+from BaseImageCreator import ImageCreator
+
+
+class FsImageCreator(ImageCreator):
+    def __init__(self, ks, name):
+        """Initialize a LoopImageCreator instance.
+
+        This method takes the same arguments as ImageCreator.__init__()
+        """
+        ImageCreator.__init__(self, ks, name)
+
+        self._fstype = None
+        self._fsopts = None
+
+    def _stage_final_image(self):
+        """ nothing to do """
+        pass
+
+    def package(self, destdir = "."):
+        self._stage_final_image()
+
+        destdir = os.path.abspath(os.path.expanduser(destdir))
+        if self._recording_pkgs:
+            self._save_recording_pkgs(destdir)
+
+        print "Copying %s to %s, please be patient to wait (it is slow if they are on different file systems/partitons/disks)" \
+               % (self._instroot, destdir + "/" + self.name)
+
+        copycmd = find_binary_path("cp")
+        args = [ copycmd, "-af", self._instroot, destdir + "/" + self.name ]
+        subprocess.call(args)
+
+        ignores = ["/dev/fd", "/dev/stdin", "/dev/stdout", "/dev/stderr", "/etc/mtab"]
+        for exclude in ignores:
+            if os.path.exists(destdir + "/" + self.name + exclude):
+                os.unlink(destdir + "/" + self.name + exclude)
+
+        self.outimage.append(destdir + "/" + self.name)
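A rough usage sketch of the creator above; ks is assumed to be an already-parsed kickstart handle, the output path is made up, and the mount/install/configure steps belong to the inherited ImageCreator workflow:

    from micng.imager.fs import FsImageCreator

    creator = FsImageCreator(ks, "example-fs-image")
    # ... mounting, package installation and configuration happen through
    # the inherited ImageCreator workflow before packaging ...
    creator.package("/var/tmp/images")
    # The install root is now copied to /var/tmp/images/example-fs-image
    # and the path is recorded in creator.outimage.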
diff --git a/micng/imager/livecd.py b/micng/imager/livecd.py
new file mode 100644 (file)
index 0000000..7fb754d
--- /dev/null
@@ -0,0 +1,407 @@
+#
+# livecd.py : LivecdImageCreator class for creating Live CD images
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import glob
+import shutil
+import subprocess
+import logging
+import re
+import time
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.utils.rpmmisc import *
+from BaseImageCreator import LiveImageCreatorBase
+
+class LivecdImageCreator(LiveImageCreatorBase):
+    """ImageCreator for x86 machines"""
+    def _get_mkisofs_options(self, isodir):
+        return [ "-b", "isolinux/isolinux.bin",
+                 "-c", "isolinux/boot.cat",
+                 "-no-emul-boot", "-boot-info-table",
+                 "-boot-load-size", "4" ]
+
+    def _get_required_packages(self):
+        return ["syslinux", "syslinux-extlinux", "moblin-live"] + LiveImageCreatorBase._get_required_packages(self)
+
+    def _get_isolinux_stanzas(self, isodir):
+        return ""
+
+    def __find_syslinux_menu(self):
+        for menu in ["vesamenu.c32", "menu.c32"]:
+            if os.path.isfile(self._instroot + "/usr/share/syslinux/" + menu):
+                return menu
+
+        raise CreatorError("syslinux not installed : "
+                           "no suitable /usr/share/syslinux/*menu.c32 found")
+
+    def __find_syslinux_mboot(self):
+        #
+        # We only need the mboot module if we have any xen hypervisors
+        #
+        if not glob.glob(self._instroot + "/boot/xen.gz*"):
+            return None
+
+        return "mboot.c32"
+
+    def __copy_syslinux_files(self, isodir, menu, mboot = None):
+        files = ["isolinux.bin", menu]
+        if mboot:
+            files += [mboot]
+
+        for f in files:
+            path = self._instroot + "/usr/share/syslinux/" + f
+
+            if not os.path.isfile(path):
+                raise CreatorError("syslinux not installed : "
+                                   "%s not found" % path)
+
+            shutil.copy(path, isodir + "/isolinux/")
+
+    def __copy_syslinux_background(self, isodest):
+        background_path = self._instroot + \
+                          "/usr/lib/anaconda-runtime/syslinux-vesa-splash.jpg"
+
+        if not os.path.exists(background_path):
+            return False
+
+        shutil.copyfile(background_path, isodest)
+
+        return True
+
+    def __copy_kernel_and_initramfs(self, isodir, version, index):
+        bootdir = self._instroot + "/boot"
+
+        if self._alt_initrd_name:
+            src_initrd_path = os.path.join(bootdir, self._alt_initrd_name)
+        else:
+            src_initrd_path = os.path.join(bootdir, "initrd-" + version + ".img")
+
+        try:
+            shutil.copyfile(bootdir + "/vmlinuz-" + version,
+                            isodir + "/isolinux/vmlinuz" + index)
+            shutil.copyfile(src_initrd_path,
+                            isodir + "/isolinux/initrd" + index + ".img")
+        except:
+            raise CreatorError("Unable to copy valid kernels or initrds, please check the repo")
+
+        is_xen = False
+        if os.path.exists(bootdir + "/xen.gz-" + version[:-3]):
+            shutil.copyfile(bootdir + "/xen.gz-" + version[:-3],
+                            isodir + "/isolinux/xen" + index + ".gz")
+            is_xen = True
+
+        return is_xen
+
+    def __is_default_kernel(self, kernel, kernels):
+        if len(kernels) == 1:
+            return True
+
+        if kernel == self._default_kernel:
+            return True
+
+        if kernel.startswith("kernel-") and kernel[7:] == self._default_kernel:
+            return True
+
+        return False
+
+    def __get_basic_syslinux_config(self, **args):
+        return """
+default %(menu)s
+timeout %(timeout)d
+
+%(background)s
+menu title Welcome to %(distroname)s!
+menu color border 0 #ffffffff #00000000
+menu color sel 7 #ffffffff #ff000000
+menu color title 0 #ffffffff #00000000
+menu color tabmsg 0 #ffffffff #00000000
+menu color unsel 0 #ffffffff #00000000
+menu color hotsel 0 #ff000000 #ffffffff
+menu color hotkey 7 #ffffffff #ff000000
+menu color timeout_msg 0 #ffffffff #00000000
+menu color timeout 0 #ffffffff #00000000
+menu color cmdline 0 #ffffffff #00000000
+""" % args
+
+    def __get_image_stanza(self, is_xen, **args):
+        if not is_xen:
+            template = """label %(short)s
+  menu label %(long)s
+  kernel vmlinuz%(index)s
+  append initrd=initrd%(index)s.img root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
+"""
+        else:
+            template = """label %(short)s
+  menu label %(long)s
+  kernel mboot.c32
+  append xen%(index)s.gz --- vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s --- initrd%(index)s.img
+"""
+        return template % args
+
+    def __get_image_stanzas(self, isodir):
+        versions = []
+        kernels = self._get_kernel_versions()
+        for kernel in kernels:
+            for version in kernels[kernel]:
+                versions.append((kernel, version))
+
+        if not versions:
+            raise CreatorError("Unable to find valid kernels, please check the repo")
+
+        kernel_options = self._get_kernel_options()
+        menu_options = self._get_menu_options()
+
+
+        cfg = ""
+
+        default_version = None
+        default_index = None
+        index = "0"
+        for kernel, version in versions:
+            is_xen = self.__copy_kernel_and_initramfs(isodir, version, index)
+
+            default = self.__is_default_kernel(kernel, kernels)
+            liveinst = False
+            autoliveinst = False
+            netinst = False
+            checkisomd5 = False
+            basicinst = False
+
+            if menu_options.find("bootinstall") >= 0:
+                liveinst = True
+
+            if menu_options.find("autoinst") >= 0:
+                autoliveinst = True
+
+            if menu_options.find("verify") >= 0 and self._has_checkisomd5():
+                checkisomd5 = True
+
+            if menu_options.find("netinst") >= 0:
+                netinst = True
+
+            if default:
+                long = "Boot %s" % self.distro_name
+            elif kernel.startswith("kernel-"):
+                long = "Boot %s(%s)" % (self.name, kernel[7:])
+            else:
+                long = "Boot %s(%s)" % (self.name, kernel)
+
+            cfg += self.__get_image_stanza(is_xen,
+                                           fslabel = self.fslabel,
+                                           liveargs = kernel_options,
+                                           long = long,
+                                           short = "linux" + index,
+                                           extra = "",
+                                           index = index)
+
+            if default:
+                cfg += "menu default\n"
+                default_version = version
+                default_index = index
+            if basicinst:
+                cfg += self.__get_image_stanza(is_xen,
+                                               fslabel = self.fslabel,
+                                               liveargs = kernel_options,
+                                               long = "Installation Only (Text based)",
+                                               short = "basic" + index,
+                                               extra = "basic nosplash 4",
+                                               index = index)
+                
+            if liveinst:
+                cfg += self.__get_image_stanza(is_xen,
+                                               fslabel = self.fslabel,
+                                               liveargs = kernel_options,
+                                               long = "Installation Only",
+                                               short = "liveinst" + index,
+                                               extra = "liveinst nosplash 4",
+                                               index = index)
+            if autoliveinst:
+                cfg += self.__get_image_stanza(is_xen,
+                                               fslabel = self.fslabel,
+                                               liveargs = kernel_options,
+                                               long = "Autoinstall (Deletes all existing content)",
+                                               short = "autoinst" + index,
+                                               extra = "autoinst nosplash 4",
+                                               index = index)
+
+            if checkisomd5:
+                cfg += self.__get_image_stanza(is_xen,
+                                               fslabel = self.fslabel,
+                                               liveargs = kernel_options,
+                                               long = "Verify and " + long,
+                                               short = "check" + index,
+                                               extra = "check",
+                                               index = index)
+
+            index = str(int(index) + 1)
+
+        if not default_version:
+            default_version = versions[0][1]
+        if not default_index:
+            default_index = "0"
+
+        
+        if netinst:
+            cfg += self.__get_image_stanza(is_xen,
+                                           fslabel = self.fslabel,
+                                           liveargs = kernel_options,
+                                           long = "Network Installation",
+                                           short = "netinst",
+                                           extra = "netinst 4",
+                                           index = default_index)
+
+        return cfg
+
+    def __get_memtest_stanza(self, isodir):
+        memtest = glob.glob(self._instroot + "/boot/memtest86*")
+        if not memtest:
+            return ""
+
+        shutil.copyfile(memtest[0], isodir + "/isolinux/memtest")
+
+        return """label memtest
+  menu label Memory Test
+  kernel memtest
+"""
+
+    def __get_local_stanza(self, isodir):
+        return """label local
+  menu label Boot from local drive
+  localboot 0xffff
+"""
+
+    def _configure_syslinux_bootloader(self, isodir):
+        """configure the boot loader"""
+        makedirs(isodir + "/isolinux")
+
+        menu = self.__find_syslinux_menu()
+
+        self.__copy_syslinux_files(isodir, menu,
+                                   self.__find_syslinux_mboot())
+
+        background = ""
+        if self.__copy_syslinux_background(isodir + "/isolinux/splash.jpg"):
+            background = "menu background splash.jpg"
+
+        cfg = self.__get_basic_syslinux_config(menu = menu,
+                                               background = background,
+                                               name = self.name,
+                                               timeout = self._timeout * 10,
+                                               distroname = self.distro_name)
+
+        cfg += self.__get_image_stanzas(isodir)
+        cfg += self.__get_memtest_stanza(isodir)
+        cfg += self.__get_local_stanza(isodir)
+        cfg += self._get_isolinux_stanzas(isodir)
+
+        cfgf = open(isodir + "/isolinux/isolinux.cfg", "w")
+        cfgf.write(cfg)
+        cfgf.close()
+
+    def __copy_efi_files(self, isodir):
+        if not os.path.exists(self._instroot + "/boot/efi/EFI/redhat/grub.efi"):
+            return False
+        shutil.copy(self._instroot + "/boot/efi/EFI/redhat/grub.efi",
+                    isodir + "/EFI/boot/grub.efi")
+        shutil.copy(self._instroot + "/boot/grub/splash.xpm.gz",
+                    isodir + "/EFI/boot/splash.xpm.gz")
+
+        return True
+
+    def __get_basic_efi_config(self, **args):
+        return """
+default=0
+splashimage=/EFI/boot/splash.xpm.gz
+timeout %(timeout)d
+hiddenmenu
+
+""" %args
+
+    def __get_efi_image_stanza(self, **args):
+        return """title %(long)s
+  kernel /EFI/boot/vmlinuz%(index)s root=CDLABEL=%(fslabel)s rootfstype=iso9660 %(liveargs)s %(extra)s
+  initrd /EFI/boot/initrd%(index)s.img
+""" %args
+
+    def __get_efi_image_stanzas(self, isodir, name):
+        # FIXME: this only supports one kernel right now...
+
+        kernel_options = self._get_kernel_options()
+        checkisomd5 = self._has_checkisomd5()
+
+        cfg = ""
+
+        for index in range(0, 9):
+            # we don't support xen kernels
+            if os.path.exists("%s/EFI/boot/xen%d.gz" %(isodir, index)):
+                continue
+            cfg += self.__get_efi_image_stanza(fslabel = self.fslabel,
+                                               liveargs = kernel_options,
+                                               long = name,
+                                               extra = "", index = index)
+            if checkisomd5:
+                cfg += self.__get_efi_image_stanza(fslabel = self.fslabel,
+                                                   liveargs = kernel_options,
+                                                   long = "Verify and Boot " + name,
+                                                   extra = "check",
+                                                   index = index)
+            break
+
+        return cfg
+
+    def _configure_efi_bootloader(self, isodir):
+        """Set up the configuration for an EFI bootloader"""
+        makedirs(isodir + "/EFI/boot")
+
+        if not self.__copy_efi_files(isodir):
+            shutil.rmtree(isodir + "/EFI")
+            return
+
+        for f in os.listdir(isodir + "/isolinux"):
+            os.link("%s/isolinux/%s" %(isodir, f),
+                    "%s/EFI/boot/%s" %(isodir, f))
+
+
+        cfg = self.__get_basic_efi_config(name = self.name,
+                                          timeout = self._timeout)
+        cfg += self.__get_efi_image_stanzas(isodir, self.name)
+
+        cfgf = open(isodir + "/EFI/boot/grub.conf", "w")
+        cfgf.write(cfg)
+        cfgf.close()
+
+        # first gen mactel machines get the bootloader name wrong apparently
+        if getBaseArch() == "i386":
+            os.link(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot.efi")
+            os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot.conf")
+
+        # for most things, we want them named boot$efiarch
+        efiarch = {"i386": "ia32", "x86_64": "x64"}
+        efiname = efiarch[getBaseArch()]
+        os.rename(isodir + "/EFI/boot/grub.efi", isodir + "/EFI/boot/boot%s.efi" %(efiname,))
+        os.link(isodir + "/EFI/boot/grub.conf", isodir + "/EFI/boot/boot%s.conf" %(efiname,))
+
+
+    def _configure_bootloader(self, isodir):
+        self._configure_syslinux_bootloader(isodir)
+        self._configure_efi_bootloader(isodir)
+
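For reference, __get_basic_syslinux_config() combined with a single __get_image_stanza() renders an isolinux.cfg along the following lines; the label, timeout, volume label and kernel arguments are illustrative values and the menu color block is elided:

    default vesamenu.c32
    timeout 100

    menu background splash.jpg
    menu title Welcome to MeeGo!
    ...
    label linux0
      menu label Boot MeeGo
      kernel vmlinuz0
      append initrd=initrd0.img root=CDLABEL=MeeGo rootfstype=iso9660 quiet
    menu default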
diff --git a/micng/pluginbase/__init__.py b/micng/pluginbase/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/pluginbase/backend_plugin.py b/micng/pluginbase/backend_plugin.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/pluginbase/base_plugin.py b/micng/pluginbase/base_plugin.py
new file mode 100644 (file)
index 0000000..36bd3b3
--- /dev/null
@@ -0,0 +1,5 @@
+#!/usr/bin/python
+class PluginBase(object):
+    plugin_type = None
+    def __init__(self):
+        pass
diff --git a/micng/pluginbase/hook_plugin.py b/micng/pluginbase/hook_plugin.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/pluginbase/imager_plugin.py b/micng/pluginbase/imager_plugin.py
new file mode 100644 (file)
index 0000000..f587d71
--- /dev/null
@@ -0,0 +1,90 @@
+#!/usr/bin/python
+from micng.pluginbase.base_plugin import PluginBase
+import micng.configmgr as configmgr
+
+class ImagerPlugin(PluginBase):
+    plugin_type = "imager"
+    def __init__(self, configinfo=None):
+        if configinfo:
+            self.configinfo = configinfo
+        else:
+            self.configinfo = configmgr.getConfigInfo()
+
+    def do_mount_instroot(self):
+        """Mount or prepare the install root directory.
+
+        This is the interface where a plugin may prepare the install root,
+        e.g. by creating and loopback mounting a filesystem image onto
+        _instroot.
+        """
+        pass
+
+    def do_umount_instroot(self):
+        """Undo anything performed in do_mount_instroot().
+
+        This is the interface where a plugin must undo anything which was done
+        in do_mount_instroot(). For example, if a filesystem image was mounted
+        onto _instroot, it should be unmounted here.
+        """
+        pass
+
+    def do_mount(self):
+        """Setup the target filesystem in preparation for an install.
+
+        This interface should set up the filesystem which other functions will
+        install into and configure.
+        """
+        pass
+
+    def do_umount(self):
+        """Unmounts the target filesystem.
+
+        It should detach the target filesystem from the install root.
+        """
+        pass
+
+    def do_cleanup(self):
+        """Unmounts the target filesystem and deletes temporary files.
+
+        This interface deletes any temporary files and directories that were created
+        on the host system while building the image.
+        """
+        pass
+
+    def do_install(self):
+        """Install packages into the install root.
+
+        This interface installs the packages listed in the supplied kickstart
+        into the install root. By default, the packages are installed from the
+        repository URLs specified in the kickstart.
+        """
+        pass
+
+    def do_configure(self):
+        """Configure the system image according to the kickstart.
+
+        This interface applies the (e.g. keyboard or network) configuration
+        specified in the kickstart and executes the kickstart %post scripts.
+
+        If necessary, it also prepares the image to be bootable by e.g.
+        creating an initrd and bootloader configuration.
+        """
+        pass
+
+    def do_package(self, destdir):
+        """Prepares the created image for final delivery.
+
+        This interface merely copies the install root to the supplied destination
+        directory.
+        """
+        pass
+
+    def do_create(self, args):
+        """ Temporary solution to create image in one single interface """
+        pass
+
+    def pack(self):
+        pass
+
+    def unpack(self):
+        pass
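A concrete imager plugin is expected to subclass ImagerPlugin and export a module-level mic_plugin tuple so that the plugin manager (micng/pluginmgr.py, next) can register it under its plugin_type. A minimal sketch with a made-up plugin name:

    from micng.pluginbase.imager_plugin import ImagerPlugin

    class ExampleImagerPlugin(ImagerPlugin):
        def do_create(self, args):
            # Single-entry-point image creation, as described in do_create() above.
            pass

    # Module-level hook that PluginMgr.loadPlugins() looks for.
    mic_plugin = ("example", ExampleImagerPlugin)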
diff --git a/micng/pluginmgr.py b/micng/pluginmgr.py
new file mode 100644 (file)
index 0000000..cecc0ed
--- /dev/null
@@ -0,0 +1,33 @@
+#!/usr/bin/python
+import os
+import sys
+import micng.pluginbase.base_plugin as bp
+
+class PluginMgr(object):
+    def __init__(self, dirlist = []):
+        self.plugin_place = ["/usr/lib/micng/plugins"] + dirlist
+        self.plugins = {}
+    
+    def loadPlugins(self):
+        for pdir in map(os.path.abspath, self.plugin_place):
+            for pitem in os.walk(pdir):
+                sys.path.append(pitem[0])
+                for pf in pitem[2]:
+                    if not pf.endswith(".py"):
+                        continue
+
+                    pmod = __import__(os.path.splitext(pf)[0])
+                    if hasattr(pmod, "mic_plugin"):
+                        pname, pcls = pmod.mic_plugin
+                        ptmp = (pname, pcls)
+                        if hasattr(pcls, "plugin_type"):
+                            if pcls.plugin_type not in self.plugins:
+                                self.plugins[pcls.plugin_type] = [ptmp]
+                            else:
+                                self.plugins[pcls.plugin_type].append(ptmp)
+
+    def getPluginByCateg(self, categ = None):
+        if categ is None:
+            return self.plugins
+        else:
+            return self.plugins[categ]                            
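A short usage sketch of the loader above; the extra plugin directory is an assumption:

    from micng.pluginmgr import PluginMgr

    mgr = PluginMgr(["/usr/local/lib/micng/plugins"])
    mgr.loadPlugins()

    # Plugins are grouped by plugin_type as (name, class) tuples.
    for pname, pcls in mgr.getPluginByCateg("imager"):
        print pname, pcls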
diff --git a/micng/utils/__init__.py b/micng/utils/__init__.py
new file mode 100644 (file)
index 0000000..49a7889
--- /dev/null
@@ -0,0 +1,6 @@
+import misc
+import cmdln
+import kickstart
+import errors
+import fs_related
+import argparse
diff --git a/micng/utils/argparse.py b/micng/utils/argparse.py
new file mode 100644 (file)
index 0000000..a69d294
--- /dev/null
@@ -0,0 +1,2271 @@
+# -*- coding: utf-8 -*-\r
+\r
+# Copyright © 2006-2009 Steven J. Bethard <steven.bethard@gmail.com>.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not\r
+# use this file except in compliance with the License. You may obtain a copy\r
+# of the License at\r
+#\r
+#     http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT\r
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\r
+# License for the specific language governing permissions and limitations\r
+# under the License.\r
+\r
+"""Command-line parsing library\r
+\r
+This module is an optparse-inspired command-line parsing library that:\r
+\r
+    - handles both optional and positional arguments\r
+    - produces highly informative usage messages\r
+    - supports parsers that dispatch to sub-parsers\r
+\r
+The following is a simple usage example that sums integers from the\r
+command-line and writes the result to a file::\r
+\r
+    parser = argparse.ArgumentParser(\r
+        description='sum the integers at the command line')\r
+    parser.add_argument(\r
+        'integers', metavar='int', nargs='+', type=int,\r
+        help='an integer to be summed')\r
+    parser.add_argument(\r
+        '--log', default=sys.stdout, type=argparse.FileType('w'),\r
+        help='the file where the sum should be written')\r
+    args = parser.parse_args()\r
+    args.log.write('%s' % sum(args.integers))\r
+    args.log.close()\r
+\r
+The module contains the following public classes:\r
+\r
+    - ArgumentParser -- The main entry point for command-line parsing. As the\r
+        example above shows, the add_argument() method is used to populate\r
+        the parser with actions for optional and positional arguments. Then\r
+        the parse_args() method is invoked to convert the args at the\r
+        command-line into an object with attributes.\r
+\r
+    - ArgumentError -- The exception raised by ArgumentParser objects when\r
+        there are errors with the parser's actions. Errors raised while\r
+        parsing the command-line are caught by ArgumentParser and emitted\r
+        as command-line messages.\r
+\r
+    - FileType -- A factory for defining types of files to be created. As the\r
+        example above shows, instances of FileType are typically passed as\r
+        the type= argument of add_argument() calls.\r
+\r
+    - Action -- The base class for parser actions. Typically actions are\r
+        selected by passing strings like 'store_true' or 'append_const' to\r
+        the action= argument of add_argument(). However, for greater\r
+        customization of ArgumentParser actions, subclasses of Action may\r
+        be defined and passed as the action= argument.\r
+\r
+    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,\r
+        ArgumentDefaultsHelpFormatter -- Formatter classes which\r
+        may be passed as the formatter_class= argument to the\r
+        ArgumentParser constructor. HelpFormatter is the default,\r
+        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser\r
+        not to change the formatting for help text, and\r
+        ArgumentDefaultsHelpFormatter adds information about argument defaults\r
+        to the help.\r
+\r
+All other classes in this module are considered implementation details.\r
+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only\r
+considered public as object names -- the API of the formatter objects is\r
+still considered an implementation detail.)\r
+"""\r
+\r
+__version__ = '1.0.1'\r
+__all__ = [\r
+    'ArgumentParser',\r
+    'ArgumentError',\r
+    'Namespace',\r
+    'Action',\r
+    'FileType',\r
+    'HelpFormatter',\r
+    'RawDescriptionHelpFormatter',\r
+    'RawTextHelpFormatter',\r
+    'ArgumentDefaultsHelpFormatter',\r
+]\r
+\r
+\r
+import copy as _copy\r
+import os as _os\r
+import re as _re\r
+import sys as _sys\r
+import textwrap as _textwrap\r
+\r
+from gettext import gettext as _\r
+\r
+try:\r
+    _set = set\r
+except NameError:\r
+    from sets import Set as _set\r
+\r
+try:\r
+    _basestring = basestring\r
+except NameError:\r
+    _basestring = str\r
+\r
+try:\r
+    _sorted = sorted\r
+except NameError:\r
+\r
+    def _sorted(iterable, reverse=False):\r
+        result = list(iterable)\r
+        result.sort()\r
+        if reverse:\r
+            result.reverse()\r
+        return result\r
+\r
+# silence Python 2.6 buggy warnings about Exception.message\r
+if _sys.version_info[:2] == (2, 6):\r
+    import warnings\r
+    warnings.filterwarnings(\r
+        action='ignore',\r
+        message='BaseException.message has been deprecated as of Python 2.6',\r
+        category=DeprecationWarning,\r
+        module='argparse')\r
+\r
+\r
+SUPPRESS = '==SUPPRESS=='\r
+\r
+OPTIONAL = '?'\r
+ZERO_OR_MORE = '*'\r
+ONE_OR_MORE = '+'\r
+PARSER = '==PARSER=='\r
+\r
+# =============================\r
+# Utility functions and classes\r
+# =============================\r
+\r
+class _AttributeHolder(object):\r
+    """Abstract base class that provides __repr__.\r
+\r
+    The __repr__ method returns a string in the format::\r
+        ClassName(attr=name, attr=name, ...)\r
+    The attributes are determined either by a class-level attribute,\r
+    '_kwarg_names', or by inspecting the instance __dict__.\r
+    """\r
+\r
+    def __repr__(self):\r
+        type_name = type(self).__name__\r
+        arg_strings = []\r
+        for arg in self._get_args():\r
+            arg_strings.append(repr(arg))\r
+        for name, value in self._get_kwargs():\r
+            arg_strings.append('%s=%r' % (name, value))\r
+        return '%s(%s)' % (type_name, ', '.join(arg_strings))\r
+\r
+    def _get_kwargs(self):\r
+        return _sorted(self.__dict__.items())\r
+\r
+    def _get_args(self):\r
+        return []\r
+\r
+\r
+def _ensure_value(namespace, name, value):\r
+    if getattr(namespace, name, None) is None:\r
+        setattr(namespace, name, value)\r
+    return getattr(namespace, name)\r
+\r
+\r
+# ===============\r
+# Formatting Help\r
+# ===============\r
+\r
+class HelpFormatter(object):\r
+    """Formatter for generating usage messages and argument help strings.\r
+\r
+    Only the name of this class is considered a public API. All the methods\r
+    provided by the class are considered an implementation detail.\r
+    """\r
+\r
+    def __init__(self,\r
+                 prog,\r
+                 indent_increment=2,\r
+                 max_help_position=24,\r
+                 width=None):\r
+\r
+        # default setting for width\r
+        if width is None:\r
+            try:\r
+                width = int(_os.environ['COLUMNS'])\r
+            except (KeyError, ValueError):\r
+                width = 80\r
+            width -= 2\r
+\r
+        self._prog = prog\r
+        self._indent_increment = indent_increment\r
+        self._max_help_position = max_help_position\r
+        self._width = width\r
+\r
+        self._current_indent = 0\r
+        self._level = 0\r
+        self._action_max_length = 0\r
+\r
+        self._root_section = self._Section(self, None)\r
+        self._current_section = self._root_section\r
+\r
+        self._whitespace_matcher = _re.compile(r'\s+')\r
+        self._long_break_matcher = _re.compile(r'\n\n\n+')\r
+\r
+    # ===============================\r
+    # Section and indentation methods\r
+    # ===============================\r
+    def _indent(self):\r
+        self._current_indent += self._indent_increment\r
+        self._level += 1\r
+\r
+    def _dedent(self):\r
+        self._current_indent -= self._indent_increment\r
+        assert self._current_indent >= 0, 'Indent decreased below 0.'\r
+        self._level -= 1\r
+\r
+    class _Section(object):\r
+\r
+        def __init__(self, formatter, parent, heading=None):\r
+            self.formatter = formatter\r
+            self.parent = parent\r
+            self.heading = heading\r
+            self.items = []\r
+\r
+        def format_help(self):\r
+            # format the indented section\r
+            if self.parent is not None:\r
+                self.formatter._indent()\r
+            join = self.formatter._join_parts\r
+            for func, args in self.items:\r
+                func(*args)\r
+            item_help = join([func(*args) for func, args in self.items])\r
+            if self.parent is not None:\r
+                self.formatter._dedent()\r
+\r
+            # return nothing if the section was empty\r
+            if not item_help:\r
+                return ''\r
+\r
+            # add the heading if the section was non-empty\r
+            if self.heading is not SUPPRESS and self.heading is not None:\r
+                current_indent = self.formatter._current_indent\r
+                heading = '%*s%s:\n' % (current_indent, '', self.heading)\r
+            else:\r
+                heading = ''\r
+\r
+            # join the section-initial newline, the heading and the help\r
+            return join(['\n', heading, item_help, '\n'])\r
+\r
+    def _add_item(self, func, args):\r
+        self._current_section.items.append((func, args))\r
+\r
+    # ========================\r
+    # Message building methods\r
+    # ========================\r
+    def start_section(self, heading):\r
+        self._indent()\r
+        section = self._Section(self, self._current_section, heading)\r
+        self._add_item(section.format_help, [])\r
+        self._current_section = section\r
+\r
+    def end_section(self):\r
+        self._current_section = self._current_section.parent\r
+        self._dedent()\r
+\r
+    def add_text(self, text):\r
+        if text is not SUPPRESS and text is not None:\r
+            self._add_item(self._format_text, [text])\r
+\r
+    def add_usage(self, usage, actions, groups, prefix=None):\r
+        if usage is not SUPPRESS:\r
+            args = usage, actions, groups, prefix\r
+            self._add_item(self._format_usage, args)\r
+\r
+    def add_argument(self, action):\r
+        if action.help is not SUPPRESS:\r
+\r
+            # find all invocations\r
+            get_invocation = self._format_action_invocation\r
+            invocations = [get_invocation(action)]\r
+            for subaction in self._iter_indented_subactions(action):\r
+                invocations.append(get_invocation(subaction))\r
+\r
+            # update the maximum item length\r
+            invocation_length = max([len(s) for s in invocations])\r
+            action_length = invocation_length + self._current_indent\r
+            self._action_max_length = max(self._action_max_length,\r
+                                          action_length)\r
+\r
+            # add the item to the list\r
+            self._add_item(self._format_action, [action])\r
+\r
+    def add_arguments(self, actions):\r
+        for action in actions:\r
+            self.add_argument(action)\r
+\r
+    # =======================\r
+    # Help-formatting methods\r
+    # =======================\r
+    def format_help(self):\r
+        help = self._root_section.format_help()\r
+        if help:\r
+            help = self._long_break_matcher.sub('\n\n', help)\r
+            help = help.strip('\n') + '\n'\r
+        return help\r
+\r
+    def _join_parts(self, part_strings):\r
+        return ''.join([part\r
+                        for part in part_strings\r
+                        if part and part is not SUPPRESS])\r
+\r
+    def _format_usage(self, usage, actions, groups, prefix):\r
+        if prefix is None:\r
+            prefix = _('usage: ')\r
+\r
+        # if usage is specified, use that\r
+        if usage is not None:\r
+            usage = usage % dict(prog=self._prog)\r
+\r
+        # if no optionals or positionals are available, usage is just prog\r
+        elif usage is None and not actions:\r
+            usage = '%(prog)s' % dict(prog=self._prog)\r
+\r
+        # if optionals and positionals are available, calculate usage\r
+        elif usage is None:\r
+            prog = '%(prog)s' % dict(prog=self._prog)\r
+\r
+            # split optionals from positionals\r
+            optionals = []\r
+            positionals = []\r
+            for action in actions:\r
+                if action.option_strings:\r
+                    optionals.append(action)\r
+                else:\r
+                    positionals.append(action)\r
+\r
+            # build full usage string\r
+            format = self._format_actions_usage\r
+            action_usage = format(optionals + positionals, groups)\r
+            usage = ' '.join([s for s in [prog, action_usage] if s])\r
+\r
+            # wrap the usage parts if it's too long\r
+            text_width = self._width - self._current_indent\r
+            if len(prefix) + len(usage) > text_width:\r
+\r
+                # break usage into wrappable parts\r
+                part_regexp = r'\(.*?\)+|\[.*?\]+|\S+'\r
+                opt_usage = format(optionals, groups)\r
+                pos_usage = format(positionals, groups)\r
+                opt_parts = _re.findall(part_regexp, opt_usage)\r
+                pos_parts = _re.findall(part_regexp, pos_usage)\r
+                assert ' '.join(opt_parts) == opt_usage\r
+                assert ' '.join(pos_parts) == pos_usage\r
+\r
+                # helper for wrapping lines\r
+                def get_lines(parts, indent, prefix=None):\r
+                    lines = []\r
+                    line = []\r
+                    if prefix is not None:\r
+                        line_len = len(prefix) - 1\r
+                    else:\r
+                        line_len = len(indent) - 1\r
+                    for part in parts:\r
+                        if line_len + 1 + len(part) > text_width:\r
+                            lines.append(indent + ' '.join(line))\r
+                            line = []\r
+                            line_len = len(indent) - 1\r
+                        line.append(part)\r
+                        line_len += len(part) + 1\r
+                    if line:\r
+                        lines.append(indent + ' '.join(line))\r
+                    if prefix is not None:\r
+                        lines[0] = lines[0][len(indent):]\r
+                    return lines\r
+\r
+                # if prog is short, follow it with optionals or positionals\r
+                if len(prefix) + len(prog) <= 0.75 * text_width:\r
+                    indent = ' ' * (len(prefix) + len(prog) + 1)\r
+                    if opt_parts:\r
+                        lines = get_lines([prog] + opt_parts, indent, prefix)\r
+                        lines.extend(get_lines(pos_parts, indent))\r
+                    elif pos_parts:\r
+                        lines = get_lines([prog] + pos_parts, indent, prefix)\r
+                    else:\r
+                        lines = [prog]\r
+\r
+                # if prog is long, put it on its own line\r
+                else:\r
+                    indent = ' ' * len(prefix)\r
+                    parts = opt_parts + pos_parts\r
+                    lines = get_lines(parts, indent)\r
+                    if len(lines) > 1:\r
+                        lines = []\r
+                        lines.extend(get_lines(opt_parts, indent))\r
+                        lines.extend(get_lines(pos_parts, indent))\r
+                    lines = [prog] + lines\r
+\r
+                # join lines into usage\r
+                usage = '\n'.join(lines)\r
+\r
+        # prefix with 'usage:'\r
+        return '%s%s\n\n' % (prefix, usage)\r
+\r
+    def _format_actions_usage(self, actions, groups):\r
+        # find group indices and identify actions in groups\r
+        group_actions = _set()\r
+        inserts = {}\r
+        for group in groups:\r
+            try:\r
+                start = actions.index(group._group_actions[0])\r
+            except ValueError:\r
+                continue\r
+            else:\r
+                end = start + len(group._group_actions)\r
+                if actions[start:end] == group._group_actions:\r
+                    for action in group._group_actions:\r
+                        group_actions.add(action)\r
+                    if not group.required:\r
+                        inserts[start] = '['\r
+                        inserts[end] = ']'\r
+                    else:\r
+                        inserts[start] = '('\r
+                        inserts[end] = ')'\r
+                    for i in range(start + 1, end):\r
+                        inserts[i] = '|'\r
+\r
+        # collect all actions format strings\r
+        parts = []\r
+        for i, action in enumerate(actions):\r
+\r
+            # suppressed arguments are marked with None\r
+            # remove | separators for suppressed arguments\r
+            if action.help is SUPPRESS:\r
+                parts.append(None)\r
+                if inserts.get(i) == '|':\r
+                    inserts.pop(i)\r
+                elif inserts.get(i + 1) == '|':\r
+                    inserts.pop(i + 1)\r
+\r
+            # produce all arg strings\r
+            elif not action.option_strings:\r
+                part = self._format_args(action, action.dest)\r
+\r
+                # if it's in a group, strip the outer []\r
+                if action in group_actions:\r
+                    if part[0] == '[' and part[-1] == ']':\r
+                        part = part[1:-1]\r
+\r
+                # add the action string to the list\r
+                parts.append(part)\r
+\r
+            # produce the first way to invoke the option in brackets\r
+            else:\r
+                option_string = action.option_strings[0]\r
+\r
+                # if the Optional doesn't take a value, format is:\r
+                #    -s or --long\r
+                if action.nargs == 0:\r
+                    part = '%s' % option_string\r
+\r
+                # if the Optional takes a value, format is:\r
+                #    -s ARGS or --long ARGS\r
+                else:\r
+                    default = action.dest.upper()\r
+                    args_string = self._format_args(action, default)\r
+                    part = '%s %s' % (option_string, args_string)\r
+\r
+                # make it look optional if it's not required or in a group\r
+                if not action.required and action not in group_actions:\r
+                    part = '[%s]' % part\r
+\r
+                # add the action string to the list\r
+                parts.append(part)\r
+\r
+        # insert things at the necessary indices\r
+        for i in _sorted(inserts, reverse=True):\r
+            parts[i:i] = [inserts[i]]\r
+\r
+        # join all the action items with spaces\r
+        text = ' '.join([item for item in parts if item is not None])\r
+\r
+        # clean up separators for mutually exclusive groups\r
+        open = r'[\[(]'\r
+        close = r'[\])]'\r
+        text = _re.sub(r'(%s) ' % open, r'\1', text)\r
+        text = _re.sub(r' (%s)' % close, r'\1', text)\r
+        text = _re.sub(r'%s *%s' % (open, close), r'', text)\r
+        text = _re.sub(r'\(([^|]*)\)', r'\1', text)\r
+        text = text.strip()\r
+\r
+        # return the text\r
+        return text\r
+\r
+    def _format_text(self, text):\r
+        text_width = self._width - self._current_indent\r
+        indent = ' ' * self._current_indent\r
+        return self._fill_text(text, text_width, indent) + '\n\n'\r
+\r
+    def _format_action(self, action):\r
+        # determine the required width and the entry label\r
+        help_position = min(self._action_max_length + 2,\r
+                            self._max_help_position)\r
+        help_width = self._width - help_position\r
+        action_width = help_position - self._current_indent - 2\r
+        action_header = self._format_action_invocation(action)\r
+\r
+        # no help; start on same line and add a final newline\r
+        if not action.help:\r
+            tup = self._current_indent, '', action_header\r
+            action_header = '%*s%s\n' % tup\r
+\r
+        # short action name; start on the same line and pad two spaces\r
+        elif len(action_header) <= action_width:\r
+            tup = self._current_indent, '', action_width, action_header\r
+            action_header = '%*s%-*s  ' % tup\r
+            indent_first = 0\r
+\r
+        # long action name; start on the next line\r
+        else:\r
+            tup = self._current_indent, '', action_header\r
+            action_header = '%*s%s\n' % tup\r
+            indent_first = help_position\r
+\r
+        # collect the pieces of the action help\r
+        parts = [action_header]\r
+\r
+        # if there was help for the action, add lines of help text\r
+        if action.help:\r
+            help_text = self._expand_help(action)\r
+            help_lines = self._split_lines(help_text, help_width)\r
+            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))\r
+            for line in help_lines[1:]:\r
+                parts.append('%*s%s\n' % (help_position, '', line))\r
+\r
+        # or add a newline if the description doesn't end with one\r
+        elif not action_header.endswith('\n'):\r
+            parts.append('\n')\r
+\r
+        # if there are any sub-actions, add their help as well\r
+        for subaction in self._iter_indented_subactions(action):\r
+            parts.append(self._format_action(subaction))\r
+\r
+        # return a single string\r
+        return self._join_parts(parts)\r
+\r
+    def _format_action_invocation(self, action):\r
+        if not action.option_strings:\r
+            metavar, = self._metavar_formatter(action, action.dest)(1)\r
+            return metavar\r
+\r
+        else:\r
+            parts = []\r
+\r
+            # if the Optional doesn't take a value, format is:\r
+            #    -s, --long\r
+            if action.nargs == 0:\r
+                parts.extend(action.option_strings)\r
+\r
+            # if the Optional takes a value, format is:\r
+            #    -s ARGS, --long ARGS\r
+            else:\r
+                default = action.dest.upper()\r
+                args_string = self._format_args(action, default)\r
+                for option_string in action.option_strings:\r
+                    parts.append('%s %s' % (option_string, args_string))\r
+\r
+            return ', '.join(parts)\r
+\r
+    def _metavar_formatter(self, action, default_metavar):\r
+        if action.metavar is not None:\r
+            result = action.metavar\r
+        elif action.choices is not None:\r
+            choice_strs = [str(choice) for choice in action.choices]\r
+            #result = '{%s}' % ','.join(choice_strs)\r
+            result = ""\r
+        else:\r
+            result = default_metavar\r
+\r
+        def format(tuple_size):\r
+            if isinstance(result, tuple):\r
+                return result\r
+            else:\r
+                return (result, ) * tuple_size\r
+        return format\r
+\r
+    def _format_args(self, action, default_metavar):\r
+        get_metavar = self._metavar_formatter(action, default_metavar)\r
+        if action.nargs is None:\r
+            result = '%s' % get_metavar(1)\r
+        elif action.nargs == OPTIONAL:\r
+            result = '[%s]' % get_metavar(1)\r
+        elif action.nargs == ZERO_OR_MORE:\r
+            result = '[%s [%s ...]]' % get_metavar(2)\r
+        elif action.nargs == ONE_OR_MORE:\r
+            result = '%s [%s ...]' % get_metavar(2)\r
+        elif action.nargs is PARSER:\r
+            result = '%s ...' % get_metavar(1)\r
+        else:\r
+            formats = ['%s' for _ in range(action.nargs)]\r
+            result = ' '.join(formats) % get_metavar(action.nargs)\r
+        return result\r
+\r
+    def _expand_help(self, action):\r
+        params = dict(vars(action), prog=self._prog)\r
+        for name in list(params):\r
+            if params[name] is SUPPRESS:\r
+                del params[name]\r
+        if params.get('choices') is not None:\r
+            choices_str = ', '.join([str(c) for c in params['choices']])\r
+            params['choices'] = choices_str\r
+        return self._get_help_string(action) % params\r
+\r
+    def _iter_indented_subactions(self, action):\r
+        try:\r
+            get_subactions = action._get_subactions\r
+        except AttributeError:\r
+            pass\r
+        else:\r
+            self._indent()\r
+            for subaction in get_subactions():\r
+                yield subaction\r
+            self._dedent()\r
+\r
+    def _split_lines(self, text, width):\r
+        text = self._whitespace_matcher.sub(' ', text).strip()\r
+        return _textwrap.wrap(text, width)\r
+\r
+    def _fill_text(self, text, width, indent):\r
+        text = self._whitespace_matcher.sub(' ', text).strip()\r
+        return _textwrap.fill(text, width, initial_indent=indent,\r
+                                           subsequent_indent=indent)\r
+\r
+    def _get_help_string(self, action):\r
+        return action.help\r
+\r
+\r
+class RawDescriptionHelpFormatter(HelpFormatter):\r
+    """Help message formatter which retains any formatting in descriptions.\r
+\r
+    Only the name of this class is considered a public API. All the methods\r
+    provided by the class are considered an implementation detail.\r
+    """\r
+\r
+    def _fill_text(self, text, width, indent):\r
+        return ''.join([indent + line for line in text.splitlines(True)])\r
+\r
+\r
+class RawTextHelpFormatter(RawDescriptionHelpFormatter):\r
+    """Help message formatter which retains formatting of all help text.\r
+\r
+    Only the name of this class is considered a public API. All the methods\r
+    provided by the class are considered an implementation detail.\r
+    """\r
+\r
+    def _split_lines(self, text, width):\r
+        return text.splitlines()\r
+\r
+\r
+class ArgumentDefaultsHelpFormatter(HelpFormatter):\r
+    """Help message formatter which adds default values to argument help.\r
+\r
+    Only the name of this class is considered a public API. All the methods\r
+    provided by the class are considered an implementation detail.\r
+    """\r
+\r
+    def _get_help_string(self, action):\r
+        help = action.help\r
+        if '%(default)' not in action.help:\r
+            if action.default is not SUPPRESS:\r
+                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]\r
+                if action.option_strings or action.nargs in defaulting_nargs:\r
+                    help += ' (default: %(default)s)'\r
+        return help\r
+\r
+\r
+# =====================\r
+# Options and Arguments\r
+# =====================\r
+\r
+def _get_action_name(argument):\r
+    if argument is None:\r
+        return None\r
+    elif argument.option_strings:\r
+        return '/'.join(argument.option_strings)\r
+    elif argument.metavar not in (None, SUPPRESS):\r
+        return argument.metavar\r
+    elif argument.dest not in (None, SUPPRESS):\r
+        return argument.dest\r
+    else:\r
+        return None\r
+\r
+\r
+class ArgumentError(Exception):\r
+    """An error from creating or using an argument (optional or positional).\r
+\r
+    The string value of this exception is the message, augmented with\r
+    information about the argument that caused it.\r
+    """\r
+\r
+    def __init__(self, argument, message):\r
+        self.argument_name = _get_action_name(argument)\r
+        self.message = message\r
+\r
+    def __str__(self):\r
+        if self.argument_name is None:\r
+            format = '%(message)s'\r
+        else:\r
+            format = 'argument %(argument_name)s: %(message)s'\r
+        return format % dict(message=self.message,\r
+                             argument_name=self.argument_name)\r
+\r
+# ==============\r
+# Action classes\r
+# ==============\r
+\r
+class Action(_AttributeHolder):\r
+    """Information about how to convert command line strings to Python objects.\r
+\r
+    Action objects are used by an ArgumentParser to represent the information\r
+    needed to parse a single argument from one or more strings from the\r
+    command line. The keyword arguments to the Action constructor are also\r
+    all attributes of Action instances.\r
+\r
+    Keyword Arguments:\r
+\r
+        - option_strings -- A list of command-line option strings which\r
+            should be associated with this action.\r
+\r
+        - dest -- The name of the attribute to hold the created object(s)\r
+\r
+        - nargs -- The number of command-line arguments that should be\r
+            consumed. By default, one argument will be consumed and a single\r
+            value will be produced.  Other values include:\r
+                - N (an integer) consumes N arguments (and produces a list)\r
+                - '?' consumes zero or one arguments\r
+                - '*' consumes zero or more arguments (and produces a list)\r
+                - '+' consumes one or more arguments (and produces a list)\r
+            Note that the difference between the default and nargs=1 is that\r
+            with the default, a single value will be produced, while with\r
+            nargs=1, a list containing a single value will be produced.\r
+\r
+        - const -- The value to be produced if the option is specified and the\r
+            option uses an action that takes no values.\r
+\r
+        - default -- The value to be produced if the option is not specified.\r
+\r
+        - type -- The type which the command-line arguments should be converted\r
+            to, should be one of 'string', 'int', 'float', 'complex' or a\r
+            callable object that accepts a single string argument. If None,\r
+            'string' is assumed.\r
+\r
+        - choices -- A container of values that should be allowed. If not None,\r
+            after a command-line argument has been converted to the appropriate\r
+            type, an exception will be raised if it is not a member of this\r
+            collection.\r
+\r
+        - required -- True if the action must always be specified at the\r
+            command line. This is only meaningful for optional command-line\r
+            arguments.\r
+\r
+        - help -- The help string describing the argument.\r
+\r
+        - metavar -- The name to be used for the option's argument with the\r
+            help string. If None, the 'dest' value will be used as the name.\r
+    """\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 nargs=None,\r
+                 const=None,\r
+                 default=None,\r
+                 type=None,\r
+                 choices=None,\r
+                 required=False,\r
+                 help=None,\r
+                 metavar=None):\r
+        self.option_strings = option_strings\r
+        self.dest = dest\r
+        self.nargs = nargs\r
+        self.const = const\r
+        self.default = default\r
+        self.type = type\r
+        self.choices = choices\r
+        self.required = required\r
+        self.help = help\r
+        self.metavar = metavar\r
+\r
+    def _get_kwargs(self):\r
+        names = [\r
+            'option_strings',\r
+            'dest',\r
+            'nargs',\r
+            'const',\r
+            'default',\r
+            'type',\r
+            'choices',\r
+            'help',\r
+            'metavar',\r
+        ]\r
+        return [(name, getattr(self, name)) for name in names]\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        raise NotImplementedError(_('.__call__() not defined'))\r
+\r
+\r
+class _StoreAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 nargs=None,\r
+                 const=None,\r
+                 default=None,\r
+                 type=None,\r
+                 choices=None,\r
+                 required=False,\r
+                 help=None,\r
+                 metavar=None):\r
+        if nargs == 0:\r
+            raise ValueError('nargs for store actions must be > 0; if you '\r
+                             'have nothing to store, actions such as store '\r
+                             'true or store const may be more appropriate')\r
+        if const is not None and nargs != OPTIONAL:\r
+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)\r
+        super(_StoreAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=nargs,\r
+            const=const,\r
+            default=default,\r
+            type=type,\r
+            choices=choices,\r
+            required=required,\r
+            help=help,\r
+            metavar=metavar)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        setattr(namespace, self.dest, values)\r
+\r
+\r
+class _StoreConstAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 const,\r
+                 default=None,\r
+                 required=False,\r
+                 help=None,\r
+                 metavar=None):\r
+        super(_StoreConstAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=0,\r
+            const=const,\r
+            default=default,\r
+            required=required,\r
+            help=help)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        setattr(namespace, self.dest, self.const)\r
+\r
+\r
+class _StoreTrueAction(_StoreConstAction):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 default=False,\r
+                 required=False,\r
+                 help=None):\r
+        super(_StoreTrueAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            const=True,\r
+            default=default,\r
+            required=required,\r
+            help=help)\r
+\r
+\r
+class _StoreFalseAction(_StoreConstAction):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 default=True,\r
+                 required=False,\r
+                 help=None):\r
+        super(_StoreFalseAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            const=False,\r
+            default=default,\r
+            required=required,\r
+            help=help)\r
+\r
+\r
+class _AppendAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 nargs=None,\r
+                 const=None,\r
+                 default=None,\r
+                 type=None,\r
+                 choices=None,\r
+                 required=False,\r
+                 help=None,\r
+                 metavar=None):\r
+        if nargs == 0:\r
+            raise ValueError('nargs for append actions must be > 0; if arg '\r
+                             'strings are not supplying the value to append, '\r
+                             'the append const action may be more appropriate')\r
+        if const is not None and nargs != OPTIONAL:\r
+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)\r
+        super(_AppendAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=nargs,\r
+            const=const,\r
+            default=default,\r
+            type=type,\r
+            choices=choices,\r
+            required=required,\r
+            help=help,\r
+            metavar=metavar)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        items = _copy.copy(_ensure_value(namespace, self.dest, []))\r
+        items.append(values)\r
+        setattr(namespace, self.dest, items)\r
+\r
+\r
+class _AppendConstAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 const,\r
+                 default=None,\r
+                 required=False,\r
+                 help=None,\r
+                 metavar=None):\r
+        super(_AppendConstAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=0,\r
+            const=const,\r
+            default=default,\r
+            required=required,\r
+            help=help,\r
+            metavar=metavar)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        items = _copy.copy(_ensure_value(namespace, self.dest, []))\r
+        items.append(self.const)\r
+        setattr(namespace, self.dest, items)\r
+\r
+\r
+class _CountAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest,\r
+                 default=None,\r
+                 required=False,\r
+                 help=None):\r
+        super(_CountAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=0,\r
+            default=default,\r
+            required=required,\r
+            help=help)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        new_count = _ensure_value(namespace, self.dest, 0) + 1\r
+        setattr(namespace, self.dest, new_count)\r
+\r
+\r
+class _HelpAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest=SUPPRESS,\r
+                 default=SUPPRESS,\r
+                 help=None):\r
+        super(_HelpAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            default=default,\r
+            nargs=0,\r
+            help=help)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        parser.print_help()\r
+        parser.exit()\r
+\r
+\r
+class _VersionAction(Action):\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 dest=SUPPRESS,\r
+                 default=SUPPRESS,\r
+                 help=None):\r
+        super(_VersionAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            default=default,\r
+            nargs=0,\r
+            help=help)\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        parser.print_version()\r
+        parser.exit()\r
+\r
+\r
+class _SubParsersAction(Action):\r
+\r
+    class _ChoicesPseudoAction(Action):\r
+\r
+        def __init__(self, name, help):\r
+            sup = super(_SubParsersAction._ChoicesPseudoAction, self)\r
+            sup.__init__(option_strings=[], dest=name, help=help)\r
+\r
+    def __init__(self,\r
+                 option_strings,\r
+                 prog,\r
+                 parser_class,\r
+                 dest=SUPPRESS,\r
+                 help=None,\r
+                 metavar=None):\r
+\r
+        self._prog_prefix = prog\r
+        self._parser_class = parser_class\r
+        self._name_parser_map = {}\r
+        self._choices_actions = []\r
+\r
+        super(_SubParsersAction, self).__init__(\r
+            option_strings=option_strings,\r
+            dest=dest,\r
+            nargs=PARSER,\r
+            choices=self._name_parser_map,\r
+            help=help,\r
+            metavar=metavar)\r
+\r
+    def add_parser(self, name, **kwargs):\r
+        # set prog from the existing prefix\r
+        if kwargs.get('prog') is None:\r
+            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)\r
+\r
+        # create a pseudo-action to hold the choice help\r
+        if 'help' in kwargs:\r
+            help = kwargs.pop('help')\r
+            choice_action = self._ChoicesPseudoAction(name, help)\r
+            self._choices_actions.append(choice_action)\r
+\r
+        # create the parser and add it to the map\r
+        parser = self._parser_class(**kwargs)\r
+        self._name_parser_map[name] = parser\r
+        return parser\r
+\r
+    def _get_subactions(self):\r
+        return self._choices_actions\r
+\r
+    def __call__(self, parser, namespace, values, option_string=None):\r
+        parser_name = values[0]\r
+        arg_strings = values[1:]\r
+\r
+        # set the parser name if requested\r
+        if self.dest is not SUPPRESS:\r
+            setattr(namespace, self.dest, parser_name)\r
+\r
+        # select the parser\r
+        try:\r
+            parser = self._name_parser_map[parser_name]\r
+        except KeyError:\r
+            tup = parser_name, ', '.join(self._name_parser_map)\r
+            msg = _('unknown parser %r (choices: %s)' % tup)\r
+            raise ArgumentError(self, msg)\r
+\r
+        # parse all the remaining options into the namespace\r
+        parser.parse_args(arg_strings, namespace)\r
+\r
+\r
+# ==============\r
+# Type classes\r
+# ==============\r
+\r
+class FileType(object):\r
+    """Factory for creating file object types\r
+\r
+    Instances of FileType are typically passed as type= arguments to the\r
+    ArgumentParser add_argument() method.\r
+\r
+    Keyword Arguments:\r
+        - mode -- A string indicating how the file is to be opened. Accepts the\r
+            same values as the builtin open() function.\r
+        - bufsize -- The file's desired buffer size. Accepts the same values as\r
+            the builtin open() function.\r
+    """\r
+\r
+    def __init__(self, mode='r', bufsize=None):\r
+        self._mode = mode\r
+        self._bufsize = bufsize\r
+\r
+    def __call__(self, string):\r
+        # the special argument "-" means sys.std{in,out}\r
+        if string == '-':\r
+            if 'r' in self._mode:\r
+                return _sys.stdin\r
+            elif 'w' in self._mode:\r
+                return _sys.stdout\r
+            else:\r
+                msg = _('argument "-" with mode %r' % self._mode)\r
+                raise ValueError(msg)\r
+\r
+        # all other arguments are used as file names\r
+        if self._bufsize:\r
+            return open(string, self._mode, self._bufsize)\r
+        else:\r
+            return open(string, self._mode)\r
+\r
+    def __repr__(self):\r
+        args = [self._mode, self._bufsize]\r
+        args_str = ', '.join([repr(arg) for arg in args if arg is not None])\r
+        return '%s(%s)' % (type(self).__name__, args_str)\r
+\r
+# ===============================\r
+# Optional and Positional Parsing\r
+# ===============================\r
+\r
+class Namespace(_AttributeHolder):\r
+    """Simple object for storing attributes.\r
+\r
+    Implements equality by attribute names and values, and provides a simple\r
+    string representation.\r
+    """\r
+\r
+    def __init__(self, **kwargs):\r
+        for name in kwargs:\r
+            setattr(self, name, kwargs[name])\r
+\r
+    def __eq__(self, other):\r
+        return vars(self) == vars(other)\r
+\r
+    def __ne__(self, other):\r
+        return not (self == other)\r
+\r
+\r
+class _ActionsContainer(object):\r
+\r
+    def __init__(self,\r
+                 description,\r
+                 prefix_chars,\r
+                 argument_default,\r
+                 conflict_handler):\r
+        super(_ActionsContainer, self).__init__()\r
+\r
+        self.description = description\r
+        self.argument_default = argument_default\r
+        self.prefix_chars = prefix_chars\r
+        self.conflict_handler = conflict_handler\r
+\r
+        # set up registries\r
+        self._registries = {}\r
+\r
+        # register actions\r
+        self.register('action', None, _StoreAction)\r
+        self.register('action', 'store', _StoreAction)\r
+        self.register('action', 'store_const', _StoreConstAction)\r
+        self.register('action', 'store_true', _StoreTrueAction)\r
+        self.register('action', 'store_false', _StoreFalseAction)\r
+        self.register('action', 'append', _AppendAction)\r
+        self.register('action', 'append_const', _AppendConstAction)\r
+        self.register('action', 'count', _CountAction)\r
+        self.register('action', 'help', _HelpAction)\r
+        self.register('action', 'version', _VersionAction)\r
+        self.register('action', 'parsers', _SubParsersAction)\r
+\r
+        # raise an exception if the conflict handler is invalid\r
+        self._get_handler()\r
+\r
+        # action storage\r
+        self._actions = []\r
+        self._option_string_actions = {}\r
+\r
+        # groups\r
+        self._action_groups = []\r
+        self._mutually_exclusive_groups = []\r
+\r
+        # defaults storage\r
+        self._defaults = {}\r
+\r
+        # determines whether an "option" looks like a negative number\r
+        self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$')\r
+\r
+        # whether or not there are any optionals that look like negative\r
+        # numbers -- uses a list so it can be shared and edited\r
+        self._has_negative_number_optionals = []\r
+\r
+    # ====================\r
+    # Registration methods\r
+    # ====================\r
+    def register(self, registry_name, value, object):\r
+        registry = self._registries.setdefault(registry_name, {})\r
+        registry[value] = object\r
+\r
+    def _registry_get(self, registry_name, value, default=None):\r
+        return self._registries[registry_name].get(value, default)\r
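+    # Illustrative sketch (the 'hexint' name is an assumption; `parser` is\r
+    # assumed to be an ArgumentParser instance).  A registered name can then\r
+    # be used as a string value for the corresponding keyword:\r
+    #\r
+    #   parser.register('type', 'hexint', lambda s: int(s, 16))\r
+    #   parser.add_argument('--mask', type='hexint')\r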
+\r
+    # ==================================\r
+    # Namespace default settings methods\r
+    # ==================================\r
+    def set_defaults(self, **kwargs):\r
+        self._defaults.update(kwargs)\r
+\r
+        # if these defaults match any existing arguments, replace\r
+        # the previous default on the object with the new one\r
+        for action in self._actions:\r
+            if action.dest in kwargs:\r
+                action.default = kwargs[action.dest]\r
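+    # Illustrative sketch (argument names are assumptions; `parser` is an\r
+    # assumed ArgumentParser instance):\r
+    #\r
+    #   parser.add_argument('--format', default='fs')\r
+    #   parser.set_defaults(format='livecd')\r
+    #   # parser.parse_args([]).format is now 'livecd'\r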
+\r
+    # =======================\r
+    # Adding argument actions\r
+    # =======================\r
+    def add_argument(self, *args, **kwargs):\r
+        """\r
+        add_argument(dest, ..., name=value, ...)\r
+        add_argument(option_string, option_string, ..., name=value, ...)\r
+        """\r
+\r
+        # if no positional args are supplied or only one is supplied and\r
+        # it doesn't look like an option string, parse a positional\r
+        # argument\r
+        chars = self.prefix_chars\r
+        if not args or len(args) == 1 and args[0][0] not in chars:\r
+            kwargs = self._get_positional_kwargs(*args, **kwargs)\r
+\r
+        # otherwise, we're adding an optional argument\r
+        else:\r
+            kwargs = self._get_optional_kwargs(*args, **kwargs)\r
+\r
+        # if no default was supplied, use the parser-level default\r
+        if 'default' not in kwargs:\r
+            dest = kwargs['dest']\r
+            if dest in self._defaults:\r
+                kwargs['default'] = self._defaults[dest]\r
+            elif self.argument_default is not None:\r
+                kwargs['default'] = self.argument_default\r
+\r
+        # create the action object, and add it to the parser\r
+        action_class = self._pop_action_class(kwargs)\r
+        action = action_class(**kwargs)\r
+        return self._add_action(action)\r
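+    # Illustrative calls only (argument names are assumptions); the first\r
+    # form adds a positional, the second an optional:\r
+    #\r
+    #   parser.add_argument('ksfile')\r
+    #   parser.add_argument('-o', '--outdir', default='.')\r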
+\r
+    def add_argument_group(self, *args, **kwargs):\r
+        group = _ArgumentGroup(self, *args, **kwargs)\r
+        self._action_groups.append(group)\r
+        return group\r
+\r
+    def add_mutually_exclusive_group(self, **kwargs):\r
+        group = _MutuallyExclusiveGroup(self, **kwargs)\r
+        self._mutually_exclusive_groups.append(group)\r
+        return group\r
+\r
+    def _add_action(self, action):\r
+        # resolve any conflicts\r
+        self._check_conflict(action)\r
+\r
+        # add to actions list\r
+        self._actions.append(action)\r
+        action.container = self\r
+\r
+        # index the action by any option strings it has\r
+        for option_string in action.option_strings:\r
+            self._option_string_actions[option_string] = action\r
+\r
+        # set the flag if any option strings look like negative numbers\r
+        for option_string in action.option_strings:\r
+            if self._negative_number_matcher.match(option_string):\r
+                if not self._has_negative_number_optionals:\r
+                    self._has_negative_number_optionals.append(True)\r
+\r
+        # return the created action\r
+        return action\r
+\r
+    def _remove_action(self, action):\r
+        self._actions.remove(action)\r
+\r
+    def _add_container_actions(self, container):\r
+        # collect groups by titles\r
+        title_group_map = {}\r
+        for group in self._action_groups:\r
+            if group.title in title_group_map:\r
+                msg = _('cannot merge actions - two groups are named %r')\r
+                raise ValueError(msg % (group.title))\r
+            title_group_map[group.title] = group\r
+\r
+        # map each action to its group\r
+        group_map = {}\r
+        for group in container._action_groups:\r
+\r
+            # if a group with the title exists, use that, otherwise\r
+            # create a new group matching the container's group\r
+            if group.title not in title_group_map:\r
+                title_group_map[group.title] = self.add_argument_group(\r
+                    title=group.title,\r
+                    description=group.description,\r
+                    conflict_handler=group.conflict_handler)\r
+\r
+            # map the actions to their new group\r
+            for action in group._group_actions:\r
+                group_map[action] = title_group_map[group.title]\r
+\r
+        # add container's mutually exclusive groups\r
+        # NOTE: if add_mutually_exclusive_group ever gains title= and\r
+        # description= then this code will need to be expanded as above\r
+        for group in container._mutually_exclusive_groups:\r
+            mutex_group = self.add_mutually_exclusive_group(\r
+                required=group.required)\r
+\r
+            # map the actions to their new mutex group\r
+            for action in group._group_actions:\r
+                group_map[action] = mutex_group\r
+\r
+        # add all actions to this container or their group\r
+        for action in container._actions:\r
+            group_map.get(action, self)._add_action(action)\r
+\r
+    def _get_positional_kwargs(self, dest, **kwargs):\r
+        # make sure required is not specified\r
+        if 'required' in kwargs:\r
+            msg = _("'required' is an invalid argument for positionals")\r
+            raise TypeError(msg)\r
+\r
+        # mark positional arguments as required if at least one is\r
+        # always required\r
+        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:\r
+            kwargs['required'] = True\r
+        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:\r
+            kwargs['required'] = True\r
+\r
+        # return the keyword arguments with no option strings\r
+        return dict(kwargs, dest=dest, option_strings=[])\r
+\r
+    def _get_optional_kwargs(self, *args, **kwargs):\r
+        # determine short and long option strings\r
+        option_strings = []\r
+        long_option_strings = []\r
+        for option_string in args:\r
+            # error on one-or-fewer-character option strings\r
+            if len(option_string) < 2:\r
+                msg = _('invalid option string %r: '\r
+                        'must be at least two characters long')\r
+                raise ValueError(msg % option_string)\r
+\r
+            # error on strings that don't start with an appropriate prefix\r
+            if not option_string[0] in self.prefix_chars:\r
+                msg = _('invalid option string %r: '\r
+                        'must start with a character %r')\r
+                tup = option_string, self.prefix_chars\r
+                raise ValueError(msg % tup)\r
+\r
+            # error on strings that are all prefix characters\r
+            if not (_set(option_string) - _set(self.prefix_chars)):\r
+                msg = _('invalid option string %r: '\r
+                        'must contain characters other than %r')\r
+                tup = option_string, self.prefix_chars\r
+                raise ValueError(msg % tup)\r
+\r
+            # strings starting with two prefix characters are long options\r
+            option_strings.append(option_string)\r
+            if option_string[0] in self.prefix_chars:\r
+                if option_string[1] in self.prefix_chars:\r
+                    long_option_strings.append(option_string)\r
+\r
+        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'\r
+        dest = kwargs.pop('dest', None)\r
+        if dest is None:\r
+            if long_option_strings:\r
+                dest_option_string = long_option_strings[0]\r
+            else:\r
+                dest_option_string = option_strings[0]\r
+            dest = dest_option_string.lstrip(self.prefix_chars)\r
+            dest = dest.replace('-', '_')\r
+\r
+        # return the updated keyword arguments\r
+        return dict(kwargs, dest=dest, option_strings=option_strings)\r
+\r
+    def _pop_action_class(self, kwargs, default=None):\r
+        action = kwargs.pop('action', default)\r
+        return self._registry_get('action', action, action)\r
+\r
+    def _get_handler(self):\r
+        # determine function from conflict handler string\r
+        handler_func_name = '_handle_conflict_%s' % self.conflict_handler\r
+        try:\r
+            return getattr(self, handler_func_name)\r
+        except AttributeError:\r
+            msg = _('invalid conflict_resolution value: %r')\r
+            raise ValueError(msg % self.conflict_handler)\r
+\r
+    def _check_conflict(self, action):\r
+\r
+        # find all options that conflict with this option\r
+        confl_optionals = []\r
+        for option_string in action.option_strings:\r
+            if option_string in self._option_string_actions:\r
+                confl_optional = self._option_string_actions[option_string]\r
+                confl_optionals.append((option_string, confl_optional))\r
+\r
+        # resolve any conflicts\r
+        if confl_optionals:\r
+            conflict_handler = self._get_handler()\r
+            conflict_handler(action, confl_optionals)\r
+\r
+    def _handle_conflict_error(self, action, conflicting_actions):\r
+        message = _('conflicting option string(s): %s')\r
+        conflict_string = ', '.join([option_string\r
+                                     for option_string, action\r
+                                     in conflicting_actions])\r
+        raise ArgumentError(action, message % conflict_string)\r
+\r
+    def _handle_conflict_resolve(self, action, conflicting_actions):\r
+\r
+        # remove all conflicting options\r
+        for option_string, action in conflicting_actions:\r
+\r
+            # remove the conflicting option\r
+            action.option_strings.remove(option_string)\r
+            self._option_string_actions.pop(option_string, None)\r
+\r
+            # if the option now has no option string, remove it from the\r
+            # container holding it\r
+            if not action.option_strings:\r
+                action.container._remove_action(action)\r
+\r
+\r
+class _ArgumentGroup(_ActionsContainer):\r
+\r
+    def __init__(self, container, title=None, description=None, **kwargs):\r
+        # add any missing keyword arguments by checking the container\r
+        update = kwargs.setdefault\r
+        update('conflict_handler', container.conflict_handler)\r
+        update('prefix_chars', container.prefix_chars)\r
+        update('argument_default', container.argument_default)\r
+        super_init = super(_ArgumentGroup, self).__init__\r
+        super_init(description=description, **kwargs)\r
+\r
+        # group attributes\r
+        self.title = title\r
+        self._group_actions = []\r
+\r
+        # share most attributes with the container\r
+        self._registries = container._registries\r
+        self._actions = container._actions\r
+        self._option_string_actions = container._option_string_actions\r
+        self._defaults = container._defaults\r
+        self._has_negative_number_optionals = \\r
+            container._has_negative_number_optionals\r
+\r
+    def _add_action(self, action):\r
+        action = super(_ArgumentGroup, self)._add_action(action)\r
+        self._group_actions.append(action)\r
+        return action\r
+\r
+    def _remove_action(self, action):\r
+        super(_ArgumentGroup, self)._remove_action(action)\r
+        self._group_actions.remove(action)\r
+\r
+\r
+class _MutuallyExclusiveGroup(_ArgumentGroup):\r
+\r
+    def __init__(self, container, required=False):\r
+        super(_MutuallyExclusiveGroup, self).__init__(container)\r
+        self.required = required\r
+        self._container = container\r
+\r
+    def _add_action(self, action):\r
+        if action.required:\r
+            msg = _('mutually exclusive arguments must be optional')\r
+            raise ValueError(msg)\r
+        action = self._container._add_action(action)\r
+        self._group_actions.append(action)\r
+        return action\r
+\r
+    def _remove_action(self, action):\r
+        self._container._remove_action(action)\r
+        self._group_actions.remove(action)\r
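+    # Illustrative sketch (option names are assumptions; `parser` is an\r
+    # assumed ArgumentParser instance):\r
+    #\r
+    #   group = parser.add_mutually_exclusive_group()\r
+    #   group.add_argument('--verbose', action='store_true')\r
+    #   group.add_argument('--quiet', action='store_true')\r
+    #   # giving both options on one command line raises a parse error\r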
+\r
+\r
+class ArgumentParser(_AttributeHolder, _ActionsContainer):\r
+    """Object for parsing command line strings into Python objects.\r
+\r
+    Keyword Arguments:\r
+        - prog -- The name of the program (default: sys.argv[0])\r
+        - usage -- A usage message (default: auto-generated from arguments)\r
+        - description -- A description of what the program does\r
+        - epilog -- Text following the argument descriptions\r
+        - version -- Add a -v/--version option with the given version string\r
+        - parents -- Parsers whose arguments should be copied into this one\r
+        - formatter_class -- HelpFormatter class for printing help messages\r
+        - prefix_chars -- Characters that prefix optional arguments\r
+        - fromfile_prefix_chars -- Characters that prefix files containing\r
+            additional arguments\r
+        - argument_default -- The default value for all arguments\r
+        - conflict_handler -- String indicating how to handle conflicts\r
+        - add_help -- Add a -h/--help option\r
+    """\r
+\r
+    def __init__(self,\r
+                 prog=None,\r
+                 usage=None,\r
+                 description=None,\r
+                 epilog=None,\r
+                 version=None,\r
+                 parents=[],\r
+                 formatter_class=HelpFormatter,\r
+                 prefix_chars='-',\r
+                 fromfile_prefix_chars=None,\r
+                 argument_default=None,\r
+                 conflict_handler='error',\r
+                 add_help=True):\r
+\r
+        superinit = super(ArgumentParser, self).__init__\r
+        superinit(description=description,\r
+                  prefix_chars=prefix_chars,\r
+                  argument_default=argument_default,\r
+                  conflict_handler=conflict_handler)\r
+\r
+        # default setting for prog\r
+        if prog is None:\r
+            prog = _os.path.basename(_sys.argv[0])\r
+\r
+        self.prog = prog\r
+        self.usage = usage\r
+        self.epilog = epilog\r
+        self.version = version\r
+        self.formatter_class = formatter_class\r
+        self.fromfile_prefix_chars = fromfile_prefix_chars\r
+        self.add_help = add_help\r
+\r
+        add_group = self.add_argument_group\r
+        self._positionals = add_group(_('arguments'))\r
+        self._optionals = add_group(_('options'))\r
+        self._subparsers = None\r
+\r
+        # register types\r
+        def identity(string):\r
+            return string\r
+        self.register('type', None, identity)\r
+\r
+        # add help and version arguments if necessary\r
+        # (using explicit default to override global argument_default)\r
+        if self.add_help:\r
+            self.add_argument(\r
+                '-h', '--help', action='help', default=SUPPRESS,\r
+                help=_('show this help message and exit'))\r
+        if self.version:\r
+            self.add_argument(\r
+                '-v', '--version', action='version', default=SUPPRESS,\r
+                help=_("show program's version number and exit"))\r
+\r
+        # add parent arguments and defaults\r
+        for parent in parents:\r
+            self._add_container_actions(parent)\r
+            try:\r
+                defaults = parent._defaults\r
+            except AttributeError:\r
+                pass\r
+            else:\r
+                self._defaults.update(defaults)\r
+\r
+    # =======================\r
+    # Pretty __repr__ methods\r
+    # =======================\r
+    def _get_kwargs(self):\r
+        names = [\r
+            'prog',\r
+            'usage',\r
+            'description',\r
+            'version',\r
+            'formatter_class',\r
+            'conflict_handler',\r
+            'add_help',\r
+        ]\r
+        return [(name, getattr(self, name)) for name in names]\r
+\r
+    # ==================================\r
+    # Optional/Positional adding methods\r
+    # ==================================\r
+    def add_subparsers(self, **kwargs):\r
+        if self._subparsers is not None:\r
+            self.error(_('cannot have multiple subparser arguments'))\r
+\r
+        # add the parser class to the arguments if it's not present\r
+        kwargs.setdefault('parser_class', type(self))\r
+\r
+        if 'title' in kwargs or 'description' in kwargs:\r
+            title = _(kwargs.pop('title', 'subcommands'))\r
+            description = _(kwargs.pop('description', None))\r
+            self._subparsers = self.add_argument_group(title, description)\r
+        else:\r
+            self._subparsers = self._positionals\r
+\r
+        # prog defaults to the usage message of this parser, skipping\r
+        # optional arguments and with no "usage:" prefix\r
+        if kwargs.get('prog') is None:\r
+            formatter = self._get_formatter()\r
+            positionals = self._get_positional_actions()\r
+            groups = self._mutually_exclusive_groups\r
+            formatter.add_usage(self.usage, positionals, groups, '')\r
+            kwargs['prog'] = formatter.format_help().strip()\r
+\r
+        # create the parsers action and add it to the positionals list\r
+        parsers_class = self._pop_action_class(kwargs, 'parsers')\r
+        action = parsers_class(option_strings=[], **kwargs)\r
+        self._subparsers._add_action(action)\r
+\r
+        # return the created parsers action\r
+        return action\r
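+    # Illustrative sketch (subcommand and option names are assumptions):\r
+    #\r
+    #   subparsers = parser.add_subparsers(dest='command')\r
+    #   create = subparsers.add_parser('create', help='create an image')\r
+    #   create.add_argument('ksfile')\r
+    #   # parse_args(['create', 'foo.ks']) yields command='create', ksfile='foo.ks'\r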
+\r
+    def _add_action(self, action):\r
+        if action.option_strings:\r
+            self._optionals._add_action(action)\r
+        else:\r
+            self._positionals._add_action(action)\r
+        return action\r
+\r
+    def _get_optional_actions(self):\r
+        return [action\r
+                for action in self._actions\r
+                if action.option_strings]\r
+\r
+    def _get_positional_actions(self):\r
+        return [action\r
+                for action in self._actions\r
+                if not action.option_strings]\r
+\r
+    # =====================================\r
+    # Command line argument parsing methods\r
+    # =====================================\r
+    def parse_args(self, args=None, namespace=None):\r
+        args, argv = self.parse_known_args(args, namespace)\r
+        if argv:\r
+            msg = _('unrecognized arguments: %s')\r
+            self.error(msg % ' '.join(argv))\r
+        return args\r
+\r
+    def parse_known_args(self, args=None, namespace=None):\r
+        # args default to the system args\r
+        if args is None:\r
+            args = _sys.argv[1:]\r
+\r
+        # default Namespace built from parser defaults\r
+        if namespace is None:\r
+            namespace = Namespace()\r
+\r
+        # add any action defaults that aren't present\r
+        for action in self._actions:\r
+            if action.dest is not SUPPRESS:\r
+                if not hasattr(namespace, action.dest):\r
+                    if action.default is not SUPPRESS:\r
+                        default = action.default\r
+                        if isinstance(action.default, _basestring):\r
+                            default = self._get_value(action, default)\r
+                        setattr(namespace, action.dest, default)\r
+\r
+        # add any parser defaults that aren't present\r
+        for dest in self._defaults:\r
+            if not hasattr(namespace, dest):\r
+                setattr(namespace, dest, self._defaults[dest])\r
+\r
+        # parse the arguments and exit if there are any errors\r
+        try:\r
+            return self._parse_known_args(args, namespace)\r
+        except ArgumentError:\r
+            err = _sys.exc_info()[1]\r
+            self.error(str(err))\r
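+    # Illustrative difference between the two entry points (argument strings\r
+    # are assumptions):\r
+    #\r
+    #   opts = parser.parse_args(['--outdir', '/tmp'])            # errors on extras\r
+    #   opts, extras = parser.parse_known_args(['--outdir', '/tmp', '-X'])\r
+    #   # unrecognized strings such as '-X' come back in `extras`\r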
+\r
+    def _parse_known_args(self, arg_strings, namespace):\r
+        # replace arg strings that are file references\r
+        if self.fromfile_prefix_chars is not None:\r
+            arg_strings = self._read_args_from_files(arg_strings)\r
+\r
+        # map all mutually exclusive arguments to the other arguments\r
+        # they can't occur with\r
+        action_conflicts = {}\r
+        for mutex_group in self._mutually_exclusive_groups:\r
+            group_actions = mutex_group._group_actions\r
+            for i, mutex_action in enumerate(mutex_group._group_actions):\r
+                conflicts = action_conflicts.setdefault(mutex_action, [])\r
+                conflicts.extend(group_actions[:i])\r
+                conflicts.extend(group_actions[i + 1:])\r
+\r
+        # find all option indices, and determine the arg_string_pattern\r
+        # which has an 'O' if there is an option at an index,\r
+        # an 'A' if there is an argument, or a '-' if there is a '--'\r
+        option_string_indices = {}\r
+        arg_string_pattern_parts = []\r
+        arg_strings_iter = iter(arg_strings)\r
+        for i, arg_string in enumerate(arg_strings_iter):\r
+\r
+            # all args after -- are non-options\r
+            if arg_string == '--':\r
+                arg_string_pattern_parts.append('-')\r
+                for arg_string in arg_strings_iter:\r
+                    arg_string_pattern_parts.append('A')\r
+\r
+            # otherwise, add the arg to the arg strings\r
+            # and note the index if it was an option\r
+            else:\r
+                option_tuple = self._parse_optional(arg_string)\r
+                if option_tuple is None:\r
+                    pattern = 'A'\r
+                else:\r
+                    option_string_indices[i] = option_tuple\r
+                    pattern = 'O'\r
+                arg_string_pattern_parts.append(pattern)\r
+\r
+        # join the pieces together to form the pattern\r
+        arg_strings_pattern = ''.join(arg_string_pattern_parts)\r
+\r
+        # converts arg strings to the appropriate type and then takes the action\r
+        seen_actions = _set()\r
+        seen_non_default_actions = _set()\r
+\r
+        def take_action(action, argument_strings, option_string=None):\r
+            seen_actions.add(action)\r
+            argument_values = self._get_values(action, argument_strings)\r
+\r
+            # error if this argument is not allowed with other previously\r
+            # seen arguments, assuming that actions that use the default\r
+            # value don't really count as "present"\r
+            if argument_values is not action.default:\r
+                seen_non_default_actions.add(action)\r
+                for conflict_action in action_conflicts.get(action, []):\r
+                    if conflict_action in seen_non_default_actions:\r
+                        msg = _('not allowed with argument %s')\r
+                        action_name = _get_action_name(conflict_action)\r
+                        raise ArgumentError(action, msg % action_name)\r
+\r
+            # take the action if we didn't receive a SUPPRESS value\r
+            # (e.g. from a default)\r
+            if argument_values is not SUPPRESS:\r
+                action(self, namespace, argument_values, option_string)\r
+\r
+        # function to convert arg_strings into an optional action\r
+        def consume_optional(start_index):\r
+\r
+            # get the optional identified at this index\r
+            option_tuple = option_string_indices[start_index]\r
+            action, option_string, explicit_arg = option_tuple\r
+\r
+            # identify additional optionals in the same arg string\r
+            # (e.g. -xyz is the same as -x -y -z if no args are required)\r
+            match_argument = self._match_argument\r
+            action_tuples = []\r
+            while True:\r
+\r
+                # if we found no optional action, skip it\r
+                if action is None:\r
+                    extras.append(arg_strings[start_index])\r
+                    return start_index + 1\r
+\r
+                # if there is an explicit argument, try to match the\r
+                # optional's string arguments to only this\r
+                if explicit_arg is not None:\r
+                    arg_count = match_argument(action, 'A')\r
+\r
+                    # if the action is a single-dash option and takes no\r
+                    # arguments, try to parse more single-dash options out\r
+                    # of the tail of the option string\r
+                    chars = self.prefix_chars\r
+                    if arg_count == 0 and option_string[1] not in chars:\r
+                        action_tuples.append((action, [], option_string))\r
+                        for char in self.prefix_chars:\r
+                            option_string = char + explicit_arg[0]\r
+                            explicit_arg = explicit_arg[1:] or None\r
+                            optionals_map = self._option_string_actions\r
+                            if option_string in optionals_map:\r
+                                action = optionals_map[option_string]\r
+                                break\r
+                        else:\r
+                            msg = _('ignored explicit argument %r')\r
+                            raise ArgumentError(action, msg % explicit_arg)\r
+\r
+                    # if the action expects exactly one argument, we've\r
+                    # successfully matched the option; exit the loop\r
+                    elif arg_count == 1:\r
+                        stop = start_index + 1\r
+                        args = [explicit_arg]\r
+                        action_tuples.append((action, args, option_string))\r
+                        break\r
+\r
+                    # error if a double-dash option did not use the\r
+                    # explicit argument\r
+                    else:\r
+                        msg = _('ignored explicit argument %r')\r
+                        raise ArgumentError(action, msg % explicit_arg)\r
+\r
+                # if there is no explicit argument, try to match the\r
+                # optional's string arguments with the following strings\r
+                # if successful, exit the loop\r
+                else:\r
+                    start = start_index + 1\r
+                    selected_patterns = arg_strings_pattern[start:]\r
+                    arg_count = match_argument(action, selected_patterns)\r
+                    stop = start + arg_count\r
+                    args = arg_strings[start:stop]\r
+                    action_tuples.append((action, args, option_string))\r
+                    break\r
+\r
+            # add the Optional to the list and return the index at which\r
+            # the Optional's string args stopped\r
+            assert action_tuples\r
+            for action, args, option_string in action_tuples:\r
+                take_action(action, args, option_string)\r
+            return stop\r
+\r
+        # the list of Positionals left to be parsed; this is modified\r
+        # by consume_positionals()\r
+        positionals = self._get_positional_actions()\r
+\r
+        # function to convert arg_strings into positional actions\r
+        def consume_positionals(start_index):\r
+            # match as many Positionals as possible\r
+            match_partial = self._match_arguments_partial\r
+            selected_pattern = arg_strings_pattern[start_index:]\r
+            arg_counts = match_partial(positionals, selected_pattern)\r
+\r
+            # slice off the appropriate arg strings for each Positional\r
+            # and add the Positional and its args to the list\r
+            for action, arg_count in zip(positionals, arg_counts):\r
+                args = arg_strings[start_index: start_index + arg_count]\r
+                start_index += arg_count\r
+                take_action(action, args)\r
+\r
+            # slice off the Positionals that we just parsed and return the\r
+            # index at which the Positionals' string args stopped\r
+            positionals[:] = positionals[len(arg_counts):]\r
+            return start_index\r
+\r
+        # consume Positionals and Optionals alternately, until we have\r
+        # passed the last option string\r
+        extras = []\r
+        start_index = 0\r
+        if option_string_indices:\r
+            max_option_string_index = max(option_string_indices)\r
+        else:\r
+            max_option_string_index = -1\r
+        while start_index <= max_option_string_index:\r
+\r
+            # consume any Positionals preceding the next option\r
+            next_option_string_index = min([\r
+                index\r
+                for index in option_string_indices\r
+                if index >= start_index])\r
+            if start_index != next_option_string_index:\r
+                positionals_end_index = consume_positionals(start_index)\r
+\r
+                # only try to parse the next optional if we didn't consume\r
+                # the option string during the positionals parsing\r
+                if positionals_end_index > start_index:\r
+                    start_index = positionals_end_index\r
+                    continue\r
+                else:\r
+                    start_index = positionals_end_index\r
+\r
+            # if we consumed all the positionals we could and we're not\r
+            # at the index of an option string, there were extra arguments\r
+            if start_index not in option_string_indices:\r
+                strings = arg_strings[start_index:next_option_string_index]\r
+                extras.extend(strings)\r
+                start_index = next_option_string_index\r
+\r
+            # consume the next optional and any arguments for it\r
+            start_index = consume_optional(start_index)\r
+\r
+        # consume any positionals following the last Optional\r
+        stop_index = consume_positionals(start_index)\r
+\r
+        # if we didn't consume all the argument strings, there were extras\r
+        extras.extend(arg_strings[stop_index:])\r
+\r
+        # if we didn't use all the Positional objects, there were too few\r
+        # arg strings supplied.\r
+        if positionals:\r
+            self.error(_('too few arguments'))\r
+\r
+        # make sure all required actions were present\r
+        for action in self._actions:\r
+            if action.required:\r
+                if action not in seen_actions:\r
+                    name = _get_action_name(action)\r
+                    self.error(_('argument %s is required') % name)\r
+\r
+        # make sure all required groups had one option present\r
+        for group in self._mutually_exclusive_groups:\r
+            if group.required:\r
+                for action in group._group_actions:\r
+                    if action in seen_non_default_actions:\r
+                        break\r
+\r
+                # if no actions were used, report the error\r
+                else:\r
+                    names = [_get_action_name(action)\r
+                             for action in group._group_actions\r
+                             if action.help is not SUPPRESS]\r
+                    msg = _('one of the arguments %s is required')\r
+                    self.error(msg % ' '.join(names))\r
+\r
+        # return the updated namespace and the extra arguments\r
+        return namespace, extras\r
+\r
+    def _read_args_from_files(self, arg_strings):\r
+        # expand arguments referencing files\r
+        new_arg_strings = []\r
+        for arg_string in arg_strings:\r
+\r
+            # for regular arguments, just add them back into the list\r
+            if arg_string[0] not in self.fromfile_prefix_chars:\r
+                new_arg_strings.append(arg_string)\r
+\r
+            # replace arguments referencing files with the file content\r
+            else:\r
+                try:\r
+                    args_file = open(arg_string[1:])\r
+                    try:\r
+                        arg_strings = args_file.read().splitlines()\r
+                        arg_strings = self._read_args_from_files(arg_strings)\r
+                        new_arg_strings.extend(arg_strings)\r
+                    finally:\r
+                        args_file.close()\r
+                except IOError:\r
+                    err = _sys.exc_info()[1]\r
+                    self.error(str(err))\r
+\r
+        # return the modified argument list\r
+        return new_arg_strings\r
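+    # Illustrative sketch of fromfile_prefix_chars (the file name and its\r
+    # contents are assumptions):\r
+    #\r
+    #   parser = ArgumentParser(fromfile_prefix_chars='@')\r
+    #   # with a file args.txt containing one argument per line,\r
+    #   # parser.parse_args(['@args.txt']) expands the file contents in place\r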
+\r
+    def _match_argument(self, action, arg_strings_pattern):\r
+        # match the pattern for this action to the arg strings\r
+        nargs_pattern = self._get_nargs_pattern(action)\r
+        match = _re.match(nargs_pattern, arg_strings_pattern)\r
+\r
+        # raise an exception if we weren't able to find a match\r
+        if match is None:\r
+            nargs_errors = {\r
+                None: _('expected one argument'),\r
+                OPTIONAL: _('expected at most one argument'),\r
+                ONE_OR_MORE: _('expected at least one argument'),\r
+            }\r
+            default = _('expected %s argument(s)') % action.nargs\r
+            msg = nargs_errors.get(action.nargs, default)\r
+            raise ArgumentError(action, msg)\r
+\r
+        # return the number of arguments matched\r
+        return len(match.group(1))\r
+\r
+    def _match_arguments_partial(self, actions, arg_strings_pattern):\r
+        # progressively shorten the actions list by slicing off the\r
+        # final actions until we find a match\r
+        result = []\r
+        for i in range(len(actions), 0, -1):\r
+            actions_slice = actions[:i]\r
+            pattern = ''.join([self._get_nargs_pattern(action)\r
+                               for action in actions_slice])\r
+            match = _re.match(pattern, arg_strings_pattern)\r
+            if match is not None:\r
+                result.extend([len(string) for string in match.groups()])\r
+                break\r
+\r
+        # return the list of arg string counts\r
+        return result\r
+\r
+    def _parse_optional(self, arg_string):\r
+        # if it's an empty string, it was meant to be a positional\r
+        if not arg_string:\r
+            return None\r
+\r
+        # if it doesn't start with a prefix, it was meant to be positional\r
+        if not arg_string[0] in self.prefix_chars:\r
+            return None\r
+\r
+        # if it's just dashes, it was meant to be positional\r
+        if not arg_string.strip('-'):\r
+            return None\r
+\r
+        # if the option string is present in the parser, return the action\r
+        if arg_string in self._option_string_actions:\r
+            action = self._option_string_actions[arg_string]\r
+            return action, arg_string, None\r
+\r
+        # search through all possible prefixes of the option string\r
+        # and all actions in the parser for possible interpretations\r
+        option_tuples = self._get_option_tuples(arg_string)\r
+\r
+        # if multiple actions match, the option string was ambiguous\r
+        if len(option_tuples) > 1:\r
+            options = ', '.join([option_string\r
+                for action, option_string, explicit_arg in option_tuples])\r
+            tup = arg_string, options\r
+            self.error(_('ambiguous option: %s could match %s') % tup)\r
+\r
+        # if exactly one action matched, this segmentation is good,\r
+        # so return the parsed action\r
+        elif len(option_tuples) == 1:\r
+            option_tuple, = option_tuples\r
+            return option_tuple\r
+\r
+        # if it was not found as an option, but it looks like a negative\r
+        # number, it was meant to be positional\r
+        # unless there are negative-number-like options\r
+        if self._negative_number_matcher.match(arg_string):\r
+            if not self._has_negative_number_optionals:\r
+                return None\r
+\r
+        # if it contains a space, it was meant to be a positional\r
+        if ' ' in arg_string:\r
+            return None\r
+\r
+        # it was meant to be an optional but there is no such option\r
+        # in this parser (though it might be a valid option in a subparser)\r
+        return None, arg_string, None\r
+\r
+    def _get_option_tuples(self, option_string):\r
+        result = []\r
+\r
+        # option strings starting with two prefix characters are only\r
+        # split at the '='\r
+        chars = self.prefix_chars\r
+        if option_string[0] in chars and option_string[1] in chars:\r
+            if '=' in option_string:\r
+                option_prefix, explicit_arg = option_string.split('=', 1)\r
+            else:\r
+                option_prefix = option_string\r
+                explicit_arg = None\r
+            for option_string in self._option_string_actions:\r
+                if option_string.startswith(option_prefix):\r
+                    action = self._option_string_actions[option_string]\r
+                    tup = action, option_string, explicit_arg\r
+                    result.append(tup)\r
+\r
+        # single character options can be concatenated with their arguments\r
+        # but multiple character options always have to have their argument\r
+        # separate\r
+        elif option_string[0] in chars and option_string[1] not in chars:\r
+            option_prefix = option_string\r
+            explicit_arg = None\r
+            short_option_prefix = option_string[:2]\r
+            short_explicit_arg = option_string[2:]\r
+\r
+            for option_string in self._option_string_actions:\r
+                if option_string == short_option_prefix:\r
+                    action = self._option_string_actions[option_string]\r
+                    tup = action, option_string, short_explicit_arg\r
+                    result.append(tup)\r
+                elif option_string.startswith(option_prefix):\r
+                    action = self._option_string_actions[option_string]\r
+                    tup = action, option_string, explicit_arg\r
+                    result.append(tup)\r
+\r
+        # shouldn't ever get here\r
+        else:\r
+            self.error(_('unexpected option string: %s') % option_string)\r
+\r
+        # return the collected option tuples\r
+        return result\r
+\r
+    def _get_nargs_pattern(self, action):\r
+        # in all examples below, we have to allow for '--' args\r
+        # which are represented as '-' in the pattern\r
+        nargs = action.nargs\r
+\r
+        # the default (None) is assumed to be a single argument\r
+        if nargs is None:\r
+            nargs_pattern = '(-*A-*)'\r
+\r
+        # allow zero or one arguments\r
+        elif nargs == OPTIONAL:\r
+            nargs_pattern = '(-*A?-*)'\r
+\r
+        # allow zero or more arguments\r
+        elif nargs == ZERO_OR_MORE:\r
+            nargs_pattern = '(-*[A-]*)'\r
+\r
+        # allow one or more arguments\r
+        elif nargs == ONE_OR_MORE:\r
+            nargs_pattern = '(-*A[A-]*)'\r
+\r
+        # allow one argument followed by any number of options or arguments\r
+        elif nargs is PARSER:\r
+            nargs_pattern = '(-*A[-AO]*)'\r
+\r
+        # all others should be integers\r
+        else:\r
+            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)\r
+\r
+        # if this is an optional action, -- is not allowed\r
+        if action.option_strings:\r
+            nargs_pattern = nargs_pattern.replace('-*', '')\r
+            nargs_pattern = nargs_pattern.replace('-', '')\r
+\r
+        # return the pattern\r
+        return nargs_pattern\r
+\r
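+    # For illustration (not part of the original source): with the 'A'
+    # (argument) / 'O' (option) encoding used elsewhere in this parser,
+    # nargs='+' yields the pattern '(-*A[A-]*)', which lets a positional
+    # consume every following plain argument, while nargs=2 yields
+    # '(-*A-*A-*)', which matches exactly two.
+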
+    # ========================\r
+    # Value conversion methods\r
+    # ========================\r
+    def _get_values(self, action, arg_strings):\r
+        # for everything but PARSER args, strip out '--'\r
+        if action.nargs is not PARSER:\r
+            arg_strings = [s for s in arg_strings if s != '--']\r
+\r
+        # optional argument produces a default when not present\r
+        if not arg_strings and action.nargs == OPTIONAL:\r
+            if action.option_strings:\r
+                value = action.const\r
+            else:\r
+                value = action.default\r
+            if isinstance(value, _basestring):\r
+                value = self._get_value(action, value)\r
+                self._check_value(action, value)\r
+\r
+        # when nargs='*' on a positional, if there were no command-line\r
+        # args, use the default if it is anything other than None\r
+        elif (not arg_strings and action.nargs == ZERO_OR_MORE and\r
+              not action.option_strings):\r
+            if action.default is not None:\r
+                value = action.default\r
+            else:\r
+                value = arg_strings\r
+            self._check_value(action, value)\r
+\r
+        # single argument or optional argument produces a single value\r
+        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:\r
+            arg_string, = arg_strings\r
+            value = self._get_value(action, arg_string)\r
+            self._check_value(action, value)\r
+\r
+        # PARSER arguments convert all values, but check only the first\r
+        elif action.nargs is PARSER:\r
+            value = [self._get_value(action, v) for v in arg_strings]\r
+            self._check_value(action, value[0])\r
+\r
+        # all other types of nargs produce a list\r
+        else:\r
+            value = [self._get_value(action, v) for v in arg_strings]\r
+            for v in value:\r
+                self._check_value(action, v)\r
+\r
+        # return the converted value\r
+        return value\r
+\r
+    def _get_value(self, action, arg_string):\r
+        type_func = self._registry_get('type', action.type, action.type)\r
+        if not hasattr(type_func, '__call__'):\r
+            if not hasattr(type_func, '__bases__'): # classic classes\r
+                msg = _('%r is not callable')\r
+                raise ArgumentError(action, msg % type_func)\r
+\r
+        # convert the value to the appropriate type\r
+        try:\r
+            result = type_func(arg_string)\r
+\r
+        # TypeErrors or ValueErrors indicate errors\r
+        except (TypeError, ValueError):\r
+            name = getattr(action.type, '__name__', repr(action.type))\r
+            msg = _('invalid %s value: %r')\r
+            raise ArgumentError(action, msg % (name, arg_string))\r
+\r
+        # return the converted value\r
+        return result\r
+\r
+    def _check_value(self, action, value):\r
+        # converted value must be one of the choices (if specified)\r
+        if action.choices is not None and value not in action.choices:\r
+            tup = value, ', '.join(map(repr, action.choices))\r
+            msg = _('invalid choice: %r (choose from %s)') % tup\r
+            raise ArgumentError(action, msg)\r
+\r
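+    # Illustrative sketch of how type conversion and choice checking
+    # combine, using the public API (names are examples only):
+    #
+    #     parser.add_argument('--compress', type=int, choices=[0, 1, 9])
+    #     parser.parse_args(['--compress', '5'])  # error: invalid choice
+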
+    # =======================\r
+    # Help-formatting methods\r
+    # =======================\r
+    def format_usage(self):\r
+        formatter = self._get_formatter()\r
+        formatter.add_usage(self.usage, self._actions,\r
+                            self._mutually_exclusive_groups)\r
+        return formatter.format_help()\r
+\r
+    def format_help(self):\r
+        formatter = self._get_formatter()\r
+\r
+        # usage\r
+        formatter.add_usage(self.usage, self._actions,\r
+                            self._mutually_exclusive_groups)\r
+\r
+        # description\r
+        formatter.add_text(self.description)\r
+\r
+        # positionals, optionals and user-defined groups\r
+        for action_group in self._action_groups:\r
+            formatter.start_section(action_group.title)\r
+            formatter.add_text(action_group.description)\r
+            formatter.add_arguments(action_group._group_actions)\r
+            formatter.end_section()\r
+\r
+        # epilog\r
+        formatter.add_text(self.epilog)\r
+\r
+        # determine help from format above\r
+        return formatter.format_help()\r
+\r
+    def format_version(self):\r
+        formatter = self._get_formatter()\r
+        formatter.add_text(self.version)\r
+        return formatter.format_help()\r
+\r
+    def _get_formatter(self):\r
+        return self.formatter_class(prog=self.prog)\r
+\r
+    # =====================\r
+    # Help-printing methods\r
+    # =====================\r
+    def print_usage(self, file=None):\r
+        self._print_message(self.format_usage(), file)\r
+\r
+    def print_help(self, file=None):\r
+        self._print_message(self.format_help(), file)\r
+\r
+    def print_version(self, file=None):\r
+        self._print_message(self.format_version(), file)\r
+\r
+    def _print_message(self, message, file=None):\r
+        if message:\r
+            if file is None:\r
+                file = _sys.stderr\r
+            file.write(message)\r
+\r
+    # ===============\r
+    # Exiting methods\r
+    # ===============\r
+    def exit(self, status=0, message=None):\r
+        if message:\r
+            _sys.stderr.write(message)\r
+        _sys.exit(status)\r
+\r
+    def error(self, message):\r
+        """error(message: string)\r
+\r
+        Prints a usage message incorporating the message to stderr and\r
+        exits.\r
+\r
+        If you override this in a subclass, it should not return -- it\r
+        should either exit or raise an exception.\r
+        """\r
+        self.print_usage(_sys.stderr)\r
+        self.exit(2, _('%s: error: %s\n') % (self.prog, message))\r
diff --git a/micng/utils/cmdln.py b/micng/utils/cmdln.py
new file mode 100644 (file)
index 0000000..aa37fa9
--- /dev/null
@@ -0,0 +1,1539 @@
+# Copyright (c) 2002-2005 ActiveState Corp.
+# License: MIT (see LICENSE.txt for license details)
+# Author:  Trent Mick (TrentM@ActiveState.com)
+# Home:    http://trentm.com/projects/cmdln/
+
+"""An improvement on Python's standard cmd.py module.
+
+As with cmd.py, this module provides "a simple framework for writing
+line-oriented command interpreters."  This module provides a 'RawCmdln'
+class that fixes some design flaws in cmd.Cmd, making it more scalable
+and nicer to use for good 'cvs'- or 'svn'-style command line interfaces
+or simple shells.  And it provides a 'Cmdln' class that adds
+optparse-based option processing. Basically you use it like this:
+
+    import cmdln
+
+    class MySVN(cmdln.Cmdln):
+        name = "svn"
+
+        @cmdln.alias('stat', 'st')
+        @cmdln.option('-v', '--verbose', action='store_true',
+                      help='print verbose information')
+        def do_status(self, subcmd, opts, *paths):
+            print "handle 'svn status' command"
+
+        #...
+
+    if __name__ == "__main__":
+        shell = MySVN()
+        retval = shell.main()
+        sys.exit(retval)
+
+See the README.txt or <http://trentm.com/projects/cmdln/> for more
+details.
+"""
+
+__revision__ = "$Id: cmdln.py 1666 2007-05-09 03:13:03Z trentm $"
+__version_info__ = (1, 0, 0)
+__version__ = '.'.join(map(str, __version_info__))
+
+import os
+import re
+import cmd
+import optparse
+from pprint import pprint
+from datetime import date
+
+
+
+
+#---- globals
+
+LOOP_ALWAYS, LOOP_NEVER, LOOP_IF_EMPTY = range(3)
+
+# An unspecified optional argument when None is a meaningful value.
+_NOT_SPECIFIED = ("Not", "Specified")
+
+# Pattern to match a TypeError message from a call that
+# failed because of incorrect number of arguments (see
+# Python/getargs.c).
+_INCORRECT_NUM_ARGS_RE = re.compile(
+    r"(takes [\w ]+ )(\d+)( arguments? \()(\d+)( given\))")
+
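+# For example (illustrative only), Cmdln._dispatch_cmd() below uses this
+# to turn
+#     "do_foo() takes exactly 5 arguments (6 given)"
+# into the user-facing
+#     "takes exactly 2 arguments (3 given)"
+# by subtracting the implicit 'self', 'subcmd' and 'opts' parameters.
+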
+# Static bits of man page
+MAN_HEADER = r""".TH %(ucname)s "1" "%(date)s" "%(name)s %(version)s" "User Commands"
+.SH NAME
+%(name)s \- Program to do useful things.
+.SH SYNOPSIS
+.B %(name)s
+[\fIGLOBALOPTS\fR] \fISUBCOMMAND \fR[\fIOPTS\fR] [\fIARGS\fR...]
+.br
+.B %(name)s
+\fIhelp SUBCOMMAND\fR
+.SH DESCRIPTION
+"""
+MAN_COMMANDS_HEADER = r"""
+.SS COMMANDS
+"""
+MAN_OPTIONS_HEADER = r"""
+.SS GLOBAL OPTIONS
+"""
+MAN_FOOTER = r"""
+.SH AUTHOR
+This man page is automatically generated.
+"""
+
+#---- exceptions
+
+class CmdlnError(Exception):
+    """A cmdln.py usage error."""
+    def __init__(self, msg):
+        self.msg = msg
+    def __str__(self):
+        return self.msg
+
+class CmdlnUserError(Exception):
+    """An error by a user of a cmdln-based tool/shell."""
+    pass
+
+
+
+#---- public methods and classes
+
+def alias(*aliases):
+    """Decorator to add aliases for Cmdln.do_* command handlers.
+
+    Example:
+        class MyShell(cmdln.Cmdln):
+            @cmdln.alias("!", "sh")
+            def do_shell(self, argv):
+                #...implement 'shell' command
+    """
+    def decorate(f):
+        if not hasattr(f, "aliases"):
+            f.aliases = []
+        f.aliases += aliases
+        return f
+    return decorate
+
+MAN_REPLACES = [
+    (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2\-\3\-\4'),
+    (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2\-\3\-\4'),
+    (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2\-\3'),
+    (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2\-\3'),
+    (re.compile(r'(^|[ \t\[\'])--([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\-\2'),
+    (re.compile(r'(^|[ \t\[\'])-([^/ \t/,-]*)(?=$|[ \t=\]\'/,])'), r'\1\-\2'),
+    (re.compile(r"^'"), r" '"),
+    ]
+
+def man_escape(text):
+    '''
+    Escapes text to be included in man page.
+
+    For now it only escapes dashes in command line options.
+    '''
+    for repl in MAN_REPLACES:
+        text = repl[0].sub(repl[1], text)
+    return text
+
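+# A quick sketch (illustrative): man_escape("use --force or -f") returns
+# "use \-\-force or \-f", i.e. the option dashes are escaped for groff.
+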
+class RawCmdln(cmd.Cmd):
+    """An improved (on cmd.Cmd) framework for building multi-subcommand
+    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
+    "gdb").
+
+    A simple example:
+
+        import cmdln
+
+        class MySVN(cmdln.RawCmdln):
+            name = "svn"
+
+            @cmdln.alias('stat', 'st')
+            def do_status(self, argv):
+                print "handle 'svn status' command"
+
+        if __name__ == "__main__":
+            shell = MySVN()
+            retval = shell.main()
+            sys.exit(retval)
+
+    See <http://trentm.com/projects/cmdln> for more information.
+    """
+    name = None      # if unset, defaults to basename(sys.argv[0])
+    prompt = None    # if unset, defaults to self.name+"> "
+    version = None   # if set, default top-level options include --version
+
+    # Default messages for some 'help' command error cases.
+    # They are interpolated with one arg: the command.
+    nohelp = "no help on '%s'"
+    unknowncmd = "unknown command: '%s'"
+
+    helpindent = '' # string with which to indent help output
+
+    # Default man page parts, please change them in subclass
+    man_header = MAN_HEADER
+    man_commands_header = MAN_COMMANDS_HEADER
+    man_options_header = MAN_OPTIONS_HEADER
+    man_footer = MAN_FOOTER
+
+    def __init__(self, completekey='tab',
+                 stdin=None, stdout=None, stderr=None):
+        """Cmdln(completekey='tab', stdin=None, stdout=None, stderr=None)
+
+        The optional argument 'completekey' is the readline name of a
+        completion key; it defaults to the Tab key. If completekey is
+        not None and the readline module is available, command completion
+        is done automatically.
+
+        The optional arguments 'stdin', 'stdout' and 'stderr' specify
+        alternate input, output and error output file objects; if not
+        specified, sys.* are used.
+
+        If 'stdout' but not 'stderr' is specified, stdout is used for
+        error output. This is to provide least surprise for users used
+        to only the 'stdin' and 'stdout' options with cmd.Cmd.
+        """
+        import sys
+        if self.name is None:
+            self.name = os.path.basename(sys.argv[0])
+        if self.prompt is None:
+            self.prompt = self.name+"> "
+        self._name_str = self._str(self.name)
+        self._prompt_str = self._str(self.prompt)
+        if stdin is not None:
+            self.stdin = stdin
+        else:
+            self.stdin = sys.stdin
+        if stdout is not None:
+            self.stdout = stdout
+        else:
+            self.stdout = sys.stdout
+        if stderr is not None:
+            self.stderr = stderr
+        elif stdout is not None:
+            self.stderr = stdout
+        else:
+            self.stderr = sys.stderr
+        self.cmdqueue = []
+        self.completekey = completekey
+        self.cmdlooping = False
+
+    def get_optparser(self):
+        """Hook for subclasses to set the option parser for the
+        top-level command/shell.
+
+        This option parser is retrieved and used by `.main()' to
+        handle top-level options.
+
+        The default implements a single '-h|--help' option. Sub-classes
+        can return None to have no options at the top-level. Typically
+        an instance of CmdlnOptionParser should be returned.
+        """
+        version = (self.version is not None
+                    and "%s %s" % (self._name_str, self.version)
+                    or None)
+        return CmdlnOptionParser(self, version=version)
+
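+    # An illustrative override (not part of the original module): a
+    # subclass can extend the default parser with its own global
+    # options, e.g.
+    #
+    #     class MyTool(RawCmdln):
+    #         def get_optparser(self):
+    #             parser = RawCmdln.get_optparser(self)
+    #             parser.add_option("-q", "--quiet", action="store_true",
+    #                               help="suppress informational output")
+    #             return parser
+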
+    def get_version(self):
+        """
+        Return the program version. Meant to be overridden in a subclass.
+        """
+        return __version__
+
+    def postoptparse(self):
+        """Hook method executed just after `.main()' parses top-level
+        options.
+
+        When called, `self.options' holds the results of the option parse.
+        """
+        pass
+
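+    # Illustrative sketch (hypothetical '--verbose' top-level option): a
+    # subclass might react to parsed global options here, e.g.
+    #
+    #     def postoptparse(self):
+    #         if self.options.verbose:
+    #             self.stdout.write("verbose mode enabled\n")
+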
+    def main(self, argv=None, loop=LOOP_NEVER):
+        """A possible mainline handler for a script, like so:
+
+            import cmdln
+            class MyCmd(cmdln.Cmdln):
+                name = "mycmd"
+                ...
+
+            if __name__ == "__main__":
+                MyCmd().main()
+
+        By default this will use sys.argv to issue a single command to
+        'MyCmd', then exit. The 'loop' argument can be used to control
+        interactive shell behaviour.
+
+        Arguments:
+            "argv" (optional, default sys.argv) is the command to run.
+                It must be a sequence, where the first element is the
+                command name and subsequent elements the args for that
+                command.
+            "loop" (optional, default LOOP_NEVER) is a constant
+                indicating if a command loop should be started (i.e. an
+                interactive shell). Valid values (constants on this module):
+                    LOOP_ALWAYS     start loop and run "argv", if any
+                    LOOP_NEVER      run "argv" (or .emptyline()) and exit
+                    LOOP_IF_EMPTY   run "argv", if given, and exit;
+                                    otherwise, start loop
+        """
+        if argv is None:
+            import sys
+            argv = sys.argv
+        else:
+            argv = argv[:] # don't modify caller's list
+
+        self.optparser = self.get_optparser()
+        if self.optparser: # i.e. optparser=None means don't process for opts
+            try:
+                self.options, args = self.optparser.parse_args(argv[1:])
+            except CmdlnUserError, ex:
+                msg = "%s: %s\nTry '%s help' for info.\n"\
+                      % (self.name, ex, self.name)
+                self.stderr.write(self._str(msg))
+                self.stderr.flush()
+                return 1
+            except StopOptionProcessing, ex:
+                return 0
+        else:
+            self.options, args = None, argv[1:]
+        self.postoptparse()
+
+        if loop == LOOP_ALWAYS:
+            if args:
+                self.cmdqueue.append(args)
+            return self.cmdloop()
+        elif loop == LOOP_NEVER:
+            if args:
+                return self.cmd(args)
+            else:
+                return self.emptyline()
+        elif loop == LOOP_IF_EMPTY:
+            if args:
+                return self.cmd(args)
+            else:
+                return self.cmdloop()
+
+    def cmd(self, argv):
+        """Run one command and exit.
+
+            "argv" is the arglist for the command to run. argv[0] is the
+                command to run. If argv is an empty list then the
+                'emptyline' handler is run.
+
+        Returns the return value from the command handler.
+        """
+        assert isinstance(argv, (list, tuple)), \
+                "'argv' is not a sequence: %r" % argv
+        retval = None
+        try:
+            argv = self.precmd(argv)
+            retval = self.onecmd(argv)
+            self.postcmd(argv)
+        except:
+            if not self.cmdexc(argv):
+                raise
+            retval = 1
+        return retval
+
+    def _str(self, s):
+        """Safely convert the given str/unicode to a string for printing."""
+        try:
+            return str(s)
+        except UnicodeError:
+            #XXX What is the proper encoding to use here? 'utf-8' seems
+            #    to work better than "getdefaultencoding" (usually
+            #    'ascii'), on OS X at least.
+            #import sys
+            #return s.encode(sys.getdefaultencoding(), "replace")
+            return s.encode("utf-8", "replace")
+
+    def cmdloop(self, intro=None):
+        """Repeatedly issue a prompt, accept input, parse into an argv, and
+        dispatch (via .precmd(), .onecmd() and .postcmd()), passing them
+        the argv. In other words, start a shell.
+
+            "intro" (optional) is a introductory message to print when
+                starting the command loop. This overrides the class
+                "intro" attribute, if any.
+        """
+        self.cmdlooping = True
+        self.preloop()
+        if intro is None:
+            intro = self.intro
+        if intro:
+            intro_str = self._str(intro)
+            self.stdout.write(intro_str+'\n')
+        self.stop = False
+        retval = None
+        while not self.stop:
+            if self.cmdqueue:
+                argv = self.cmdqueue.pop(0)
+                assert isinstance(argv, (list, tuple)), \
+                        "item on 'cmdqueue' is not a sequence: %r" % argv
+            else:
+                if self.use_rawinput:
+                    try:
+                        line = raw_input(self._prompt_str)
+                    except EOFError:
+                        line = 'EOF'
+                else:
+                    self.stdout.write(self._prompt_str)
+                    self.stdout.flush()
+                    line = self.stdin.readline()
+                    if not len(line):
+                        line = 'EOF'
+                    else:
+                        line = line[:-1] # chop '\n'
+                argv = line2argv(line)
+            try:
+                argv = self.precmd(argv)
+                retval = self.onecmd(argv)
+                self.postcmd(argv)
+            except:
+                if not self.cmdexc(argv):
+                    raise
+                retval = 1
+            self.lastretval = retval
+        self.postloop()
+        self.cmdlooping = False
+        return retval
+
+    def precmd(self, argv):
+        """Hook method executed just before the command argv is
+        interpreted, but after the input prompt is generated and issued.
+
+            "argv" is the cmd to run.
+
+        Returns an argv to run (i.e. this method can modify the command
+        to run).
+        """
+        return argv
+
+    def postcmd(self, argv):
+        """Hook method executed just after a command dispatch is finished.
+
+            "argv" is the command that was run.
+        """
+        pass
+
+    def cmdexc(self, argv):
+        """Called if an exception is raised in any of precmd(), onecmd(),
+        or postcmd(). If True is returned, the exception is deemed to have
+        been dealt with. Otherwise, the exception is re-raised.
+
+        The default implementation handles CmdlnUserError's, which
+        typically correspond to user error in calling commands (as
+        opposed to programmer error in the design of the script using
+        cmdln.py).
+        """
+        import sys
+        type, exc, traceback = sys.exc_info()
+        if isinstance(exc, CmdlnUserError):
+            msg = "%s %s: %s\nTry '%s help %s' for info.\n"\
+                  % (self.name, argv[0], exc, self.name, argv[0])
+            self.stderr.write(self._str(msg))
+            self.stderr.flush()
+            return True
+
+    def onecmd(self, argv):
+        if not argv:
+            return self.emptyline()
+        self.lastcmd = argv
+        cmdname = self._get_canonical_cmd_name(argv[0])
+        if cmdname:
+            handler = self._get_cmd_handler(cmdname)
+            if handler:
+                return self._dispatch_cmd(handler, argv)
+        return self.default(argv)
+
+    def _dispatch_cmd(self, handler, argv):
+        return handler(argv)
+
+    def default(self, argv):
+        """Hook called to handle a command for which there is no handler.
+
+            "argv" is the command and arguments to run.
+
+        The default implementation writes an error message to stderr
+        and returns an error exit status.
+
+        Returns a numeric command exit status.
+        """
+        errmsg = self._str(self.unknowncmd % (argv[0],))
+        if self.cmdlooping:
+            self.stderr.write(errmsg+"\n")
+        else:
+            self.stderr.write("%s: %s\nTry '%s help' for info.\n"
+                              % (self._name_str, errmsg, self._name_str))
+        self.stderr.flush()
+        return 1
+
+    def parseline(self, line):
+        # This is used by Cmd.complete (readline completer function) to
+        # massage the current line buffer before completion processing.
+        # We override to drop special '!' handling.
+        line = line.strip()
+        if not line:
+            return None, None, line
+        elif line[0] == '?':
+            line = 'help ' + line[1:]
+        i, n = 0, len(line)
+        while i < n and line[i] in self.identchars: i = i+1
+        cmd, arg = line[:i], line[i:].strip()
+        return cmd, arg, line
+
+    def helpdefault(self, cmd, known):
+        """Hook called to handle help on a command for which there is no
+        help handler.
+
+            "cmd" is the command name on which help was requested.
+            "known" is a boolean indicating if this command is known
+                (i.e. if there is a handler for it).
+
+        Returns a return code.
+        """
+        if known:
+            msg = self._str(self.nohelp % (cmd,))
+            if self.cmdlooping:
+                self.stderr.write(msg + '\n')
+            else:
+                self.stderr.write("%s: %s\n" % (self.name, msg))
+        else:
+            msg = self.unknowncmd % (cmd,)
+            if self.cmdlooping:
+                self.stderr.write(msg + '\n')
+            else:
+                self.stderr.write("%s: %s\n"
+                                  "Try '%s help' for info.\n"
+                                  % (self.name, msg, self.name))
+        self.stderr.flush()
+        return 1
+
+
+    def do_help(self, argv):
+        """${cmd_name}: give detailed help on a specific sub-command
+
+        usage:
+            ${name} help [SUBCOMMAND]
+        """
+        if len(argv) > 1: # asking for help on a particular command
+            doc = None
+            cmdname = self._get_canonical_cmd_name(argv[1]) or argv[1]
+            if not cmdname:
+                return self.helpdefault(argv[1], False)
+            else:
+                helpfunc = getattr(self, "help_"+cmdname, None)
+                if helpfunc:
+                    doc = helpfunc()
+                else:
+                    handler = self._get_cmd_handler(cmdname)
+                    if handler:
+                        doc = handler.__doc__
+                    if doc is None:
+                        return self.helpdefault(argv[1], handler != None)
+        else: # bare "help" command
+            doc = self.__class__.__doc__  # try class docstring
+            if doc is None:
+                # Try to provide some reasonable useful default help.
+                if self.cmdlooping: prefix = ""
+                else:               prefix = self.name+' '
+                doc = """usage:
+                    %sSUBCOMMAND [ARGS...]
+                    %shelp [SUBCOMMAND]
+
+                ${option_list}
+                ${command_list}
+                ${help_list}
+                """ % (prefix, prefix)
+            cmdname = None
+
+        if doc: # *do* have help content, massage and print that
+            doc = self._help_reindent(doc)
+            doc = self._help_preprocess(doc, cmdname)
+            doc = doc.rstrip() + '\n' # trim down trailing space
+            self.stdout.write(self._str(doc))
+            self.stdout.flush()
+    do_help.aliases = ["?"]
+
+
+    def do_man(self, argv):
+        """${cmd_name}: generates a man page
+
+        usage:
+            ${name} man
+        """
+        self.stdout.write(self.man_header % {
+                'date': date.today().strftime('%b %Y'),
+                'version': self.get_version(),
+                'name': self.name,
+                'ucname': self.name.upper()
+                }
+        )
+
+        self.stdout.write(self.man_commands_header)
+        commands = self._help_get_command_list()
+        for command, doc in commands:
+            cmdname = command.split(' ')[0]
+            text = self._help_preprocess(doc, cmdname)
+            lines = []
+            for line in text.splitlines(False):
+                if line[:8] == ' ' * 8:
+                    line = line[8:]
+                lines.append(man_escape(line))
+
+            self.stdout.write('.TP\n\\fB%s\\fR\n%s\n' % (command, '\n'.join(lines)))
+
+        self.stdout.write(self.man_options_header)
+        self.stdout.write(man_escape(self._help_preprocess('${option_list}', None)))
+
+        self.stdout.write(self.man_footer)
+
+        self.stdout.flush()
+
+    def _help_reindent(self, help, indent=None):
+        """Hook to re-indent help strings before writing to stdout.
+
+            "help" is the help content to re-indent
+            "indent" is a string with which to indent each line of the
+                help content after normalizing. If unspecified or None
+                then the default is used: the 'self.helpindent' class
+                attribute. By default this is the empty string, i.e.
+                no indentation.
+
+        By default, all common leading whitespace is removed and then
+        the lot is indented by 'self.helpindent'. When calculating the
+        common leading whitespace the first line is ignored -- hence
+        help content for Conan can be written as follows and have the
+        expected indentation:
+
+            def do_crush(self, ...):
+                '''${cmd_name}: crush your enemies, see them driven before you...
+
+                c.f. Conan the Barbarian'''
+        """
+        if indent is None:
+            indent = self.helpindent
+        lines = help.splitlines(0)
+        _dedentlines(lines, skip_first_line=True)
+        lines = [(indent+line).rstrip() for line in lines]
+        return '\n'.join(lines)
+
+    def _help_preprocess(self, help, cmdname):
+        """Hook to preprocess a help string before writing to stdout.
+
+            "help" is the help string to process.
+            "cmdname" is the canonical sub-command name for which help
+                is being given, or None if the help is not specific to a
+                command.
+
+        By default the following template variables are interpolated in
+        help content. (Note: these are similar to Python 2.4's
+        string.Template interpolation but not quite.)
+
+        ${name}
+            The tool's/shell's name, i.e. 'self.name'.
+        ${option_list}
+            A formatted table of options for this shell/tool.
+        ${command_list}
+            A formatted table of available sub-commands.
+        ${help_list}
+            A formatted table of additional help topics (i.e. 'help_*'
+            methods with no matching 'do_*' method).
+        ${cmd_name}
+            The name (and aliases) for this sub-command formatted as:
+            "NAME (ALIAS1, ALIAS2, ...)".
+        ${cmd_usage}
+            A formatted usage block inferred from the command function
+            signature.
+        ${cmd_option_list}
+            A formatted table of options for this sub-command. (This is
+            only available for commands using the optparse integration,
+            i.e.  using @cmdln.option decorators or manually setting the
+            'optparser' attribute on the 'do_*' method.)
+
+        Returns the processed help.
+        """
+        preprocessors = {
+            "${name}":            self._help_preprocess_name,
+            "${option_list}":     self._help_preprocess_option_list,
+            "${command_list}":    self._help_preprocess_command_list,
+            "${help_list}":       self._help_preprocess_help_list,
+            "${cmd_name}":        self._help_preprocess_cmd_name,
+            "${cmd_usage}":       self._help_preprocess_cmd_usage,
+            "${cmd_option_list}": self._help_preprocess_cmd_option_list,
+        }
+
+        for marker, preprocessor in preprocessors.items():
+            if marker in help:
+                help = preprocessor(help, cmdname)
+        return help
+
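+    # A sketch of how these templates typically appear in a command
+    # docstring (illustrative only; 'frob' is a made-up command):
+    #
+    #     def do_frob(self, subcmd, opts):
+    #         """${cmd_name}: frobnicate the given targets
+    #
+    #         ${cmd_usage}
+    #         ${cmd_option_list}
+    #         """
+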
+    def _help_preprocess_name(self, help, cmdname=None):
+        return help.replace("${name}", self.name)
+
+    def _help_preprocess_option_list(self, help, cmdname=None):
+        marker = "${option_list}"
+        indent, indent_width = _get_indent(marker, help)
+        suffix = _get_trailing_whitespace(marker, help)
+
+        if self.optparser:
+            # Setup formatting options and format.
+            # - Indentation of 4 is better than optparse default of 2.
+            #   C.f. Damian Conway's discussion of this in Perl Best
+            #   Practices.
+            self.optparser.formatter.indent_increment = 4
+            self.optparser.formatter.current_indent = indent_width
+            block = self.optparser.format_option_help() + '\n'
+        else:
+            block = ""
+
+        help = help.replace(indent+marker+suffix, block, 1)
+        return help
+
+    def _help_get_command_list(self):
+        # Find any aliases for commands.
+        token2canonical = self._get_canonical_map()
+        aliases = {}
+        for token, cmdname in token2canonical.items():
+            if token == cmdname: continue
+            aliases.setdefault(cmdname, []).append(token)
+
+        # Get the list of (non-hidden) commands and their
+        # documentation, if any.
+        cmdnames = {} # use a dict to strip duplicates
+        for attr in self.get_names():
+            if attr.startswith("do_"):
+                cmdnames[attr[3:]] = True
+        cmdnames = cmdnames.keys()
+        cmdnames.remove("help")
+        cmdnames.remove("man")
+        #cmdnames.sort()
+        linedata = []
+        for cmdname in cmdnames:
+            if aliases.get(cmdname):
+                a = aliases[cmdname]
+                a.sort()
+                cmdstr = "%s (%s)" % (cmdname, ", ".join(a))
+            else:
+                cmdstr = cmdname
+            doc = None
+            try:
+                helpfunc = getattr(self, 'help_'+cmdname)
+            except AttributeError:
+                handler = self._get_cmd_handler(cmdname)
+                if handler:
+                    doc = handler.__doc__
+            else:
+                doc = helpfunc()
+
+            # Strip "${cmd_name}: " from the start of a command's doc. Best
+            # practice dictates that command help strings begin with this, but
+            # it isn't at all wanted for the command list.
+            to_strip = "${cmd_name}:"
+            if doc and doc.startswith(to_strip):
+                #log.debug("stripping %r from start of %s's help string",
+                #          to_strip, cmdname)
+                doc = doc[len(to_strip):].lstrip()
+            if not getattr(self._get_cmd_handler(cmdname), "hidden", None):
+                linedata.append( (cmdstr, doc) )
+
+        return linedata
+
+    def _help_preprocess_command_list(self, help, cmdname=None):
+        marker = "${command_list}"
+        indent, indent_width = _get_indent(marker, help)
+        suffix = _get_trailing_whitespace(marker, help)
+
+        linedata = self._help_get_command_list()
+
+        if linedata:
+            subindent = indent + ' '*4
+            lines = _format_linedata(linedata, subindent, indent_width+4)
+            block = indent + "commands:\n" \
+                    + '\n'.join(lines) + "\n\n"
+            help = help.replace(indent+marker+suffix, block, 1)
+        return help
+
+    def _help_preprocess_help_list(self, help, cmdname=None):
+        marker = "${help_list}"
+        indent, indent_width = _get_indent(marker, help)
+        suffix = _get_trailing_whitespace(marker, help)
+
+        # Determine the additional help topics, if any.
+        helpnames = {}
+        token2cmdname = self._get_canonical_map()
+        for attr in self.get_names():
+            if not attr.startswith("help_"): continue
+            helpname = attr[5:]
+            if helpname not in token2cmdname:
+                helpnames[helpname] = True
+
+        if helpnames:
+            helpnames = helpnames.keys()
+            helpnames.sort()
+            linedata = [(self.name+" help "+n, "") for n in helpnames]
+
+            subindent = indent + ' '*4
+            lines = _format_linedata(linedata, subindent, indent_width+4)
+            block = indent + "additional help topics:\n" \
+                    + '\n'.join(lines) + "\n\n"
+        else:
+            block = ''
+        help = help.replace(indent+marker+suffix, block, 1)
+        return help
+
+    def _help_preprocess_cmd_name(self, help, cmdname=None):
+        marker = "${cmd_name}"
+        handler = self._get_cmd_handler(cmdname)
+        if not handler:
+            raise CmdlnError("cannot preprocess '%s' into help string: "
+                             "could not find command handler for %r"
+                             % (marker, cmdname))
+        s = cmdname
+        if hasattr(handler, "aliases"):
+            s += " (%s)" % (", ".join(handler.aliases))
+        help = help.replace(marker, s)
+        return help
+
+    #TODO: this only makes sense as part of the Cmdln class.
+    #      Add hooks to add help preprocessing template vars and put
+    #      this one on that class.
+    def _help_preprocess_cmd_usage(self, help, cmdname=None):
+        marker = "${cmd_usage}"
+        handler = self._get_cmd_handler(cmdname)
+        if not handler:
+            raise CmdlnError("cannot preprocess '%s' into help string: "
+                             "could not find command handler for %r"
+                             % (marker, cmdname))
+        indent, indent_width = _get_indent(marker, help)
+        suffix = _get_trailing_whitespace(marker, help)
+
+        # Extract the introspection bits we need.
+        func = handler.im_func
+        if func.func_defaults:
+            func_defaults = list(func.func_defaults)
+        else:
+            func_defaults = []
+        co_argcount = func.func_code.co_argcount
+        co_varnames = func.func_code.co_varnames
+        co_flags = func.func_code.co_flags
+        CO_FLAGS_ARGS = 4
+        CO_FLAGS_KWARGS = 8
+
+        # Adjust argcount for possible *args and **kwargs arguments.
+        argcount = co_argcount
+        if co_flags & CO_FLAGS_ARGS:   argcount += 1
+        if co_flags & CO_FLAGS_KWARGS: argcount += 1
+
+        # Determine the usage string.
+        usage = "%s %s" % (self.name, cmdname)
+        if argcount <= 2:   # handler ::= do_FOO(self, argv)
+            usage += " [ARGS...]"
+        elif argcount >= 3: # handler ::= do_FOO(self, subcmd, opts, ...)
+            argnames = list(co_varnames[3:argcount])
+            tail = ""
+            if co_flags & CO_FLAGS_KWARGS:
+                name = argnames.pop(-1)
+                import warnings
+                # There is no generally accepted mechanism for passing
+                # keyword arguments from the command line. Could
+                # *perhaps* consider: arg=value arg2=value2 ...
+                warnings.warn("argument '**%s' on '%s.%s' command "
+                              "handler will never get values"
+                              % (name, self.__class__.__name__,
+                                 func.func_name))
+            if co_flags & CO_FLAGS_ARGS:
+                name = argnames.pop(-1)
+                tail = "[%s...]" % name.upper()
+            while func_defaults:
+                func_defaults.pop(-1)
+                name = argnames.pop(-1)
+                tail = "[%s%s%s]" % (name.upper(), (tail and ' ' or ''), tail)
+            while argnames:
+                name = argnames.pop(-1)
+                tail = "%s %s" % (name.upper(), tail)
+            usage += ' ' + tail
+
+        block_lines = [
+            self.helpindent + "usage:",
+            self.helpindent + ' '*4 + usage
+        ]
+        block = '\n'.join(block_lines) + '\n\n'
+
+        help = help.replace(indent+marker+suffix, block, 1)
+        return help
+
+    #TODO: this only makes sense as part of the Cmdln class.
+    #      Add hooks to add help preprocessing template vars and put
+    #      this one on that class.
+    def _help_preprocess_cmd_option_list(self, help, cmdname=None):
+        marker = "${cmd_option_list}"
+        handler = self._get_cmd_handler(cmdname)
+        if not handler:
+            raise CmdlnError("cannot preprocess '%s' into help string: "
+                             "could not find command handler for %r"
+                             % (marker, cmdname))
+        indent, indent_width = _get_indent(marker, help)
+        suffix = _get_trailing_whitespace(marker, help)
+        if hasattr(handler, "optparser"):
+            # Setup formatting options and format.
+            # - Indentation of 4 is better than optparse default of 2.
+            #   C.f. Damian Conway's discussion of this in Perl Best
+            #   Practices.
+            handler.optparser.formatter.indent_increment = 4
+            handler.optparser.formatter.current_indent = indent_width
+            block = handler.optparser.format_option_help() + '\n'
+        else:
+            block = ""
+
+        help = help.replace(indent+marker+suffix, block, 1)
+        return help
+
+    def _get_canonical_cmd_name(self, token):
+        map = self._get_canonical_map()
+        return map.get(token, None)
+
+    def _get_canonical_map(self):
+        """Return a mapping of available command names and aliases to
+        their canonical command name.
+        """
+        cacheattr = "_token2canonical"
+        if not hasattr(self, cacheattr):
+            # Get the list of commands and their aliases, if any.
+            token2canonical = {}
+            cmd2funcname = {} # use a dict to strip duplicates
+            for attr in self.get_names():
+                if attr.startswith("do_"):    cmdname = attr[3:]
+                elif attr.startswith("_do_"): cmdname = attr[4:]
+                else:
+                    continue
+                cmd2funcname[cmdname] = attr
+                token2canonical[cmdname] = cmdname
+            for cmdname, funcname in cmd2funcname.items(): # add aliases
+                func = getattr(self, funcname)
+                aliases = getattr(func, "aliases", [])
+                for alias in aliases:
+                    if alias in cmd2funcname:
+                        import warnings
+                        warnings.warn("'%s' alias for '%s' command conflicts "
+                                      "with '%s' handler"
+                                      % (alias, cmdname, cmd2funcname[alias]))
+                        continue
+                    token2canonical[alias] = cmdname
+            setattr(self, cacheattr, token2canonical)
+        return getattr(self, cacheattr)
+
+    def _get_cmd_handler(self, cmdname):
+        handler = None
+        try:
+            handler = getattr(self, 'do_' + cmdname)
+        except AttributeError:
+            try:
+                # Private command handlers begin with "_do_".
+                handler = getattr(self, '_do_' + cmdname)
+            except AttributeError:
+                pass
+        return handler
+
+    def _do_EOF(self, argv):
+        # Default EOF handler
+        # Note: an actual EOF is redirected to this command.
+        #TODO: separate name for this. Currently it is available from
+        #      command-line. Is that okay?
+        self.stdout.write('\n')
+        self.stdout.flush()
+        self.stop = True
+
+    def emptyline(self):
+        # Different from cmd.Cmd: don't repeat the last command for an
+        # emptyline.
+        if self.cmdlooping:
+            pass
+        else:
+            return self.do_help(["help"])
+
+
+#---- optparse.py extension to fix (IMO) some deficiencies
+#
+# See the class _OptionParserEx docstring for details.
+#
+
+class StopOptionProcessing(Exception):
+    """Indicate that option *and argument* processing should stop
+    cleanly. This is not an error condition. It is similar in spirit to
+    StopIteration. This is raised by _OptionParserEx's default "help"
+    and "version" option actions and can be raised by custom option
+    callbacks too.
+
+    Hence the typical CmdlnOptionParser (a subclass of _OptionParserEx)
+    usage is:
+
+        parser = CmdlnOptionParser(mycmd)
+        parser.add_option("-f", "--force", dest="force")
+        ...
+        try:
+            opts, args = parser.parse_args()
+        except StopOptionProcessing:
+            # normal termination, "--help" was probably given
+            sys.exit(0)
+    """
+
+class _OptionParserEx(optparse.OptionParser):
+    """An optparse.OptionParser that uses exceptions instead of sys.exit.
+
+    This class is an extension of optparse.OptionParser that differs
+    as follows:
+    - Correct (IMO) the default OptionParser error handling to never
+      sys.exit(). Instead OptParseError exceptions are passed through.
+    - Add the StopOptionProcessing exception (a la StopIteration) to
+      indicate normal termination of option processing.
+      See StopOptionProcessing's docstring for details.
+
+    I'd also like to see the following in the core optparse.py, perhaps
+    as a RawOptionParser which would serve as a base class for the more
+    generally used OptionParser (that works as current):
+    - Remove the implicit addition of the -h|--help and --version
+      options. They can get in the way (e.g. if want '-?' and '-V' for
+      these as well) and it is not hard to do:
+        optparser.add_option("-h", "--help", action="help")
+        optparser.add_option("--version", action="version")
+      These are good practices, just not valid defaults if they can
+      get in the way.
+    """
+    def error(self, msg):
+        raise optparse.OptParseError(msg)
+
+    def exit(self, status=0, msg=None):
+        if status == 0:
+            raise StopOptionProcessing(msg)
+        else:
+            #TODO: don't lose status info here
+            raise optparse.OptParseError(msg)
+
+
+
+#---- optparse.py-based option processing support
+
+class CmdlnOptionParser(_OptionParserEx):
+    """An optparse.OptionParser class more appropriate for top-level
+    Cmdln options. For parsing of sub-command options, see
+    SubCmdOptionParser.
+
+    Changes:
+    - disable_interspersed_args() by default, because a Cmdln instance
+      has sub-commands which may themselves have options.
+    - Redirect print_help() to the Cmdln.do_help() which is better
+      equipped to handle the "help" action.
+    - error() will raise a CmdlnUserError: OptionParser.error() is meant
+      to be called for user errors. Raising a well-known error here can
+      make error handling clearer.
+    - Also see the changes in _OptionParserEx.
+    """
+    def __init__(self, cmdln, **kwargs):
+        self.cmdln = cmdln
+        kwargs["prog"] = self.cmdln.name
+        _OptionParserEx.__init__(self, **kwargs)
+        self.disable_interspersed_args()
+
+    def print_help(self, file=None):
+        self.cmdln.onecmd(["help"])
+
+    def error(self, msg):
+        raise CmdlnUserError(msg)
+
+
+class SubCmdOptionParser(_OptionParserEx):
+    def set_cmdln_info(self, cmdln, subcmd):
+        """Called by Cmdln to pass relevant info about itself needed
+        for print_help().
+        """
+        self.cmdln = cmdln
+        self.subcmd = subcmd
+
+    def print_help(self, file=None):
+        self.cmdln.onecmd(["help", self.subcmd])
+
+    def error(self, msg):
+        raise CmdlnUserError(msg)
+
+
+def option(*args, **kwargs):
+    """Decorator to add an option to the optparser argument of a Cmdln
+    subcommand.
+
+    Example:
+        class MyShell(cmdln.Cmdln):
+            @cmdln.option("-f", "--force", help="force removal")
+            def do_remove(self, subcmd, opts, *args):
+                #...
+    """
+    #XXX Is there a possible optimization for many options to not have a
+    #    large stack depth here?
+    def decorate(f):
+        if not hasattr(f, "optparser"):
+            f.optparser = SubCmdOptionParser()
+        f.optparser.add_option(*args, **kwargs)
+        return f
+    return decorate
+
+def hide(*args):
+    """For obsolete calls, hide them in help listings.
+
+    Example:
+        class MyShell(cmdln.Cmdln):
+            @cmdln.hide()
+            def do_shell(self, argv):
+                #...implement 'shell' command
+    """
+    def decorate(f):
+        f.hidden = 1
+        return f
+    return decorate
+
+
+class Cmdln(RawCmdln):
+    """An improved (on cmd.Cmd) framework for building multi-subcommand
+    scripts (think "svn" & "cvs") and simple shells (think "pdb" and
+    "gdb").
+
+    A simple example:
+
+        import cmdln
+
+        class MySVN(cmdln.Cmdln):
+            name = "svn"
+
+            @cmdln.alias('stat', 'st')
+            @cmdln.option('-v', '--verbose', action='store_true',
+                          help='print verbose information')
+            def do_status(self, subcmd, opts, *paths):
+                print "handle 'svn status' command"
+
+            #...
+
+        if __name__ == "__main__":
+            shell = MySVN()
+            retval = shell.main()
+            sys.exit(retval)
+
+    'Cmdln' extends 'RawCmdln' by providing optparse option processing
+    integration.  See this class' _dispatch_cmd() docstring and
+    <http://trentm.com/projects/cmdln> for more information.
+    """
+    def _dispatch_cmd(self, handler, argv):
+        """Introspect sub-command handler signature to determine how to
+        dispatch the command. The raw handler provided by the base
+        'RawCmdln' class is still supported:
+
+            def do_foo(self, argv):
+                # 'argv' is the vector of command line args, argv[0] is
+                # the command name itself (i.e. "foo" or an alias)
+                pass
+
+        In addition, if the handler has more than 2 arguments option
+        processing is automatically done (using optparse):
+
+            @cmdln.option('-v', '--verbose', action='store_true')
+            def do_bar(self, subcmd, opts, *args):
+                # subcmd = <"bar" or an alias>
+                # opts = <an optparse.Values instance>
+                if opts.verbose:
+                    print "lots of debugging output..."
+                # args = <tuple of arguments>
+                for arg in args:
+                    bar(arg)
+
+        TODO: explain that "*args" can be other signatures as well.
+
+        The `cmdln.option` decorator corresponds to an `add_option()`
+        method call on an `optparse.OptionParser` instance.
+
+        You can declare a specific number of arguments:
+
+            @cmdln.option('-v', '--verbose', action='store_true')
+            def do_bar2(self, subcmd, opts, bar_one, bar_two):
+                #...
+
+        and an appropriate error message will be raised/printed if the
+        command is called with a different number of args.
+        """
+        co_argcount = handler.im_func.func_code.co_argcount
+        if co_argcount == 2:   # handler ::= do_foo(self, argv)
+            return handler(argv)
+        elif co_argcount >= 3: # handler ::= do_foo(self, subcmd, opts, ...)
+            try:
+                optparser = handler.optparser
+            except AttributeError:
+                optparser = handler.im_func.optparser = SubCmdOptionParser()
+            assert isinstance(optparser, SubCmdOptionParser)
+            optparser.set_cmdln_info(self, argv[0])
+            try:
+                opts, args = optparser.parse_args(argv[1:])
+            except StopOptionProcessing:
+                #TODO: this doesn't really fly for a replacement of
+                #      optparse.py behaviour, does it?
+                return 0 # Normal command termination
+
+            try:
+                return handler(argv[0], opts, *args)
+            except TypeError, ex:
+                # Some TypeError's are user errors:
+                #   do_foo() takes at least 4 arguments (3 given)
+                #   do_foo() takes at most 5 arguments (6 given)
+                #   do_foo() takes exactly 5 arguments (6 given)
+                # Raise CmdlnUserError for these with a suitably
+                # massaged error message.
+                import sys
+                tb = sys.exc_info()[2] # the traceback object
+                if tb.tb_next is not None:
+                    # If the traceback is more than one level deep, then the
+                    # TypeError did *not* happen on the "handler(...)" call
+                    # above. In that case we don't want to handle it specially
+                    # here: it would falsely mask deeper code errors.
+                    raise
+                msg = ex.args[0]
+                match = _INCORRECT_NUM_ARGS_RE.search(msg)
+                if match:
+                    msg = list(match.groups())
+                    msg[1] = int(msg[1]) - 3
+                    if msg[1] == 1:
+                        msg[2] = msg[2].replace("arguments", "argument")
+                    msg[3] = int(msg[3]) - 3
+                    msg = ''.join(map(str, msg))
+                    raise CmdlnUserError(msg)
+                else:
+                    raise
+        else:
+            raise CmdlnError("incorrect argcount for %s(): takes %d, must "
+                             "take 2 for 'argv' signature or 3+ for 'opts' "
+                             "signature" % (handler.__name__, co_argcount))
+
+
+
+#---- internal support functions
+
+def _format_linedata(linedata, indent, indent_width):
+    """Format specific linedata into a pleasant layout.
+
+        "linedata" is a list of 2-tuples of the form:
+            (<item-display-string>, <item-docstring>)
+        "indent" is a string to use for one level of indentation
+        "indent_width" is a number of columns by which the
+            formatted data will be indented when printed.
+
+    The <item-display-string> column is held to 15 columns.
+    """
+    lines = []
+    WIDTH = 78 - indent_width
+    SPACING = 3
+    MAX_NAME_WIDTH = 15
+
+    NAME_WIDTH = min(max([len(s) for s,d in linedata]), MAX_NAME_WIDTH)
+    DOC_WIDTH = WIDTH - NAME_WIDTH - SPACING
+    for namestr, doc in linedata:
+        line = indent + namestr
+        if len(namestr) <= NAME_WIDTH:
+            line += ' ' * (NAME_WIDTH + SPACING - len(namestr))
+        else:
+            lines.append(line)
+            line = indent + ' ' * (NAME_WIDTH + SPACING)
+        line += _summarize_doc(doc, DOC_WIDTH)
+        lines.append(line.rstrip())
+    return lines
+
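+# Illustrative sketch (the command names and docstrings below are made up):
+#
+#   linedata = [("status", "Show the working tree status."),
+#               ("remove", "Remove files from the index.")]
+#   for line in _format_linedata(linedata, indent="    ", indent_width=4):
+#       print line
+#
+# Each name is padded to a shared column (capped at 15 characters) and
+# followed by a one-line summary produced by _summarize_doc().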
+def _summarize_doc(doc, length=60):
+    r"""Parse out a short one line summary from the given doclines.
+
+        "doc" is the doc string to summarize.
+        "length" is the max length for the summary
+
+    >>> _summarize_doc("this function does this")
+    'this function does this'
+    >>> _summarize_doc("this function does this", 10)
+    'this fu...'
+    >>> _summarize_doc("this function does this\nand that")
+    'this function does this and that'
+    >>> _summarize_doc("this function does this\n\nand that")
+    'this function does this'
+    """
+    import re
+    if doc is None:
+        return ""
+    assert length > 3, "length <= 3 is absurdly short for a doc summary"
+    doclines = doc.strip().splitlines(0)
+    if not doclines:
+        return ""
+
+    summlines = []
+    for i, line in enumerate(doclines):
+        stripped = line.strip()
+        if not stripped:
+            break
+        summlines.append(stripped)
+        if len(''.join(summlines)) >= length:
+            break
+
+    summary = ' '.join(summlines)
+    if len(summary) > length:
+        summary = summary[:length-3] + "..."
+    return summary
+
+
+def line2argv(line):
+    r"""Parse the given line into an argument vector.
+
+        "line" is the line of input to parse.
+
+    This may get niggly when dealing with quoting and escaping. The
+    current state of this parsing may not be completely thorough/correct
+    in this respect.
+
+    >>> from cmdln import line2argv
+    >>> line2argv("foo")
+    ['foo']
+    >>> line2argv("foo bar")
+    ['foo', 'bar']
+    >>> line2argv("foo bar ")
+    ['foo', 'bar']
+    >>> line2argv(" foo bar")
+    ['foo', 'bar']
+
+    Quote handling:
+
+    >>> line2argv("'foo bar'")
+    ['foo bar']
+    >>> line2argv('"foo bar"')
+    ['foo bar']
+    >>> line2argv(r'"foo\"bar"')
+    ['foo"bar']
+    >>> line2argv("'foo bar' spam")
+    ['foo bar', 'spam']
+    >>> line2argv("'foo 'bar spam")
+    ['foo bar', 'spam']
+    >>> line2argv("'foo")
+    Traceback (most recent call last):
+        ...
+    ValueError: command line is not terminated: unfinished single-quoted segment
+    >>> line2argv('"foo')
+    Traceback (most recent call last):
+        ...
+    ValueError: command line is not terminated: unfinished double-quoted segment
+    >>> line2argv('some\tsimple\ttests')
+    ['some', 'simple', 'tests']
+    >>> line2argv('a "more complex" test')
+    ['a', 'more complex', 'test']
+    >>> line2argv('a more="complex test of " quotes')
+    ['a', 'more=complex test of ', 'quotes']
+    >>> line2argv('a more" complex test of " quotes')
+    ['a', 'more complex test of ', 'quotes']
+    >>> line2argv('an "embedded \\"quote\\""')
+    ['an', 'embedded "quote"']
+    """
+    import string
+    line = line.strip()
+    argv = []
+    state = "default"
+    arg = None  # the current argument being parsed
+    i = -1
+    while 1:
+        i += 1
+        if i >= len(line): break
+        ch = line[i]
+
+        if ch == "\\": # escaped char always added to arg, regardless of state
+            if arg is None: arg = ""
+            i += 1
+            arg += line[i]
+            continue
+
+        if state == "single-quoted":
+            if ch == "'":
+                state = "default"
+            else:
+                arg += ch
+        elif state == "double-quoted":
+            if ch == '"':
+                state = "default"
+            else:
+                arg += ch
+        elif state == "default":
+            if ch == '"':
+                if arg is None: arg = ""
+                state = "double-quoted"
+            elif ch == "'":
+                if arg is None: arg = ""
+                state = "single-quoted"
+            elif ch in string.whitespace:
+                if arg is not None:
+                    argv.append(arg)
+                arg = None
+            else:
+                if arg is None: arg = ""
+                arg += ch
+    if arg is not None:
+        argv.append(arg)
+    if state != "default":
+        raise ValueError("command line is not terminated: unfinished %s "
+                         "segment" % state)
+    return argv
+
+
+def argv2line(argv):
+    r"""Put together the given argument vector into a command line.
+
+        "argv" is the argument vector to process.
+
+    >>> from cmdln import argv2line
+    >>> argv2line(['foo'])
+    'foo'
+    >>> argv2line(['foo', 'bar'])
+    'foo bar'
+    >>> argv2line(['foo', 'bar baz'])
+    'foo "bar baz"'
+    >>> argv2line(['foo"bar'])
+    'foo"bar'
+    >>> print argv2line(['foo" bar'])
+    'foo" bar'
+    >>> print argv2line(["foo' bar"])
+    "foo' bar"
+    >>> argv2line(["foo'bar"])
+    "foo'bar"
+    """
+    escapedArgs = []
+    for arg in argv:
+        if ' ' in arg and '"' not in arg:
+            arg = '"'+arg+'"'
+        elif ' ' in arg and "'" not in arg:
+            arg = "'"+arg+"'"
+        elif ' ' in arg:
+            arg = arg.replace('"', r'\"')
+            arg = '"'+arg+'"'
+        escapedArgs.append(arg)
+    return ' '.join(escapedArgs)
+
+
+# Recipe: dedent (0.1) in /Users/trentm/tm/recipes/cookbook
+def _dedentlines(lines, tabsize=8, skip_first_line=False):
+    """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
+
+        "lines" is a list of lines to dedent.
+        "tabsize" is the tab width to use for indent width calculations.
+        "skip_first_line" is a boolean indicating if the first line should
+            be skipped for calculating the indent width and for dedenting.
+            This is sometimes useful for docstrings and similar.
+
+    Same as dedent() except operates on a sequence of lines. Note: the
+    lines list is modified **in-place**.
+    """
+    DEBUG = False
+    if DEBUG:
+        print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
+              % (tabsize, skip_first_line)
+    indents = []
+    margin = None
+    for i, line in enumerate(lines):
+        if i == 0 and skip_first_line: continue
+        indent = 0
+        for ch in line:
+            if ch == ' ':
+                indent += 1
+            elif ch == '\t':
+                indent += tabsize - (indent % tabsize)
+            elif ch in '\r\n':
+                continue # skip all-whitespace lines
+            else:
+                break
+        else:
+            continue # skip all-whitespace lines
+        if DEBUG: print "dedent: indent=%d: %r" % (indent, line)
+        if margin is None:
+            margin = indent
+        else:
+            margin = min(margin, indent)
+    if DEBUG: print "dedent: margin=%r" % margin
+
+    if margin is not None and margin > 0:
+        for i, line in enumerate(lines):
+            if i == 0 and skip_first_line: continue
+            removed = 0
+            for j, ch in enumerate(line):
+                if ch == ' ':
+                    removed += 1
+                elif ch == '\t':
+                    removed += tabsize - (removed % tabsize)
+                elif ch in '\r\n':
+                    if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line
+                    lines[i] = lines[i][j:]
+                    break
+                else:
+                    raise ValueError("unexpected non-whitespace char %r in "
+                                     "line %r while removing %d-space margin"
+                                     % (ch, line, margin))
+                if DEBUG:
+                    print "dedent: %r: %r -> removed %d/%d"\
+                          % (line, ch, removed, margin)
+                if removed == margin:
+                    lines[i] = lines[i][j+1:]
+                    break
+                elif removed > margin:
+                    lines[i] = ' '*(removed-margin) + lines[i][j+1:]
+                    break
+    return lines
+
+def _dedent(text, tabsize=8, skip_first_line=False):
+    """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
+
+        "text" is the text to dedent.
+        "tabsize" is the tab width to use for indent width calculations.
+        "skip_first_line" is a boolean indicating if the first line should
+            be skipped for calculating the indent width and for dedenting.
+            This is sometimes useful for docstrings and similar.
+
+    textwrap.dedent(s), but don't expand tabs to spaces
+    """
+    lines = text.splitlines(1)
+    _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
+    return ''.join(lines)
+
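+# Illustrative example: unlike textwrap.dedent(), tabs are only *measured*
+# using "tabsize" -- they are not expanded to spaces in the returned text.
+#
+#   text = "    first line\n        indented more\n"
+#   print _dedent(text)
+#   # -> "first line\n    indented more\n"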
+
+def _get_indent(marker, s, tab_width=8):
+    """_get_indent(marker, s, tab_width=8) ->
+        (<indentation-of-'marker'>, <indentation-width>)"""
+    # Figure out how much the marker is indented.
+    INDENT_CHARS = tuple(' \t')
+    start = s.index(marker)
+    i = start
+    while i > 0:
+        if s[i-1] not in INDENT_CHARS:
+            break
+        i -= 1
+    indent = s[i:start]
+    indent_width = 0
+    for ch in indent:
+        if ch == ' ':
+            indent_width += 1
+        elif ch == '\t':
+            indent_width += tab_width - (indent_width % tab_width)
+    return indent, indent_width
+
+def _get_trailing_whitespace(marker, s):
+    """Return the whitespace content trailing the given 'marker' in string 's',
+    up to and including a newline.
+    """
+    suffix = ''
+    start = s.index(marker) + len(marker)
+    i = start
+    while i < len(s):
+        if s[i] in ' \t':
+            suffix += s[i]
+        elif s[i] in '\r\n':
+            suffix += s[i]
+            if s[i] == '\r' and i+1 < len(s) and s[i+1] == '\n':
+                suffix += s[i+1]
+            break
+        else:
+            break
+        i += 1
+    return suffix
+
diff --git a/micng/utils/error.py b/micng/utils/error.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/utils/errors.py b/micng/utils/errors.py
new file mode 100644 (file)
index 0000000..ba08563
--- /dev/null
@@ -0,0 +1,31 @@
+#
+# errors.py : exception definitions
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+class CreatorError(Exception):
+    """An exception base class for all imgcreate errors."""
+    def __init__(self, msg):
+        Exception.__init__(self, msg)
+
+class KickstartError(CreatorError):
+    pass
+class MountError(CreatorError):
+    pass
+class SnapshotError(CreatorError):
+    pass
+class SquashfsError(CreatorError):
+    pass
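+
+# Illustrative usage sketch: callers raise the most specific subclass and
+# catch the common CreatorError base, e.g.
+#
+#   try:
+#       raise MountError("Failed to mount /dev/loop0")
+#   except CreatorError, err:
+#       print "Error: %s" % err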
diff --git a/micng/utils/fs_related.py b/micng/utils/fs_related.py
new file mode 100644 (file)
index 0000000..ff2a07a
--- /dev/null
@@ -0,0 +1,945 @@
+#
+# fs_related.py : Filesystem related utilities and classes
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+import os
+import sys
+import errno
+import stat
+import subprocess
+import random
+import string
+import logging
+import time
+import fcntl
+import struct
+import termios
+
+from errors import *
+from urlgrabber.grabber import URLGrabber
+from urlgrabber.grabber import URLGrabError
+
+def terminal_width(fd=1):
+    """ Get the real terminal width """
+    try:
+        buf = 'abcdefgh'
+        buf = fcntl.ioctl(fd, termios.TIOCGWINSZ, buf)
+        return struct.unpack('hhhh', buf)[1]
+    except: # IOError
+        return 80
+
+def truncate_url(url, width):
+    if len(url) > width:
+        return os.path.basename(url)[0:width]
+    return url
+
+class TextProgress(object):
+    def start(self, filename, url, *args, **kwargs):
+        self.url = url
+        self.termwidth = terminal_width()
+        sys.stdout.write("Retrieving %s " % truncate_url(self.url, self.termwidth - 17))
+        sys.stdout.flush()
+        self.indicators = ["-", "\\", "|", "/"]
+        self.counter = 0
+    def update(self, *args):
+        if sys.stdout.isatty():
+            sys.stdout.write("\rRetrieving %s %s" % (truncate_url(self.url, self.termwidth - 17), self.indicators[self.counter%4]))
+            sys.stdout.flush()
+            self.counter += 1
+        else:
+            pass
+    def end(self, *args):
+        if sys.stdout.isatty():
+            sys.stdout.write("\rRetrieving %s ...OK\n" % (self.url,))
+        else:
+            sys.stdout.write("...OK\n")
+        sys.stdout.flush()
+
+def find_binary_path(binary):
+    if os.environ.has_key("PATH"):
+        paths = os.environ["PATH"].split(":")
+    else:
+        paths = []
+        if os.environ.has_key("HOME"):
+            paths += [os.environ["HOME"] + "/bin"]
+        paths += ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]
+
+    for path in paths:
+        bin_path = "%s/%s" % (path, binary)
+        if os.path.exists(bin_path):
+            return bin_path
+    raise CreatorError("Command '%s' is not available." % binary)
+
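+# Illustrative usage sketch: look the tool up on $PATH (falling back to the
+# usual bin/sbin directories when $PATH is unset) and fail with a
+# CreatorError if it is missing.
+#
+#   mountcmd = find_binary_path("mount")   # e.g. "/bin/mount"
+#   subprocess.call([mountcmd, "--version"])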
+def makedirs(dirname):
+    """A version of os.makedirs() that doesn't throw an
+    exception if the leaf directory already exists.
+    """
+    try:
+        os.makedirs(dirname)
+    except OSError, (err, msg):
+        if err != errno.EEXIST:
+            raise
+
+def mksquashfs(in_img, out_img):
+    fullpathmksquashfs = find_binary_path("mksquashfs")
+    args = [fullpathmksquashfs, in_img, out_img]
+
+    if not sys.stdout.isatty():
+        args.append("-no-progress")
+
+    ret = subprocess.call(args, stdout=sys.stdout, stderr=sys.stderr)
+    if ret != 0:
+        raise SquashfsError("'%s' exited with error (%d)" %
+                            (string.join(args, " "), ret))
+
+def resize2fs(fs, size):
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    try:
+        resize2fs = find_binary_path("resize2fs")
+        return subprocess.call([resize2fs, fs, "%sK" % (size / 1024,)],
+                               stdout = dev_null, stderr = dev_null)
+    finally:
+        os.close(dev_null)
+
+def my_fuser(file):
+    ret = False
+    fuser = find_binary_path("fuser")
+    if not os.path.exists(file):
+        return ret
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    rc = subprocess.call([fuser, "-s", file], stderr=dev_null) 
+    if rc == 0:
+        fuser_proc = subprocess.Popen([fuser, file], stdout=subprocess.PIPE, stderr=dev_null)
+        pids = fuser_proc.communicate()[0].strip().split()
+        for pid in pids:
+            fd = open("/proc/%s/cmdline" % pid, "r")
+            cmdline = fd.read()
+            fd.close()
+            if cmdline[:-1] == "/bin/bash":
+                ret = True
+                break
+    os.close(dev_null)
+    return ret
+
+class BindChrootMount:
+    """Represents a bind mount of a directory into a chroot."""
+    def __init__(self, src, chroot, dest = None, option = None):
+        self.src = src
+        self.root = os.path.abspath(os.path.expanduser(chroot))
+        self.option = option
+
+        if not dest:
+            dest = src
+        self.dest = self.root + "/" + dest
+
+        self.mounted = False
+        self.mountcmd = find_binary_path("mount")
+        self.umountcmd = find_binary_path("umount")
+
+    def ismounted(self):
+        ret = False
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        catcmd = find_binary_path("cat")
+        args = [ catcmd, "/proc/mounts" ]
+        proc_mounts = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null)
+        outputs = proc_mounts.communicate()[0].strip().split("\n")
+        for line in outputs:
+            if line.split()[1] == os.path.abspath(self.dest):
+                ret = True
+                break
+        os.close(dev_null)
+        return ret
+
+    def has_chroot_instance(self):
+        lock = os.path.join(self.root, ".chroot.lock")
+        return my_fuser(lock)
+
+    def mount(self):
+        if self.mounted or self.ismounted():
+            return
+
+        makedirs(self.dest)
+        rc = subprocess.call([self.mountcmd, "--bind", self.src, self.dest])
+        if rc != 0:
+            raise MountError("Bind-mounting '%s' to '%s' failed" %
+                             (self.src, self.dest))
+        if self.option:
+            rc = subprocess.call([self.mountcmd, "-o", "remount,%s" % self.option, self.dest])
+            if rc != 0:
+                raise MountError("Bind-remounting '%s' failed" % self.dest)
+        self.mounted = True
+
+    def unmount(self):
+        if self.has_chroot_instance():
+            return
+
+        if self.ismounted():
+            subprocess.call([self.umountcmd, "-l", self.dest])
+        self.mounted = False
+
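+# Illustrative usage sketch (the paths are hypothetical): bind-mount a host
+# directory into a chroot, remount it read-only, and lazily unmount it when
+# done.
+#
+#   bindmount = BindChrootMount("/var/cache/repo", "/tmp/mychroot",
+#                               dest="/repo", option="ro")
+#   bindmount.mount()
+#   try:
+#       pass  # ... work inside the chroot ...
+#   finally:
+#       bindmount.unmount()
+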
+class LoopbackMount:
+    """LoopbackMount  compatibility layer for old API"""
+    def __init__(self, lofile, mountdir, fstype = None):
+        self.diskmount = DiskMount(LoopbackDisk(lofile,size = 0),mountdir,fstype,rmmountdir = True)
+        self.losetup = False
+        self.losetupcmd = find_binary_path("losetup")
+
+    def cleanup(self):
+        self.diskmount.cleanup()
+
+    def unmount(self):
+        self.diskmount.unmount()
+
+    def lounsetup(self):
+        if self.losetup:
+            rc = subprocess.call([self.losetupcmd, "-d", self.loopdev])
+            self.losetup = False
+            self.loopdev = None
+
+    def loopsetup(self):
+        if self.losetup:
+            return
+
+        losetupProc = subprocess.Popen([self.losetupcmd, "-f"],
+                                       stdout=subprocess.PIPE)
+        losetupOutput = losetupProc.communicate()[0]
+
+        if losetupProc.returncode:
+            raise MountError("Failed to allocate loop device for '%s'" %
+                             self.lofile)
+
+        self.loopdev = losetupOutput.split()[0]
+
+        rc = subprocess.call([self.losetupcmd, self.loopdev, self.lofile])
+        if rc != 0:
+            raise MountError("Failed to allocate loop device for '%s'" %
+                             self.lofile)
+
+        self.losetup = True
+
+    def mount(self):
+        self.diskmount.mount()
+
+class SparseLoopbackMount(LoopbackMount):
+    """SparseLoopbackMount  compatibility layer for old API"""
+    def __init__(self, lofile, mountdir, size, fstype = None):
+        self.diskmount = DiskMount(SparseLoopbackDisk(lofile,size),mountdir,fstype,rmmountdir = True)
+
+    def expand(self, create = False, size = None):
+        self.diskmount.disk.expand(create, size)
+
+    def truncate(self, size = None):
+        self.diskmount.disk.truncate(size)
+
+    def create(self):
+        self.diskmount.disk.create()
+
+class SparseExtLoopbackMount(SparseLoopbackMount):
+    """SparseExtLoopbackMount  compatibility layer for old API"""
+    def __init__(self, lofile, mountdir, size, fstype, blocksize, fslabel):
+        self.diskmount = ExtDiskMount(SparseLoopbackDisk(lofile,size), mountdir, fstype, blocksize, fslabel, rmmountdir = True)
+
+
+    def __format_filesystem(self):
+        self.diskmount.__format_filesystem()
+
+    def create(self):
+        self.diskmount.disk.create()
+
+    def resize(self, size = None):
+        return self.diskmount.__resize_filesystem(size)
+
+    def mount(self):
+        self.diskmount.mount()
+
+    def __fsck(self):
+        self.diskmount.__fsck()
+
+    def __get_size_from_filesystem(self):
+        return self.diskmount.__get_size_from_filesystem()
+
+    def __resize_to_minimal(self):
+        return self.diskmount.__resize_to_minimal()
+
+    def resparse(self, size = None):
+        return self.diskmount.resparse(size)
+
+class Disk:
+    """Generic base object for a disk
+
+    The 'create' method must make the disk visible as a block device - eg
+    by calling losetup. For RawDisk, this is obviously a no-op. The 'cleanup'
+    method must undo the 'create' operation.
+    """
+    def __init__(self, size, device = None):
+        self._device = device
+        self._size = size
+
+    def create(self):
+        pass
+
+    def cleanup(self):
+        pass
+
+    def get_device(self):
+        return self._device
+    def set_device(self, path):
+        self._device = path
+    device = property(get_device, set_device)
+
+    def get_size(self):
+        return self._size
+    size = property(get_size)
+
+
+class RawDisk(Disk):
+    """A Disk backed by a block device.
+    Note that create() is a no-op.
+    """
+    def __init__(self, size, device):
+        Disk.__init__(self, size, device)
+
+    def fixed(self):
+        return True
+
+    def exists(self):
+        return True
+
+class LoopbackDisk(Disk):
+    """A Disk backed by a file via the loop module."""
+    def __init__(self, lofile, size):
+        Disk.__init__(self, size)
+        self.lofile = lofile
+        self.losetupcmd = find_binary_path("losetup")
+
+    def fixed(self):
+        return False
+
+    def exists(self):
+        return os.path.exists(self.lofile)
+
+    def create(self):
+        if self.device is not None:
+            return
+
+        losetupProc = subprocess.Popen([self.losetupcmd, "-f"],
+                                       stdout=subprocess.PIPE)
+        losetupOutput = losetupProc.communicate()[0]
+
+        if losetupProc.returncode:
+            raise MountError("Failed to allocate loop device for '%s'" %
+                             self.lofile)
+
+        device = losetupOutput.split()[0]
+
+        logging.debug("Losetup add %s mapping to %s"  % (device, self.lofile))
+        rc = subprocess.call([self.losetupcmd, device, self.lofile])
+        if rc != 0:
+            raise MountError("Failed to allocate loop device for '%s'" %
+                             self.lofile)
+        self.device = device
+
+    def cleanup(self):
+        if self.device is None:
+            return
+        logging.debug("Losetup remove %s" % self.device)
+        rc = subprocess.call([self.losetupcmd, "-d", self.device])
+        self.device = None
+
+
+
+class SparseLoopbackDisk(LoopbackDisk):
+    """A Disk backed by a sparse file via the loop module."""
+    def __init__(self, lofile, size):
+        LoopbackDisk.__init__(self, lofile, size)
+
+    def expand(self, create = False, size = None):
+        flags = os.O_WRONLY
+        if create:
+            flags |= os.O_CREAT
+            if not os.path.exists(self.lofile):
+                makedirs(os.path.dirname(self.lofile))
+
+        if size is None:
+            size = self.size
+
+        logging.debug("Extending sparse file %s to %d" % (self.lofile, size))
+        if create:
+            fd = os.open(self.lofile, flags, 0644)
+        else:
+            fd = os.open(self.lofile, flags)
+
+        os.lseek(fd, size, os.SEEK_SET)
+        os.write(fd, '\x00')
+        os.close(fd)
+
+    def truncate(self, size = None):
+        if size is None:
+            size = self.size
+
+        logging.debug("Truncating sparse file %s to %d" % (self.lofile, size))
+        fd = os.open(self.lofile, os.O_WRONLY)
+        os.ftruncate(fd, size)
+        os.close(fd)
+
+    def create(self):
+        self.expand(create = True)
+        LoopbackDisk.create(self)
+
+class Mount:
+    """A generic base class to deal with mounting things."""
+    def __init__(self, mountdir):
+        self.mountdir = mountdir
+
+    def cleanup(self):
+        self.unmount()
+
+    def mount(self, options = None):
+        pass
+
+    def unmount(self):
+        pass
+
+class DiskMount(Mount):
+    """A Mount object that handles mounting of a Disk."""
+    def __init__(self, disk, mountdir, fstype = None, rmmountdir = True):
+        Mount.__init__(self, mountdir)
+
+        self.disk = disk
+        self.fstype = fstype
+        self.rmmountdir = rmmountdir
+
+        self.mounted = False
+        self.rmdir   = False
+        if fstype:
+            self.mkfscmd = find_binary_path("mkfs." + self.fstype)
+        else:
+            self.mkfscmd = None
+        self.mountcmd = find_binary_path("mount")
+        self.umountcmd = find_binary_path("umount")
+
+    def cleanup(self):
+        Mount.cleanup(self)
+        self.disk.cleanup()
+
+    def unmount(self):
+        if self.mounted:
+            logging.debug("Unmounting directory %s" % self.mountdir)
+            synccmd = find_binary_path("sync")
+            subprocess.call([synccmd]) # sync the data on this mount point
+            rc = subprocess.call([self.umountcmd, "-l", self.mountdir])
+            if rc == 0:
+                self.mounted = False
+            else:
+                raise MountError("Failed to umount %s" % self.mountdir)
+        if self.rmdir and not self.mounted:
+            try:
+                os.rmdir(self.mountdir)
+            except OSError, e:
+                pass
+            self.rmdir = False
+
+
+    def __create(self):
+        self.disk.create()
+
+
+    def mount(self, options = None):
+        if self.mounted:
+            return
+
+        if not os.path.isdir(self.mountdir):
+            logging.debug("Creating mount point %s" % self.mountdir)
+            os.makedirs(self.mountdir)
+            self.rmdir = self.rmmountdir
+
+        self.__create()
+
+        logging.debug("Mounting %s at %s" % (self.disk.device, self.mountdir))
+        if options:
+            args = [ self.mountcmd, "-o", options, self.disk.device, self.mountdir ]
+        else:
+            args = [ self.mountcmd, self.disk.device, self.mountdir ]
+        if self.fstype:
+            args.extend(["-t", self.fstype])
+
+        rc = subprocess.call(args)
+        if rc != 0:
+            raise MountError("Failed to mount '%s' to '%s' with command '%s'. Retval: %s" %
+                             (self.disk.device, self.mountdir, " ".join(args), rc))
+
+        self.mounted = True
+
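+# Illustrative usage sketch (the paths are hypothetical): put a loop device
+# behind an existing image file and mount it somewhere.
+#
+#   disk = LoopbackDisk("/tmp/test.img", size=0)
+#   mnt = DiskMount(disk, "/tmp/test-mnt", fstype="ext3")
+#   mnt.mount()
+#   try:
+#       pass  # ... use the mounted filesystem ...
+#   finally:
+#       mnt.cleanup()   # unmounts and detaches the loop device
+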
+class ExtDiskMount(DiskMount):
+    """A DiskMount object that is able to format/resize ext[23] filesystems."""
+    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+        self.blocksize = blocksize
+        self.fslabel = fslabel.replace("/", "")
+        self.uuid  = None
+        self.skipformat = skipformat
+        self.fsopts = fsopts
+        self.dumpe2fs = find_binary_path("dumpe2fs")
+        self.tune2fs = find_binary_path("tune2fs")
+
+    def __parse_field(self, output, field):
+        for line in output.split("\n"):
+            if line.startswith(field + ":"):
+                return line[len(field) + 1:].strip()
+
+        raise KeyError("Failed to find field '%s' in output" % field)
+
+    def __format_filesystem(self):
+        if self.skipformat:
+            logging.debug("Skip filesystem format.")
+            return
+        logging.debug("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
+        rc = subprocess.call([self.mkfscmd,
+                              "-F", "-L", self.fslabel,
+                              "-m", "1", "-b", str(self.blocksize),
+                              self.disk.device], stdout=sys.stdout,
+                              stderr=sys.stderr)
+        #                      str(self.disk.size / self.blocksize)])
+        if rc != 0:
+            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
+
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        try:
+            out = subprocess.Popen([self.dumpe2fs, '-h', self.disk.device],
+                                   stdout = subprocess.PIPE,
+                                   stderr = dev_null).communicate()[0]
+        finally:
+            os.close(dev_null)
+
+        self.uuid = self.__parse_field(out, "Filesystem UUID")
+        logging.debug("Tuning filesystem on %s" % self.disk.device)
+        subprocess.call([self.tune2fs, "-c0", "-i0", "-Odir_index",
+                         "-ouser_xattr,acl", self.disk.device],
+                         stdout=sys.stdout, stderr=sys.stderr)
+
+    def __resize_filesystem(self, size = None):
+        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+        if size is None:
+            size = self.disk.size
+
+        if size == current_size:
+            return
+
+        if size > current_size:
+            self.disk.expand(size = size)
+
+        self.__fsck()
+
+        resize2fs(self.disk.lofile, size)
+        return size
+
+    def __create(self):
+        resize = False
+        if not self.disk.fixed() and self.disk.exists():
+            resize = True
+
+        self.disk.create()
+
+        if resize:
+            self.__resize_filesystem()
+        else:
+            self.__format_filesystem()
+
+    def mount(self, options = None):
+        self.__create()
+        DiskMount.mount(self, options)
+
+    def __fsck(self):
+        logging.debug("Checking filesystem %s" % self.disk.lofile)
+        subprocess.call(["/sbin/e2fsck", "-f", "-y", self.disk.lofile], stdout=sys.stdout, stderr=sys.stderr)
+
+    def __get_size_from_filesystem(self):
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        try:
+            out = subprocess.Popen([self.dumpe2fs, '-h', self.disk.lofile],
+                                   stdout = subprocess.PIPE,
+                                   stderr = dev_null).communicate()[0]
+        finally:
+            os.close(dev_null)
+
+        return int(self.__parse_field(out, "Block count")) * self.blocksize
+
+    def __resize_to_minimal(self):
+        self.__fsck()
+
+        #
+        # Use a binary search to find the minimal size
+        # we can resize the image to
+        #
+        bot = 0
+        top = self.__get_size_from_filesystem()
+        while top != (bot + 1):
+            t = bot + ((top - bot) / 2)
+
+            if not resize2fs(self.disk.lofile, t):
+                top = t
+            else:
+                bot = t
+        return top
+
+    def resparse(self, size = None):
+        self.cleanup()
+        minsize = self.__resize_to_minimal()
+        self.disk.truncate(minsize)
+        self.__resize_filesystem(size)
+        return minsize
+
+class VfatDiskMount(DiskMount):
+    """A DiskMount object that is able to format vfat/msdos filesystems."""
+    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+        self.blocksize = blocksize
+        self.fslabel = fslabel.replace("/", "")
+        self.uuid = "%08X" % int(time.time())
+        self.skipformat = skipformat
+        self.fsopts = fsopts
+        self.fsckcmd = find_binary_path("fsck." + self.fstype)
+
+    def __format_filesystem(self):
+        if self.skipformat:
+            logging.debug("Skip filesystem format.")
+            return
+        logging.debug("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
+        args = [self.mkfscmd, "-n", self.fslabel, "-i", self.uuid, self.disk.device]
+        rc = subprocess.call(args)
+        if rc != 0:
+            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
+        logging.debug("Tuning filesystem on %s" % self.disk.device)
+
+    def __resize_filesystem(self, size = None):
+        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+        if size is None:
+            size = self.disk.size
+
+        if size == current_size:
+            return
+
+        if size > current_size:
+            self.disk.expand(size = size)
+
+        self.__fsck()
+
+        #resize2fs(self.disk.lofile, size)
+        return size
+
+    def __create(self):
+        resize = False
+        if not self.disk.fixed() and self.disk.exists():
+            resize = True
+
+        self.disk.create()
+
+        if resize:
+            self.__resize_filesystem()
+        else:
+            self.__format_filesystem()
+
+    def mount(self, options = None):
+        self.__create()
+        DiskMount.mount(self, options)
+
+    def __fsck(self):
+        logging.debug("Checking filesystem %s" % self.disk.lofile)
+        subprocess.call([self.fsckcmd, "-y", self.disk.lofile])
+
+    def __get_size_from_filesystem(self):
+        return self.disk.size
+
+    def __resize_to_minimal(self):
+        self.__fsck()
+
+        #
+        # Use a binary search to find the minimal size
+        # we can resize the image to
+        #
+        bot = 0
+        top = self.__get_size_from_filesystem()
+        return top
+
+    def resparse(self, size = None):
+        self.cleanup()
+        minsize = self.__resize_to_minimal()
+        self.disk.truncate(minsize)
+        self.__resize_filesystem(size)
+        return minsize
+
+class BtrfsDiskMount(DiskMount):
+    """A DiskMount object that is able to format/resize btrfs filesystems."""
+    def __init__(self, disk, mountdir, fstype, blocksize, fslabel, rmmountdir=True, skipformat = False, fsopts = None):
+        self.__check_btrfs()
+        DiskMount.__init__(self, disk, mountdir, fstype, rmmountdir)
+        self.blocksize = blocksize
+        self.fslabel = fslabel.replace("/", "")
+        self.uuid  = None
+        self.skipformat = skipformat
+        self.fsopts = fsopts
+        self.blkidcmd = find_binary_path("blkid")
+        self.btrfsckcmd = find_binary_path("btrfsck")
+
+    def __check_btrfs(self):
+        found = False
+        # Need to load the btrfs module before we can mount a btrfs filesystem
+        load_module("btrfs")
+        for line in open("/proc/filesystems").xreadlines():
+            if line.find("btrfs") > -1:
+                found = True
+                break
+        if not found:
+            raise MountError("Your system can't mount btrfs filesystems. Please make sure your kernel has btrfs support and that the btrfs.ko module has been loaded.")
+
+        # disable selinux, selinux will block write
+        if os.path.exists("/usr/sbin/setenforce"):
+            subprocess.call(["/usr/sbin/setenforce", "0"])
+
+    def __parse_field(self, output, field):
+        for line in output.split(" "):
+            if line.startswith(field + "="):
+                return line[len(field) + 1:].strip().replace("\"", "")
+
+        raise KeyError("Failed to find field '%s' in output" % field)
+
+    def __format_filesystem(self):
+        if self.skipformat:
+            logging.debug("Skip filesystem format.")
+            return
+        logging.debug("Formatting %s filesystem on %s" % (self.fstype, self.disk.device))
+        rc = subprocess.call([self.mkfscmd, "-L", self.fslabel, self.disk.device])
+        if rc != 0:
+            raise MountError("Error creating %s filesystem on disk %s" % (self.fstype,self.disk.device))
+
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        try:
+            out = subprocess.Popen([self.blkidcmd, self.disk.device],
+                                   stdout = subprocess.PIPE,
+                                   stderr = dev_null).communicate()[0]
+        finally:
+            os.close(dev_null)
+
+        self.uuid = self.__parse_field(out, "UUID")
+
+    def __resize_filesystem(self, size = None):
+        current_size = os.stat(self.disk.lofile)[stat.ST_SIZE]
+
+        if size is None:
+            size = self.disk.size
+
+        if size == current_size:
+            return
+
+        if size > current_size:
+            self.disk.expand(size = size)
+
+        self.__fsck()
+        return size
+
+    def __create(self):
+        resize = False
+        if not self.disk.fixed() and self.disk.exists():
+            resize = True
+
+        self.disk.create()
+
+        if resize:
+            self.__resize_filesystem()
+        else:
+            self.__format_filesystem()
+
+    def mount(self, options = None):
+        self.__create()
+        DiskMount.mount(self, options)
+
+    def __fsck(self):
+        logging.debug("Checking filesystem %s" % self.disk.lofile)
+        subprocess.call([self.btrfsckcmd, self.disk.lofile])
+
+    def __get_size_from_filesystem(self):
+        return self.disk.size
+
+    def __resize_to_minimal(self):
+        self.__fsck()
+
+        return self.__get_size_from_filesystem()
+
+    def resparse(self, size = None):
+        self.cleanup()
+        minsize = self.__resize_to_minimal()
+        self.disk.truncate(minsize)
+        self.__resize_filesystem(size)
+        return minsize
+
+class DeviceMapperSnapshot(object):
+    def __init__(self, imgloop, cowloop):
+        self.imgloop = imgloop
+        self.cowloop = cowloop
+
+        self.__created = False
+        self.__name = None
+        self.dmsetupcmd = find_binary_path("dmsetup")
+
+        """Load dm_snapshot if it isn't loaded"""
+        load_module("dm_snapshot")
+
+    def get_path(self):
+        if self.__name is None:
+            return None
+        return os.path.join("/dev/mapper", self.__name)
+    path = property(get_path)
+
+    def create(self):
+        if self.__created:
+            return
+
+        self.imgloop.create()
+        self.cowloop.create()
+
+        self.__name = "imgcreate-%d-%d" % (os.getpid(),
+                                           random.randint(0, 2**16))
+
+        size = os.stat(self.imgloop.lofile)[stat.ST_SIZE]
+
+        table = "0 %d snapshot %s %s p 8" % (size / 512,
+                                             self.imgloop.device,
+                                             self.cowloop.device)
+
+        args = [self.dmsetupcmd, "create", self.__name, "--table", table]
+        if subprocess.call(args) != 0:
+            self.cowloop.cleanup()
+            self.imgloop.cleanup()
+            raise SnapshotError("Could not create snapshot device using: " +
+                                string.join(args, " "))
+
+        self.__created = True
+
+    def remove(self, ignore_errors = False):
+        if not self.__created:
+            return
+        
+        time.sleep(2)
+        rc = subprocess.call([self.dmsetupcmd, "remove", self.__name])
+        if not ignore_errors and rc != 0:
+            raise SnapshotError("Could not remove snapshot device")
+
+        self.__name = None
+        self.__created = False
+
+        self.cowloop.cleanup()
+        self.imgloop.cleanup()
+
+    def get_cow_used(self):
+        if not self.__created:
+            return 0
+
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        try:
+            out = subprocess.Popen([self.dmsetupcmd, "status", self.__name],
+                                   stdout = subprocess.PIPE,
+                                   stderr = dev_null).communicate()[0]
+        finally:
+            os.close(dev_null)
+
+        #
+        # dmsetup status on a snapshot returns e.g.
+        #   "0 8388608 snapshot 416/1048576"
+        # or, more generally:
+        #   "A B snapshot C/D"
+        # where C is the number of 512 byte sectors in use
+        #
+        try:
+            return int((out.split()[3]).split('/')[0]) * 512
+        except ValueError:
+            raise SnapshotError("Failed to parse dmsetup status: " + out)
+
+def create_image_minimizer(path, image, minimal_size):
+    """
+    Builds a copy-on-write image which can be used to
+    create a device-mapper snapshot of an image where
+    the image's filesystem is as small as possible
+
+    The steps taken are:
+      1) Create a sparse COW
+      2) Loopback mount the image and the COW
+      3) Create a device-mapper snapshot of the image
+         using the COW
+      4) Resize the filesystem to the minimal size
+      5) Determine the amount of space used in the COW
+      6) Destroy the device-mapper snapshot
+      7) Truncate the COW, removing unused space
+      8) Create a squashfs of the COW
+    """
+    imgloop = LoopbackDisk(image, None) # Passing bogus size - doesn't matter
+
+    cowloop = SparseLoopbackDisk(os.path.join(os.path.dirname(path), "osmin"),
+                                 64L * 1024L * 1024L)
+
+    snapshot = DeviceMapperSnapshot(imgloop, cowloop)
+
+    try:
+        snapshot.create()
+
+        resize2fs(snapshot.path, minimal_size)
+
+        cow_used = snapshot.get_cow_used()
+    finally:
+        snapshot.remove(ignore_errors = (not sys.exc_info()[0] is None))
+
+    cowloop.truncate(cow_used)
+
+    mksquashfs(cowloop.lofile, path)
+
+    os.unlink(cowloop.lofile)
+
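+# Illustrative usage sketch (the paths and size are hypothetical): shrink the
+# filesystem in os.img to its minimum and write the resulting copy-on-write
+# data out as a squashfs at osmin.img.
+#
+#   create_image_minimizer("/tmp/out/osmin.img", "/tmp/out/os.img",
+#                          minimal_size=512 * 1024 * 1024)
+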
+def load_module(module):
+    found = False
+    for line in open('/proc/modules').xreadlines():
+        if line.startswith("%s " % module):
+            found = True
+            break
+    if not found:
+        print "Loading %s..." % module
+        dev_null = os.open("/dev/null", os.O_WRONLY)
+        modprobecmd = find_binary_path("modprobe")
+        modprobe = subprocess.Popen([modprobecmd, module],
+                     stdout=dev_null, stderr=dev_null)
+        os.waitpid(modprobe.pid, 0)
+        os.close(dev_null)
+
+def myurlgrab(url, filename, proxies):
+    g = URLGrabber()
+    if url.startswith("file:///"):
+        file = url.replace("file://", "")
+        if not os.path.exists(file):
+            raise CreatorError("URLGrabber error: can't find file %s" % file)
+        copycmd = find_binary_path("cp")
+        subprocess.call([copycmd, "-f", file, filename])
+    else:
+        try:
+            filename = g.urlgrab(url = url, filename = filename,
+                ssl_verify_host = False, ssl_verify_peer = False,
+                proxies = proxies, http_headers = (('Pragma', 'no-cache'),))
+        except URLGrabError, e:
+            raise CreatorError("URLGrabber error: %s: %s" % (e, url))
+        except:
+            raise CreatorError("URLGrabber error: %s" % url)
+    return filename
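+
+# Illustrative usage sketch (URL and proxy values are hypothetical):
+#
+#   myurlgrab("http://example.com/repodata/repomd.xml",
+#             "/var/tmp/cache/repomd.xml",
+#             proxies={"http": "http://proxy.example.com:8080"})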
diff --git a/micng/utils/kickstart.py b/micng/utils/kickstart.py
new file mode 100644 (file)
index 0000000..d6ebcd8
--- /dev/null
@@ -0,0 +1,815 @@
+#
+# kickstart.py : Apply kickstart configuration to a system
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import os
+import os.path
+import shutil
+import subprocess
+import time
+import logging
+import string
+
+#import rhpl.keyboard
+
+import pykickstart.commands as kscommands
+import pykickstart.constants as ksconstants
+import pykickstart.errors as kserrors
+import pykickstart.parser as ksparser
+import pykickstart.version as ksversion
+from pykickstart.handlers.control import commandMap
+from pykickstart.handlers.control import dataMap
+
+import errors as errors
+import fs_related as fs
+import kscommands.desktop as desktop
+import kscommands.moblinrepo as moblinrepo
+import kscommands.micboot as micboot
+
+import sys
+sys.path.append("~/0509/mic")
+import misc as misc
+
+def read_kickstart(path):
+    """Parse a kickstart file and return a KickstartParser instance.
+
+    This is a simple utility function which takes a path to a kickstart file,
+    parses it and returns a pykickstart KickstartParser instance which can
+    be then passed to an ImageCreator constructor.
+
+    If an error occurs, a CreatorError exception is thrown.
+
+    """
+    #version = ksversion.makeVersion()
+    #ks = ksparser.KickstartParser(version)
+
+    using_version = ksversion.DEVEL
+    commandMap[using_version]["desktop"] = desktop.Moblin_Desktop
+    commandMap[using_version]["repo"] = moblinrepo.Moblin_Repo
+    commandMap[using_version]["bootloader"] = micboot.Moblin_Bootloader
+    dataMap[using_version]["RepoData"] = moblinrepo.Moblin_RepoData
+    superclass = ksversion.returnClassForVersion(version=using_version)
+
+    class KSHandlers(superclass):
+        def __init__(self, mapping={}):
+            superclass.__init__(self, mapping=commandMap[using_version])
+
+    ks = ksparser.KickstartParser(KSHandlers())
+
+    try:
+        ks.readKickstart(path)
+    except IOError, (err, msg):
+        raise errors.KickstartError("Failed to read kickstart file "
+                                    "'%s' : %s" % (path, msg))
+    except kserrors.KickstartError, e:
+        raise errors.KickstartError("Failed to parse kickstart file "
+                                    "'%s' : %s" % (path, e))
+    return ks
+
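+# Illustrative usage sketch (the .ks path is hypothetical; attribute access
+# follows pykickstart's handler layout):
+#
+#   ks = read_kickstart("/usr/share/mic/handset.ks")
+#   for repo in ks.handler.repo.repoList:
+#       print repo.name, repo.baseurl
+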
+def build_name(kscfg, prefix = None, suffix = None, maxlen = None):
+    """Construct and return an image name string.
+
+    This is a utility function to help create sensible name and fslabel
+    strings. The name is constructed using the sans-prefix-and-extension
+    kickstart filename and the supplied prefix and suffix.
+
+    If the name exceeds the maxlen length supplied, the prefix is first dropped
+    and then the kickstart filename portion is reduced until it fits. In other
+    words, the suffix takes precedence over the kickstart portion and the
+    kickstart portion takes precedence over the prefix.
+
+    kscfg -- a path to a kickstart file
+    prefix -- a prefix to prepend to the name; defaults to None, which causes
+              no prefix to be used
+    suffix -- a suffix to append to the name; defaults to None, which causes
+              a YYYYMMDDHHMM suffix to be used
+    maxlen -- the maximum length for the returned string; defaults to None,
+              which means there is no restriction on the name length
+
+    Note: if maxlen is less than len(suffix), you get to keep both pieces.
+
+    """
+    name = os.path.basename(kscfg)
+    idx = name.rfind('.')
+    if idx >= 0:
+        name = name[:idx]
+
+    if prefix is None:
+        prefix = ""
+    if suffix is None:
+        suffix = time.strftime("%Y%m%d%H%M")
+
+    if name.startswith(prefix):
+        name = name[len(prefix):]
+
+    ret = prefix + name + "-" + suffix
+    if not maxlen is None and len(ret) > maxlen:
+        ret = name[:maxlen - len(suffix) - 1] + "-" + suffix
+
+    return ret
+
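+# Illustrative example:
+#
+#   build_name("/path/to/meego-handset-ia32.ks", prefix="img-", suffix="1.0")
+#   # -> "img-meego-handset-ia32-1.0"
+#
+# With no suffix, a YYYYMMDDHHMM timestamp is appended instead.
+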
+class KickstartConfig(object):
+    """A base class for applying kickstart configurations to a system."""
+    def __init__(self, instroot):
+        self.instroot = instroot
+
+    def path(self, subpath):
+        return self.instroot + subpath
+
+    def chroot(self):
+        os.chroot(self.instroot)
+        os.chdir("/")
+
+    def call(self, args):
+        if not os.path.exists("%s/%s" %(self.instroot, args[0])):
+            print "%s/%s" %(self.instroot, args[0])
+            raise errors.KickstartError("Unable to run %s!" %(args))
+        subprocess.call(args, preexec_fn = self.chroot)
+
+    def apply(self):
+        pass
+
+class LanguageConfig(KickstartConfig):
+    """A class to apply a kickstart language configuration to a system."""
+    def apply(self, kslang):
+        lang = kslang.lang or "en_US.UTF-8"
+
+        f = open(self.path("/etc/sysconfig/i18n"), "w+")
+        f.write("LANG=\"" + lang + "\"\n")
+        f.close()
+
+class KeyboardConfig(KickstartConfig):
+    """A class to apply a kickstart keyboard configuration to a system."""
+    def apply(self, kskeyboard):
+        #
+        # FIXME:
+        #   should this impact the X keyboard config too?
+        #   or do we want to make X be able to do this mapping?
+        #
+        #k = rhpl.keyboard.Keyboard()
+        #if kskeyboard.keyboard:
+        #   k.set(kskeyboard.keyboard)
+        #k.write(self.instroot)
+        pass
+
+class TimezoneConfig(KickstartConfig):
+    """A class to apply a kickstart timezone configuration to a system."""
+    def apply(self, kstimezone):
+        tz = kstimezone.timezone or "America/New_York"
+        utc = str(kstimezone.isUtc)
+
+        f = open(self.path("/etc/sysconfig/clock"), "w+")
+        f.write("ZONE=\"" + tz + "\"\n")
+        f.write("UTC=" + utc + "\n")
+        f.close()
+        try:
+            shutil.copyfile(self.path("/usr/share/zoneinfo/%s" %(tz,)),
+                            self.path("/etc/localtime"))
+        except (IOError, OSError), (errno, msg):
+            raise errors.KickstartError("Error copying timezone info: %s" %(msg,))
+
+
+class AuthConfig(KickstartConfig):
+    """A class to apply a kickstart authconfig configuration to a system."""
+    def apply(self, ksauthconfig):
+        auth = ksauthconfig.authconfig or "--useshadow --enablemd5"
+        args = ["/usr/share/authconfig/authconfig.py", "--update", "--nostart"]
+        self.call(args + auth.split())
+
+class FirewallConfig(KickstartConfig):
+    """A class to apply a kickstart firewall configuration to a system."""
+    def apply(self, ksfirewall):
+        #
+        # FIXME: should handle the rest of the options
+        #
+        if not os.path.exists(self.path("/usr/sbin/lokkit")):
+            return
+        if ksfirewall.enabled:
+            status = "--enabled"
+        else:
+            status = "--disabled"
+
+        self.call(["/usr/sbin/lokkit",
+                   "-f", "--quiet", "--nostart", status])
+
+class RootPasswordConfig(KickstartConfig):
+    """A class to apply a kickstart root password configuration to a system."""
+    def unset(self):
+        self.call(["/usr/bin/passwd", "-d", "root"])
+
+    def set_encrypted(self, password):
+        self.call(["/usr/sbin/usermod", "-p", password, "root"])
+
+    def set_unencrypted(self, password):        
+        for p in ("/bin/echo", "/usr/sbin/chpasswd"):
+            if not os.path.exists("%s/%s" %(self.instroot, p)):
+                raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p) 
+        
+        p1 = subprocess.Popen(["/bin/echo", "root:%s" %password],
+                              stdout = subprocess.PIPE,
+                              preexec_fn = self.chroot)
+        p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
+                              stdin = p1.stdout,
+                              stdout = subprocess.PIPE,
+                              preexec_fn = self.chroot)
+        p2.communicate()
+
+    def apply(self, ksrootpw):
+        if ksrootpw.isCrypted:
+            self.set_encrypted(ksrootpw.password)
+        elif ksrootpw.password != "":
+            self.set_unencrypted(ksrootpw.password)
+        else:
+            self.unset()
+
+class UserConfig(KickstartConfig):
+    def set_empty_passwd(self, user):
+        self.call(["/usr/bin/passwd", "-d", user])
+
+    def set_encrypted_passwd(self, user, password):
+        self.call(["/usr/sbin/usermod", "-p", "%s" % password, user])
+
+    def set_unencrypted_passwd(self, user, password):
+        for p in ("/bin/echo", "/usr/sbin/chpasswd"):
+            if not os.path.exists("%s/%s" %(self.instroot, p)):
+                raise errors.KickstartError("Unable to set unencrypted password due to lack of %s" % p)
+
+        p1 = subprocess.Popen(["/bin/echo", "%s:%s" %(user, password)],
+                              stdout = subprocess.PIPE,
+                              preexec_fn = self.chroot)
+        p2 = subprocess.Popen(["/usr/sbin/chpasswd", "-m"],
+                              stdin = p1.stdout,
+                              stdout = subprocess.PIPE,
+                              preexec_fn = self.chroot)
+        p2.communicate()
+
+    def addUser(self, userconfig):
+        args = [ "/usr/sbin/useradd" ]
+        if userconfig.groups:
+            args += [ "--groups", string.join(userconfig.groups, ",") ]
+        if userconfig.name:
+            args.append(userconfig.name)
+            dev_null = os.open("/dev/null", os.O_WRONLY)
+            subprocess.call(args,
+                             stdout = dev_null,
+                             stderr = dev_null,
+                             preexec_fn = self.chroot)
+            os.close(dev_null)
+            if userconfig.password not in (None, ""):
+                if userconfig.isCrypted:
+                    self.set_encrypted_passwd(userconfig.name, userconfig.password)
+                else:
+                    self.set_unencrypted_passwd(userconfig.name, userconfig.password)
+            else:
+                self.set_empty_passwd(userconfig.name)
+        else:
+            raise errors.KickstartError("Invalid kickstart command: %s" % userconfig.__str__())
+
+    def apply(self, user):
+        for userconfig in user.userList:
+            try:
+                self.addUser(userconfig)
+            except:
+                raise
+
+class ServicesConfig(KickstartConfig):
+    """A class to apply a kickstart services configuration to a system."""
+    def apply(self, ksservices):
+        if not os.path.exists(self.path("/sbin/chkconfig")):
+            return
+        for s in ksservices.enabled:
+            self.call(["/sbin/chkconfig", s, "on"])
+        for s in ksservices.disabled:
+            self.call(["/sbin/chkconfig", s, "off"])
+
+class XConfig(KickstartConfig):
+    """A class to apply a kickstart X configuration to a system."""
+    def apply(self, ksxconfig):
+        if ksxconfig.startX:
+            f = open(self.path("/etc/inittab"), "r+")
+            buf = f.read()
+            buf = buf.replace("id:3:initdefault", "id:5:initdefault")
+            f.seek(0)
+            f.write(buf)
+            f.close()
+        if ksxconfig.defaultdesktop:
+            f = open(self.path("/etc/sysconfig/desktop"), "w")
+            f.write("DESKTOP="+ksxconfig.defaultdesktop+"\n")
+            f.close()
+
+class DesktopConfig(KickstartConfig):
+    """A class to apply a kickstart desktop configuration to a system."""
+    def apply(self, ksdesktop):
+        if ksdesktop.defaultdesktop:
+            f = open(self.path("/etc/sysconfig/desktop"), "w")
+            f.write("DESKTOP="+ksdesktop.defaultdesktop+"\n")
+            f.close()
+            if os.path.exists(self.path("/etc/gdm/custom.conf")):
+                f = open(self.path("/etc/skel/.dmrc"), "w")
+                f.write("[Desktop]\n")
+                f.write("Session="+ksdesktop.defaultdesktop.lower()+"\n")
+                f.close()
+        if ksdesktop.session:
+            if os.path.exists(self.path("/etc/sysconfig/uxlaunch")):
+                f = open(self.path("/etc/sysconfig/uxlaunch"), "a+")
+                f.write("session="+ksdesktop.session.lower()+"\n")
+                f.close()
+        if ksdesktop.autologinuser:
+            f = open(self.path("/etc/sysconfig/desktop"), "a+")
+            f.write("AUTOLOGIN_USER=" + ksdesktop.autologinuser + "\n")
+            f.close()
+            if ksdesktop.session:
+                if os.path.exists(self.path("/etc/sysconfig/uxlaunch")):
+                    f = open(self.path("/etc/sysconfig/uxlaunch"), "a+")
+                    f.write("user="+ksdesktop.autologinuser+"\n")
+                    f.close()
+            if os.path.exists(self.path("/etc/gdm/custom.conf")):
+                f = open(self.path("/etc/gdm/custom.conf"), "w")
+                f.write("[daemon]\n")
+                f.write("AutomaticLoginEnable=true\n")
+                f.write("AutomaticLogin=" + ksdesktop.autologinuser + "\n")
+                f.close()
+
+class MoblinRepoConfig(KickstartConfig):
+    """A class to apply a kickstart desktop configuration to a system."""
+    def __create_repo_section(self, repo, type, fd):
+        baseurl = None
+        mirrorlist = None
+        reposuffix = {"base":"", "debuginfo":"-debuginfo", "source":"-source"}
+        reponame = repo.name + reposuffix[type]
+        if type == "base":
+            if repo.baseurl:
+                baseurl = repo.baseurl
+            if repo.mirrorlist:
+                mirrorlist = repo.mirrorlist
+        elif type == "debuginfo":
+            if repo.baseurl:
+                if repo.baseurl.endswith("/"):
+                    baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
+                else:
+                    baseurl = os.path.dirname(repo.baseurl)
+                baseurl += "/debug"
+            if repo.mirrorlist:
+                variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
+                mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
+                mirrorlist += "debug" + "-" + variant
+        elif type == "source":
+            if repo.baseurl:
+                if repo.baseurl.endswith("/"):
+                    baseurl = os.path.dirname(os.path.dirname(os.path.dirname(repo.baseurl)))
+                else:
+                    baseurl = os.path.dirname(os.path.dirname(repo.baseurl))
+                baseurl += "/source"
+            if repo.mirrorlist:
+                variant = repo.mirrorlist[repo.mirrorlist.find("$"):]
+                mirrorlist = repo.mirrorlist[0:repo.mirrorlist.find("$")]
+                mirrorlist += "source" + "-" + variant
+
+        fd.write("[" + reponame + "]\n")
+        fd.write("name=" + reponame + "\n")
+        fd.write("failovermethod=priority\n")
+        if baseurl:
+            fd.write("baseurl=" + baseurl + "\n")
+        if mirrorlist:
+            fd.write("mirrorlist=" + mirrorlist + "\n")
+        """ Skip saving proxy settings """
+        #if repo.proxy:
+        #    fd.write("proxy=" + repo.proxy + "\n")
+        #if repo.proxy_username:
+        #    fd.write("proxy_username=" + repo.proxy_username + "\n")
+        #if repo.proxy_password:
+        #    fd.write("proxy_password=" + repo.proxy_password + "\n")
+        if repo.gpgkey:
+            fd.write("gpgkey=" + repo.gpgkey + "\n")
+            fd.write("gpgcheck=1\n")
+        else:
+            fd.write("gpgcheck=0\n")
+        if type == "source" or type == "debuginfo" or repo.disable:
+            fd.write("enabled=0\n")
+        else:
+            fd.write("enabled=1\n")
+        fd.write("\n")
+
+    def __create_repo_file(self, repo, repodir):
+        if not os.path.exists(self.path(repodir)):
+            fs.makedirs(self.path(repodir))
+        f = open(self.path(repodir + "/" + repo.name + ".repo"), "w")
+        self.__create_repo_section(repo, "base", f)
+        if repo.debuginfo:
+            self.__create_repo_section(repo, "debuginfo", f)
+        if repo.source:
+            self.__create_repo_section(repo, "source", f)
+        f.close()
+
+    def apply(self, ksrepo, repodata):
+        for repo in ksrepo.repoList:
+            if repo.save:
+                #self.__create_repo_file(repo, "/etc/yum.repos.d")
+                self.__create_repo_file(repo, "/etc/zypp/repos.d")
+        """ Import repo gpg keys """
+        if repodata:
+            dev_null = os.open("/dev/null", os.O_WRONLY)
+            for repo in repodata:
+                if repo['repokey']:
+                    subprocess.call([fs.find_binary_path("rpm"), "--root=%s" % self.instroot, "--import", repo['repokey']],
+                                    stdout = dev_null, stderr = dev_null)
+            os.close(dev_null)
+
+class RPMMacroConfig(KickstartConfig):
+    """A class to apply the specified rpm macros to the filesystem"""
+    def apply(self, ks):
+        if not ks:
+            return
+        if not os.path.exists(self.path("/etc/rpm")):
+            os.mkdir(self.path("/etc/rpm"))
+        f = open(self.path("/etc/rpm/macros.imgcreate"), "w+")
+        if exclude_docs(ks):
+            f.write("%_excludedocs 1\n")
+        f.write("%__file_context_path %{nil}\n")
+        if inst_langs(ks) != None:
+            f.write("%_install_langs ")
+            f.write(inst_langs(ks))
+            f.write("\n")
+        f.close()
+
+class NetworkConfig(KickstartConfig):
+    """A class to apply a kickstart network configuration to a system."""
+    def write_ifcfg(self, network):
+        p = self.path("/etc/sysconfig/network-scripts/ifcfg-" + network.device)
+
+        f = file(p, "w+")
+        os.chmod(p, 0644)
+
+        f.write("DEVICE=%s\n" % network.device)
+        f.write("BOOTPROTO=%s\n" % network.bootProto)
+
+        if network.bootProto.lower() == "static":
+            if network.ip:
+                f.write("IPADDR=%s\n" % network.ip)
+            if network.netmask:
+                f.write("NETMASK=%s\n" % network.netmask)
+
+        if network.onboot:
+            f.write("ONBOOT=on\n")
+        else:
+            f.write("ONBOOT=off\n")
+
+        if network.essid:
+            f.write("ESSID=%s\n" % network.essid)
+
+        if network.ethtool:
+            if network.ethtool.find("autoneg") == -1:
+                network.ethtool = "autoneg off " + network.ethtool
+            f.write("ETHTOOL_OPTS=%s\n" % network.ethtool)
+
+        if network.bootProto.lower() == "dhcp":
+            if network.hostname:
+                f.write("DHCP_HOSTNAME=%s\n" % network.hostname)
+            if network.dhcpclass:
+                f.write("DHCP_CLASSID=%s\n" % network.dhcpclass)
+
+        if network.mtu:
+            f.write("MTU=%s\n" % network.mtu)
+
+        f.close()
+
+    def write_wepkey(self, network):
+        if not network.wepkey:
+            return
+
+        p = self.path("/etc/sysconfig/network-scripts/keys-" + network.device)
+        f = file(p, "w+")
+        os.chmod(p, 0600)
+        f.write("KEY=%s\n" % network.wepkey)
+        f.close()
+
+    def write_sysconfig(self, useipv6, hostname, gateway):
+        path = self.path("/etc/sysconfig/network")
+        f = file(path, "w+")
+        os.chmod(path, 0644)
+
+        f.write("NETWORKING=yes\n")
+
+        if useipv6:
+            f.write("NETWORKING_IPV6=yes\n")
+        else:
+            f.write("NETWORKING_IPV6=no\n")
+
+        if hostname:
+            f.write("HOSTNAME=%s\n" % hostname)
+        else:
+            f.write("HOSTNAME=localhost.localdomain\n")
+
+        if gateway:
+            f.write("GATEWAY=%s\n" % gateway)
+
+        f.close()
+
+    def write_hosts(self, hostname):
+        localline = ""
+        if hostname and hostname != "localhost.localdomain":
+            localline += hostname + " "
+            l = hostname.split(".")
+            if len(l) > 1:
+                localline += l[0] + " "
+        localline += "localhost.localdomain localhost"
+
+        path = self.path("/etc/hosts")
+        f = file(path, "w+")
+        os.chmod(path, 0644)
+        f.write("127.0.0.1\t\t%s\n" % localline)
+        f.write("::1\t\tlocalhost6.localdomain6 localhost6\n")
+        f.close()
+
+    def write_resolv(self, nodns, nameservers):
+        if nodns or not nameservers:
+            return
+
+        path = self.path("/etc/resolv.conf")
+        f = file(path, "w+")
+        os.chmod(path, 0644)
+
+        for ns in (nameservers):
+            if ns:
+                f.write("nameserver %s\n" % ns)
+
+        f.close()
+
+    def apply(self, ksnet):
+        fs.makedirs(self.path("/etc/sysconfig/network-scripts"))
+
+        useipv6 = False
+        nodns = False
+        hostname = None
+        gateway = None
+        nameservers = None
+
+        for network in ksnet.network:
+            if not network.device:
+                raise errors.KickstartError("No --device specified with "
+                                            "network kickstart command")
+
+            if (network.onboot and network.bootProto.lower() != "dhcp" and
+                not (network.ip and network.netmask)):
+                raise errors.KickstartError("No IP address and/or netmask "
+                                            "specified with static "
+                                            "configuration for '%s'" %
+                                            network.device)
+
+            self.write_ifcfg(network)
+            self.write_wepkey(network)
+
+            if network.ipv6:
+                useipv6 = True
+            if network.nodns:
+                nodns = True
+
+            if network.hostname:
+                hostname = network.hostname
+            if network.gateway:
+                gateway = network.gateway
+
+            if network.nameserver:
+                nameservers = network.nameserver.split(",")
+
+        self.write_sysconfig(useipv6, hostname, gateway)
+        self.write_hosts(hostname)
+        self.write_resolv(nodns, nameservers)
+
+
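+# The helpers below read individual settings out of a parsed kickstart handler:
+# partition sizes and fs options, device modules, bootloader options, repos,
+# package/group lists and %post scripts.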
+def get_image_size(ks, default = None):
+    __size = 0
+    for p in ks.handler.partition.partitions:
+        if p.mountpoint == "/" and p.size:
+            __size = p.size
+    if __size > 0:
+        return int(__size) * 1024L * 1024L
+    else:
+        return default
+
+def get_image_fstype(ks, default = None):
+    for p in ks.handler.partition.partitions:
+        if p.mountpoint == "/" and p.fstype:
+            return p.fstype
+    return default
+
+def get_image_fsopts(ks, default = None):
+    for p in ks.handler.partition.partitions:
+        if p.mountpoint == "/" and p.fsopts:
+            return p.fsopts
+    return default
+
+def get_modules(ks):
+    devices = []
+    if isinstance(ks.handler.device, kscommands.device.FC3_Device):
+        devices.append(ks.handler.device)
+    else:
+        devices.extend(ks.handler.device.deviceList)
+
+    modules = []
+    for device in devices:
+        if not device.moduleName:
+            continue
+        modules.extend(device.moduleName.split(":"))
+
+    return modules
+
+def get_timeout(ks, default = None):
+    if not hasattr(ks.handler.bootloader, "timeout"):
+        return default
+    if ks.handler.bootloader.timeout is None:
+        return default
+    return int(ks.handler.bootloader.timeout)
+
+def get_kernel_args(ks, default = "ro liveimg"):
+    if not hasattr(ks.handler.bootloader, "appendLine"):
+        return default
+    if ks.handler.bootloader.appendLine is None:
+        return default
+    return "%s %s" %(default, ks.handler.bootloader.appendLine)
+
+def get_menu_args(ks, default = "bootinstall"):
+    if not hasattr(ks.handler.bootloader, "menus"):
+        return default
+    if ks.handler.bootloader.menus is None:
+        return default
+    return "%s %s" %(default, ks.handler.bootloader.menus)
+
+def get_default_kernel(ks, default = None):
+    if not hasattr(ks.handler.bootloader, "default"):
+        return default
+    if not ks.handler.bootloader.default:
+        return default
+    return ks.handler.bootloader.default
+
+def get_repos(ks, repo_urls = {}):
+    repos = {}
+    for repo in ks.handler.repo.repoList:
+        inc = []
+        if hasattr(repo, "includepkgs"):
+            inc.extend(repo.includepkgs)
+
+        exc = []
+        if hasattr(repo, "excludepkgs"):
+            exc.extend(repo.excludepkgs)
+
+        baseurl = repo.baseurl
+        mirrorlist = repo.mirrorlist
+
+        if repo.name in repo_urls:
+            baseurl = repo_urls[repo.name]
+            mirrorlist = None
+
+        if repos.has_key(repo.name):
+            logging.warn("Overriding already specified repo %s" %(repo.name,))
+
+        proxy = None
+        if hasattr(repo, "proxy"):
+            proxy = repo.proxy
+        proxy_username = None
+        if hasattr(repo, "proxy_username"):
+            proxy_username = repo.proxy_username
+        proxy_password = None
+        if hasattr(repo, "proxy_password"):
+            proxy_password = repo.proxy_password
+        if hasattr(repo, "debuginfo"):
+            debuginfo = repo.debuginfo
+        if hasattr(repo, "source"):
+            source = repo.source
+        if hasattr(repo, "gpgkey"):
+            gpgkey = repo.gpgkey
+        if hasattr(repo, "disable"):
+            disable = repo.disable
+
+        repos[repo.name] = (repo.name, baseurl, mirrorlist, inc, exc, proxy, proxy_username, proxy_password, debuginfo, source, gpgkey, disable)
+
+    return repos.values()
+
+def convert_method_to_repo(ks):
+    try:
+        ks.handler.repo.methodToRepo()
+    except (AttributeError, kserrors.KickstartError):
+        pass
+
+def get_packages(ks, required = []):
+    return ks.handler.packages.packageList + required
+
+def get_groups(ks, required = []):
+    return ks.handler.packages.groupList + required
+
+def get_excluded(ks, required = []):
+    return ks.handler.packages.excludedList + required
+
+def get_partitions(ks, required = []):
+    return ks.handler.partition.partitions
+
+def ignore_missing(ks):
+    return ks.handler.packages.handleMissing == ksconstants.KS_MISSING_IGNORE
+
+def exclude_docs(ks):
+    return ks.handler.packages.excludeDocs
+
+def inst_langs(ks):
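+    # accept both attribute spellings ("instLange" and "instLangs") seen in
+    # different pykickstart versions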
+    if hasattr(ks.handler.packages, "instLange"):
+        return ks.handler.packages.instLange
+    elif hasattr(ks.handler.packages, "instLangs"):
+        return ks.handler.packages.instLangs
+    return ""
+
+def get_post_scripts(ks):
+    scripts = []
+    for s in ks.handler.scripts:
+        if s.type != ksparser.KS_SCRIPT_POST:
+            continue
+        scripts.append(s)
+    return scripts
+
+def add_repo(ks, repostr):
+    args = repostr.split()
+    repoobj = ks.handler.repo.parse(args[1:])
+    if repoobj and repoobj not in ks.handler.repo.repoList:
+        ks.handler.repo.repoList.append(repoobj) 
+
+def remove_all_repos(ks):
+    while len(ks.handler.repo.repoList) != 0:
+        del ks.handler.repo.repoList[0]
+
+def remove_duplicate_repos(ks):
+    i = 0
+    j = i + 1
+    while True:
+        if len(ks.handler.repo.repoList) < 2:
+            break
+        if i >= len(ks.handler.repo.repoList) - 1:
+            break
+        name = ks.handler.repo.repoList[i].name
+        baseurl = ks.handler.repo.repoList[i].baseurl
+        if j < len(ks.handler.repo.repoList):
+            if (ks.handler.repo.repoList[j].name == name or \
+                ks.handler.repo.repoList[j].baseurl == baseurl):
+                del ks.handler.repo.repoList[j]
+            else:
+                j += 1
+            if j >= len(ks.handler.repo.repoList):
+                i += 1
+                j = i + 1
+        else:
+            i += 1
+            j = i + 1
+
+def resolve_groups(creator, repometadata, use_comps = False):
+    pkgmgr = creator.pkgmgr.get_default_pkg_manager
+    iszypp = False
+    if creator.pkgmgr.managers.has_key("zypp") and creator.pkgmgr.managers['zypp'] == pkgmgr:
+        iszypp = True
+    ks = creator.ks
+
+    for repo in repometadata:
+        """ Mustn't replace group with package list if repo is ready for the corresponding package manager """
+        if iszypp and repo["patterns"] and not use_comps:
+            continue
+        if not iszypp and repo["comps"] and use_comps:
+            continue
+
+        """
+            But we also must handle such cases, use zypp but repo only has comps, 
+            use yum but repo only has patterns, use zypp but use_comps is true,
+            use yum but use_comps is false.
+        """
+        groupfile = None
+        if iszypp:
+            if (use_comps and repo["comps"]) or (not repo["patterns"] and repo["comps"]):
+                groupfile = repo["comps"]
+                get_pkglist_handler = misc.get_pkglist_in_comps
+        if not iszypp:
+            if (not use_comps and repo["patterns"]) or (not repo["comps"] and repo["patterns"]):
+                groupfile = repo["patterns"]
+                get_pkglist_handler = misc.get_pkglist_in_patterns
+            
+        if groupfile:
+            i = 0
+            while True:
+                if i >= len(ks.handler.packages.groupList):
+                    break
+                pkglist = get_pkglist_handler(ks.handler.packages.groupList[i].name, groupfile)
+                if pkglist:
+                    del ks.handler.packages.groupList[i]
+                    for pkg in pkglist:
+                        if pkg not in ks.handler.packages.packageList:
+                            ks.handler.packages.packageList.append(pkg)
+                else:
+                    i = i + 1
diff --git a/micng/utils/kscommands/__init__.py b/micng/utils/kscommands/__init__.py
new file mode 100644 (file)
index 0000000..7123ac1
--- /dev/null
@@ -0,0 +1,8 @@
+import desktop
+import moblinrepo
+
+__all__ = (
+    "Moblin_Desktop",
+    "Moblin_Repo",
+    "Moblin_RepoData",
+)
diff --git a/micng/utils/kscommands/desktop.py b/micng/utils/kscommands/desktop.py
new file mode 100644 (file)
index 0000000..dfa5250
--- /dev/null
@@ -0,0 +1,78 @@
+#!/usr/bin/python -tt
+#
+# Yi Yang <yi.y.yang@intel.com>
+#
+# Copyright 2008, 2009, 2010 Intel, Inc.
+#
+# This copyrighted material is made available to anyone wishing to use, modify,
+# copy, or redistribute it subject to the terms and conditions of the GNU
+# General Public License v.2.  This program is distributed in the hope that it
+# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
+# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.  Any Red Hat
+# trademarks that are incorporated in the source code or documentation are not
+# subject to the GNU General Public License and may only be used or replicated
+# with the express permission of Red Hat, Inc.
+#
+from pykickstart.base import *
+from pykickstart.errors import *
+from pykickstart.options import *
+
+class Moblin_Desktop(KickstartCommand):
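+    """Custom 'desktop' kickstart command, e.g.:
+       desktop --defaultdesktop=GNOME --autologinuser=meego --session="/usr/bin/mutter --sm-disable"
+    """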
+    def __init__(self, writePriority=0, defaultdesktop=None, defaultdm=None, autologinuser="meego", session="/usr/bin/mutter --sm-disable"):
+        KickstartCommand.__init__(self, writePriority)
+
+        self.__new_version = False
+        self.op = self._getParser()
+
+        self.defaultdesktop = defaultdesktop
+        self.autologinuser = autologinuser
+        self.defaultdm = defaultdm
+        self.session = session
+
+    def __str__(self):
+        retval = ""
+
+        if self.defaultdesktop != None:
+            retval += " --defaultdesktop=%s" % self.defaultdesktop
+        if self.session != None:
+            retval += " --session=\"%s\"" % self.session
+        if self.autologinuser != None:
+            retval += " --autologinuser=%s" % self.autologinuser
+        if self.defaultdm != None:
+            retval += " --defaultdm=%s" % self.defaultdm
+
+        if retval != "":
+            retval = "# Default Desktop Settings\ndesktop %s\n" % retval
+
+        return retval
+
+    def _getParser(self):
+        try:
+            op = KSOptionParser(lineno=self.lineno)
+        except TypeError:
+            # newer pykickstart versions do not take a lineno argument here
+            op = KSOptionParser()
+            self.__new_version = True
+
+        op.add_option("--defaultdesktop", dest="defaultdesktop", action="store", type="string", nargs=1)
+        op.add_option("--autologinuser", dest="autologinuser", action="store", type="string", nargs=1)
+        op.add_option("--defaultdm", dest="defaultdm", action="store", type="string", nargs=1)
+        op.add_option("--session", dest="session", action="store", type="string", nargs=1)
+        return op
+
+    def parse(self, args):
+        if self.__new_version:
+            (opts, extra) = self.op.parse_args(args=args, lineno=self.lineno)
+        else:
+            (opts, extra) = self.op.parse_args(args=args)
+
+        if extra:
+            mapping = {"command": "desktop", "options": extra}
+            raise KickstartValueError, formatErrorMsg(self.lineno, msg=_("Unexpected arguments to %(command)s command: %(options)s") % mapping)
+
+        self._setToSelf(self.op, opts)
diff --git a/micng/utils/kscommands/micboot.py b/micng/utils/kscommands/micboot.py
new file mode 100644 (file)
index 0000000..9e26dae
--- /dev/null
@@ -0,0 +1,45 @@
+#!/usr/bin/python -tt
+#
+# Anas Nashif
+#
+# Copyright 2008, 2009, 2010 Intel, Inc.
+#
+# This copyrighted material is made available to anyone wishing to use, modify,
+# copy, or redistribute it subject to the terms and conditions of the GNU
+# General Public License v.2.  This program is distributed in the hope that it
+# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
+# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.  Any Red Hat
+# trademarks that are incorporated in the source code or documentation are not
+# subject to the GNU General Public License and may only be used or replicated
+# with the express permission of Red Hat, Inc.
+#
+from pykickstart.base import *
+from pykickstart.errors import *
+from pykickstart.options import *
+from pykickstart.commands.bootloader import *
+class Moblin_Bootloader(F8_Bootloader):
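+    """The F8 bootloader command extended with a --menus option."""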
+    def __init__(self, writePriority=10, appendLine="", driveorder=None,
+                 forceLBA=False, location="", md5pass="", password="",
+                 upgrade=False, menus=""):
+        F8_Bootloader.__init__(self, writePriority, appendLine, driveorder,
+                                forceLBA, location, md5pass, password, upgrade)
+
+        self.menus = ""
+
+    def _getArgsAsStr(self):
+        ret = F8_Bootloader._getArgsAsStr(self)
+
+        if self.menus == "":
+            ret += " --menus=%s" %(self.menus,)
+        return ret
+
+    def _getParser(self):
+        op = F8_Bootloader._getParser(self)
+        op.add_option("--menus", dest="menus")
+        return op
+
diff --git a/micng/utils/kscommands/moblinrepo.py b/micng/utils/kscommands/moblinrepo.py
new file mode 100644 (file)
index 0000000..c022a0a
--- /dev/null
@@ -0,0 +1,97 @@
+#!/usr/bin/python -tt
+#
+# Yi Yang <yi.y.yang@intel.com>
+#
+# Copyright 2008, 2009, 2010 Intel, Inc.
+#
+# This copyrighted material is made available to anyone wishing to use, modify,
+# copy, or redistribute it subject to the terms and conditions of the GNU
+# General Public License v.2.  This program is distributed in the hope that it
+# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
+# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+# See the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.  Any Red Hat
+# trademarks that are incorporated in the source code or documentation are not
+# subject to the GNU General Public License and may only be used or replicated
+# with the express permission of Red Hat, Inc.
+#
+from pykickstart.base import *
+from pykickstart.errors import *
+from pykickstart.options import *
+from pykickstart.commands.repo import *
+
+class Moblin_RepoData(F8_RepoData):
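+    """F8 repo data extended with --save, --proxy, --proxyuser, --proxypasswd,
+       --debuginfo, --source, --gpgkey and --disable options."""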
+    def __init__(self, baseurl="", mirrorlist="", name="", priority=None,
+                 includepkgs=[], excludepkgs=[], save=False, proxy=None,
+                 proxy_username=None, proxy_password=None, debuginfo=False, source=False, gpgkey=None, disable=False):
+        F8_RepoData.__init__(self, baseurl=baseurl, mirrorlist=mirrorlist,
+                             name=name,  includepkgs=includepkgs,
+                             excludepkgs=excludepkgs)
+        self.save = save
+        self.proxy = proxy
+        self.proxy_username = proxy_username
+        self.proxy_password = proxy_password
+        self.debuginfo = debuginfo
+        self.disable = disable
+        self.source = source
+        self.gpgkey = gpgkey
+
+    def _getArgsAsStr(self):
+        retval = F8_RepoData._getArgsAsStr(self)
+
+        if self.save:
+            retval += " --save"
+        if self.proxy:
+            retval += " --proxy=%s" % self.proxy
+        if self.proxy_username:
+            retval += " --proxyuser=%s" % self.proxy_username
+        if self.proxy_password:
+            retval += " --proxypasswd=%s" % self.proxy_password
+        if self.debuginfo:
+            retval += " --debuginfo"
+        if self.source:
+            retval += " --source"
+        if self.gpgkey:
+            retval += " --gpgkey=%s" % self.gpgkey
+        if self.disable:
+            retval += " --disable"
+
+        return retval
+
+class Moblin_Repo(F8_Repo):
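+    """Custom 'repo' kickstart command, e.g.:
+       repo --name=<name> --baseurl=<url> --save --debuginfo --source --gpgkey=<url>
+    """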
+    def __init__(self, writePriority=0, repoList=None):
+        F8_Repo.__init__(self, writePriority, repoList)
+
+    def __str__(self):
+        retval = ""
+        for repo in self.repoList:
+            retval += repo.__str__()
+
+        return retval
+
+    def _getParser(self):
+        def list_cb (option, opt_str, value, parser):
+            for d in value.split(','):
+                parser.values.ensure_value(option.dest, []).append(d)
+
+        op = F8_Repo._getParser(self)
+        op.add_option("--save", action="store_true", dest="save",
+                      default=False)
+        op.add_option("--proxy", type="string", action="store", dest="proxy",
+                      default=None, nargs=1)
+        op.add_option("--proxyuser", type="string", action="store", dest="proxy_username",
+                      default=None, nargs=1)
+        op.add_option("--proxypasswd", type="string", action="store", dest="proxy_password",
+                      default=None, nargs=1)
+        op.add_option("--debuginfo", action="store_true", dest="debuginfo",
+                      default=False)
+        op.add_option("--source", action="store_true", dest="source",
+                      default=False)
+        op.add_option("--disable", action="store_true", dest="disable",
+                      default=False)
+        op.add_option("--gpgkey", type="string", action="store", dest="gpgkey",
+                      default=None, nargs=1)
+        return op
diff --git a/micng/utils/logger.py b/micng/utils/logger.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/micng/utils/misc.py b/micng/utils/misc.py
new file mode 100644 (file)
index 0000000..d36e5e6
--- /dev/null
@@ -0,0 +1,1146 @@
+#
+# misc.py : miscellaneous utilities
+#
+# Copyright 2010, Intel Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+
+import os
+import sys
+import subprocess
+import logging
+import tempfile
+import re
+import shutil
+import glob
+import xml.dom.minidom
+import hashlib
+import urlparse
+import locale
+import codecs
+
+try:
+    import sqlite3 as sqlite
+except ImportError:
+    import sqlite
+import _sqlitecache
+
+try:
+    from xml.etree import cElementTree
+except ImportError:
+    import cElementTree
+xmlparse = cElementTree.parse
+
+from errors import *
+from fs_related import *
+
+
+def setlocale():
+    try:
+        locale.setlocale(locale.LC_ALL,'')
+    except locale.Error:
+        os.environ['LC_ALL'] = 'C'
+        locale.setlocale(locale.LC_ALL,'C')
+    sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
+    sys.stdout.errors = 'replace'
+
+def get_extension_name(path):
+    match = re.search("(?<=\.)\w+$", path)
+    if match:
+        return match.group(0)
+    else:
+        return None
+
+def get_image_type(path):
+    if os.path.isdir(path):
+        if ismeego(path):
+            return "fs"
+        return None
+    maptab = {"raw":"raw", "vmdk":"vmdk", "vdi":"vdi", "iso":"livecd", "usbimg":"liveusb"}
+    extension = get_extension_name(path)
+    if extension in ("raw", "vmdk", "vdi", "iso", "usbimg"):
+        return maptab[extension]
+
+    fd = open(path, "rb")
+    file_header = fd.read(1024)
+    fd.close()
+    vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
+    if file_header[0:len(vdi_flag)] == vdi_flag:
+        return maptab["vdi"]
+
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    filecmd = find_binary_path("file")
+    args = [ filecmd, path ]
+    file = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null)
+    output = file.communicate()[0]
+    os.close(dev_null)
+    isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
+    usbimgptn = re.compile(r".*x86 boot sector.*active.*")
+    rawptn = re.compile(r".*x86 boot sector.*")
+    vmdkptn = re.compile(r".*VMware. disk image.*")
+    ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
+    if isoptn.match(output):
+        return maptab["iso"]
+    elif usbimgptn.match(output):
+        return maptab["usbimg"]
+    elif rawptn.match(output):
+        return maptab["raw"]
+    elif vmdkptn.match(output):
+        return maptab["vmdk"]
+    elif ext3fsimgptn.match(output):
+        return "ext3fsimg"
+    else:
+        return None
+
+def get_file_size(file):
+    """Return size in MB unit"""
+    du = find_binary_path("du")
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    duProc = subprocess.Popen([du, "-s", "-b", "-B", "1M", file],
+                               stdout=subprocess.PIPE, stderr=dev_null)
+    duOutput = duProc.communicate()[0]
+    if duProc.returncode:
+        raise CreatorError("Failed to run %s" % du)
+
+    size1 = int(duOutput.split()[0])
+    duProc = subprocess.Popen([du, "-s", "-B", "1M", file],
+                               stdout=subprocess.PIPE, stderr=dev_null)
+    duOutput = duProc.communicate()[0]
+    if duProc.returncode:
+        raise CreatorError("Failed to run %s" % du)
+
+    size2 = int(duOutput.split()[0])
+    os.close(dev_null)
+    if size1 > size2:
+        return size1
+    else:
+        return size2
+
+def get_filesystem_avail(fs):
+    vfstat = os.statvfs(fs)
+    return vfstat.f_bavail * vfstat.f_bsize
+
+def convert_image(srcimg, srcfmt, dstimg, dstfmt):
+    #convert disk format
+    if dstfmt != "raw":
+        raise CreatorError("Invalid destination image format: %s" % dstfmt)
+    logging.debug("converting %s image to %s" % (srcimg, dstimg))
+    if srcfmt == "vmdk":
+        path = find_binary_path("qemu-img")
+        argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt,  dstimg]
+    elif srcfmt == "vdi":
+        path = find_binary_path("VBoxManage")
+        argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
+    else:
+        raise CreatorError("Invalid soure image format: %s" % srcfmt)
+
+    rc = subprocess.call(argv)
+    if rc == 0:
+        logging.debug("convert successful")
+    if rc != 0:
+        raise CreatorError("Unable to convert disk to %s" % dstfmt)
+
+def myxcopytree(src, dst):
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    dirnames = os.listdir(src)
+    copycmd = find_binary_path("cp")
+    for dir in dirnames:
+        args = [ copycmd, "-af", src + "/" + dir, dst ]
+        subprocess.call(args, stdout=dev_null, stderr=dev_null)
+    os.close(dev_null)
+    ignores = ["dev/fd", "dev/stdin", "dev/stdout", "dev/stderr", "etc/mtab"]
+    for exclude in ignores:
+        if os.path.exists(dst + "/" + exclude):
+            os.unlink(dst + "/" + exclude)
+
+def uncompress_squashfs(squashfsimg, outdir):
+    """Uncompress file system from squshfs image"""
+    unsquashfs = find_binary_path("unsquashfs")
+    args = [ unsquashfs, "-d", outdir, squashfsimg ]
+    rc = subprocess.call(args)
+    if (rc != 0):
+        raise SquashfsError("Failed to uncompress %s." % squashfsimg)
+
+def mkdtemp(dir = "/var/tmp", prefix = "mic-tmp-"):
+    makedirs(dir)
+    return tempfile.mkdtemp(dir = dir, prefix = prefix)
+
+def ismeego(rootdir):
+    ret = False
+    if (os.path.exists(rootdir + "/etc/moblin-release") \
+       or os.path.exists(rootdir + "/etc/meego-release")) \
+       and os.path.exists(rootdir + "/etc/inittab") \
+       and os.path.exists(rootdir + "/etc/rc.sysinit") \
+       and glob.glob(rootdir + "/boot/vmlinuz-*"):
+        ret = True
+
+    return ret
+
+
+def is_meego_bootstrap(rootdir):
+    ret = False
+    if (os.path.exists(rootdir + "/etc/moblin-release") \
+       or os.path.exists(rootdir + "/etc/meego-release")) \
+       and os.path.exists(rootdir + "/usr/bin/python") \
+       and os.path.exists(rootdir + "/usr/bin/mic-image-creator"):
+        ret = True
+
+    return ret
+
+
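+# Module-level proxy state: a scheme -> proxy URL map, the raw no_proxy string
+# and the no_proxy match list built by set_noproxy_list().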
+_my_proxies = {}
+_my_noproxy = None
+_my_noproxy_list = []
+
+def set_proxy_environ():
+    global _my_noproxy, _my_proxies
+    if not _my_proxies:
+        return
+    for key in _my_proxies.keys():
+        os.environ[key + "_proxy"] = _my_proxies[key]
+    if not _my_noproxy:
+        return
+    os.environ["no_proxy"] = _my_noproxy
+
+def unset_proxy_environ():
+   if os.environ.has_key("http_proxy"):
+       del os.environ["http_proxy"]
+   if os.environ.has_key("https_proxy"):
+       del os.environ["https_proxy"]
+   if os.environ.has_key("ftp_proxy"):
+       del os.environ["ftp_proxy"]
+   if os.environ.has_key("all_proxy"):
+       del os.environ["all_proxy"]
+   if os.environ.has_key("no_proxy"):
+       del os.environ["no_proxy"]
+   if os.environ.has_key("HTTP_PROXY"):
+       del os.environ["HTTP_PROXY"]
+   if os.environ.has_key("HTTPS_PROXY"):
+       del os.environ["HTTPS_PROXY"]
+   if os.environ.has_key("FTP_PROXY"):
+       del os.environ["FTP_PROXY"]
+   if os.environ.has_key("ALL_PROXY"):
+       del os.environ["ALL_PROXY"]
+   if os.environ.has_key("NO_PROXY"):
+       del os.environ["NO_PROXY"]
+
+def _set_proxies(proxy = None, no_proxy = None):
+    """Return a dictionary of scheme -> proxy server URL mappings."""
+    global _my_noproxy, _my_proxies
+    _my_proxies = {}
+    _my_noproxy = None
+    proxies = []
+    if proxy:
+       proxies.append(("http_proxy", proxy))
+    if no_proxy:
+       proxies.append(("no_proxy", no_proxy))
+
+    """Get proxy settings from environment variables if not provided"""
+    if not proxy and not no_proxy:
+       proxies = os.environ.items()
+
+       """ Remove proxy env variables, urllib2 can't handle them correctly """
+       unset_proxy_environ()
+
+    for name, value in proxies:
+        name = name.lower()
+        if value and name[-6:] == '_proxy':
+            if name[0:2] != "no":
+                _my_proxies[name[:-6]] = value
+            else:
+                _my_noproxy = value
+
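+# Dotted-quad/integer conversion helpers used for matching IP/MASK no_proxy entries.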
+def ip_to_int(ip):
+    ipint=0
+    shift=24
+    for dec in ip.split("."):
+        ipint |= int(dec) << shift
+        shift -= 8
+    return ipint
+
+def int_to_ip(val):
+    ipaddr=""
+    shift=0
+    for i in range(4):
+        dec = val >> shift
+        dec &= 0xff
+        ipaddr = ".%d%s" % (dec, ipaddr)
+        shift += 8
+    return ipaddr[1:]
+
+def isip(host):
+    if host.replace(".", "").isdigit():
+        return True
+    return False
+
+def set_noproxy_list():
+    global _my_noproxy, _my_noproxy_list
+    _my_noproxy_list = []
+    if not _my_noproxy:
+        return
+    for item in _my_noproxy.split(","):
+        item = item.strip()
+        if not item:
+            continue
+        if item[0] != '.' and item.find("/") == -1:
+            """ Need to match it """
+            _my_noproxy_list.append({"match":0,"needle":item})
+        elif item[0] == '.':
+            """ Need to match at tail """
+            _my_noproxy_list.append({"match":1,"needle":item})
+        elif item.find("/") > 3:
+            """ IP/MASK, need to match at head """
+            needle = item[0:item.find("/")].strip()
+            ip = ip_to_int(needle)
+            netmask = 0
+            mask = item[item.find("/")+1:].strip()
+
+            if mask.isdigit():
+                netmask = int(mask)
+                netmask = ~((1<<(32-netmask)) - 1)
+                ip &= netmask
+            else:
+                shift=24
+                netmask=0
+                for dec in mask.split("."):
+                    netmask |= int(dec) << shift
+                    shift -= 8
+                ip &= netmask
+            _my_noproxy_list.append({"match":2,"needle":ip,"netmask":netmask})
+
+def isnoproxy(url):
+    (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+    if '@' in host:
+        user_pass, host = host.split('@', 1)
+    if ':' in host:
+        host, port = host.split(':', 1)
+    hostisip = isip(host)
+    for item in _my_noproxy_list:
+        if hostisip and item["match"] <= 1:
+            continue
+        if item["match"] == 2 and hostisip:
+            if (ip_to_int(host) & item["netmask"]) == item["needle"]:
+                return True
+        if item["match"] == 0:
+            if host == item["needle"]:
+                return True
+        if item["match"] == 1:
+            if host.rfind(item["needle"]) > 0:
+                return True
+    return False
+
+def set_proxies(proxy = None, no_proxy = None):
+    _set_proxies(proxy, no_proxy)
+    set_noproxy_list()
+
+def get_proxy(url):
+    if url[0:4] == "file" or isnoproxy(url):
+        return None
+    type = url[0:url.index(":")]
+    proxy = None
+    if _my_proxies.has_key(type):
+        proxy = _my_proxies[type]
+    elif _my_proxies.has_key("http"):
+        proxy = _my_proxies["http"]
+    else:
+        proxy = None
+    return proxy
+
+def remap_repostr(repostr, siteconf):
+    items = repostr.split(",")
+    name = None
+    baseurl = None
+    for item in items:
+        subitems = item.split(":")
+        if subitems[0] == "name":
+            name = subitems[1]
+        if subitems[0] == "baseurl":
+            baseurl = item[8:]
+    if not baseurl:
+        baseurl = repostr
+
+    for section in siteconf._sections:
+        if section != "main":
+            if not siteconf.has_option(section, "enabled") or siteconf.get(section, "enabled") == "0":
+                continue
+            if siteconf.has_option(section, "equalto"):
+                equalto = siteconf.get(section, "equalto")
+                if (name and equalto == name) or (baseurl and equalto == baseurl):
+                    remap_baseurl = siteconf.get(section, "baseurl")
+                    repostr = repostr.replace(baseurl, remap_baseurl)
+                    return repostr
+
+    return repostr
+
+
+def get_temp_reponame(baseurl):
+    md5obj = hashlib.md5(baseurl)
+    tmpreponame = "%s" % md5obj.hexdigest()
+    return tmpreponame
+
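+# Repo strings are comma separated key:value items, e.g. "name:<name>,baseurl:<url>,save:";
+# get_repostr() converts such a string into a kickstart 'repo' command line.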
+def get_repostr(repo, siteconf = None):
+    if siteconf:
+        repo = remap_repostr(repo, siteconf)
+    keys = ("baseurl", "mirrorlist", "name", "cost", "includepkgs", "excludepkgs", "proxy", "save", "proxyuser", "proxypasswd", "debuginfo", "source", "gpgkey")
+    repostr = "repo"
+    items = repo.split(",")
+    if len(items) == 1:
+        subitems = items[0].split(":")
+        if len(subitems) == 1:
+            url = subitems[0]
+            repostr += " --baseurl=%s" % url
+        elif subitems[0] == "baseurl":
+            url = items[0][8:]
+            repostr += " --baseurl=%s" % url
+        elif subitems[0] in ("http", "ftp", "https", "ftps", "file"):
+            url = items[0]
+            repostr += " --baseurl=%s" % url
+        else:
+            raise ValueError("Invalid repo string")
+        if url.find("://") == -1 \
+           or url[0:url.index("://")] not in ("http", "ftp", "https", "ftps", "file") \
+           or url.find("/", url.index("://")+3) == -1:
+            raise ValueError("Invalid repo string")
+    else:
+        if repo.find("baseurl:") == -1 and repo.find("mirrorlist:") == -1:
+            raise ValueError("Invalid repo string")
+        url = None
+        for item in items:
+            if not item:
+                continue
+            subitems = item.split(":")
+            if subitems[0] in keys:
+                if subitems[0] in ("baseurl", "mirrorlist"):
+                    url = item[len(subitems[0])+1:]
+                if subitems[0] in ("save", "debuginfo", "source"):
+                    repostr += " --%s" % subitems[0]
+                elif subitems[0] in ("includepkgs", "excludepkgs"):
+                    repostr += " --%s=%s" % (subitems[0], item[len(subitems[0])+1:].replace(";", ","))
+                else:
+                    repostr += " --%s=%s" % (subitems[0], item[len(subitems[0])+1:])
+            else:
+                raise ValueError("Invalid repo string")
+    if url.find("://") != -1 \
+       and url[0:url.index("://")] in ("http", "ftp", "https", "ftps", "file") \
+       and url.find("/", url.index("://")+3) != -1:
+        if repostr.find("--proxy=") == -1:
+            proxy = get_proxy(url)
+            if proxy:
+                repostr += " --proxy=%s" % proxy
+    else:
+        raise ValueError("Invalid repo string")
+
+    if repostr.find("--name=") == -1:
+        repostr += " --name=%s" % get_temp_reponame(url)
+
+    return repostr
+
+DEFAULT_SITECONF_GLOBAL="/etc/mic2/mic2.conf"
+DEFAULT_SITECONF_USER="~/.mic2.conf"
+
+def read_siteconf(siteconf = None):
+    from ConfigParser import SafeConfigParser
+
+    my_siteconf_parser = SafeConfigParser()
+    if not siteconf:
+        global_siteconf = DEFAULT_SITECONF_GLOBAL
+        if os.path.isfile(global_siteconf):
+            my_siteconf_parser.read(global_siteconf)
+
+        local_siteconf = os.path.expanduser(DEFAULT_SITECONF_USER)
+        if os.path.isfile(local_siteconf):
+            my_siteconf_parser.read(local_siteconf)
+    else:
+        my_siteconf_parser.read(siteconf)
+
+    if not my_siteconf_parser.sections():
+        return None
+    else:
+        return my_siteconf_parser
+
+def output_siteconf(siteconf):
+    output = ""
+    if not siteconf:
+        return output
+
+    for section in siteconf.sections():
+        output += "[%s]\n" % section
+        for option in siteconf.options(section):
+            output += "%s=%s\n" % (option, siteconf.get(section, option))
+        output += "\n\n"
+
+    print output
+    return output
+
+def get_repostrs_from_ks(ks):
+    kickstart_repos = []
+    for repodata in ks.handler.repo.repoList:
+        repostr = ""
+        if hasattr(repodata, "name") and repodata.name:
+            repostr += ",name:" + repodata.name
+        if hasattr(repodata, "baseurl") and repodata.baseurl:
+            repostr += ",baseurl:" + repodata.baseurl
+        if hasattr(repodata, "mirrorlist") and repodata.mirrorlist:
+            repostr += ",mirrorlist:" + repodata.mirrorlist
+        if hasattr(repodata, "includepkgs") and repodata.includepkgs:
+            repostr += ",includepkgs:" + ";".join(repodata.includepkgs)
+        if hasattr(repodata, "excludepkgs") and repodata.excludepkgs:
+            repostr += ",excludepkgs:" + ";".join(repodata.excludepkgs)
+        if hasattr(repodata, "cost") and repodata.cost:
+            repostr += ",cost:%d" % repodata.cost
+        if hasattr(repodata, "save") and repodata.save:
+            repostr += ",save:"
+        if hasattr(repodata, "proxy") and repodata.proxy:
+            repostr += ",proxy:" + repodata.proxy
+        if hasattr(repodata, "proxyuser") and repodata.proxy_username:
+            repostr += ",proxyuser:" + repodata.proxy_username
+        if  hasattr(repodata, "proxypasswd") and repodata.proxy_password:
+            repostr += ",proxypasswd:" + repodata.proxy_password
+        if repostr.find("name:") == -1:
+            repostr = ",name:%s" % get_temp_reponame(repodata.baseurl)
+        if hasattr(repodata, "debuginfo") and repodata.debuginfo:
+            repostr += ",debuginfo:"
+        if hasattr(repodata, "source") and repodata.source:
+            repostr += ",source:"
+        if  hasattr(repodata, "gpgkey") and repodata.gpgkey:
+            repostr += ",gpgkey:" + repodata.gpgkey
+        kickstart_repos.append(repostr[1:])
+    return kickstart_repos
+
+def get_repostrs_from_siteconf(siteconf):
+    site_repos = []
+    if not siteconf:
+        return site_repos
+
+    for section in siteconf._sections:
+        if section != "main":
+            repostr = ""
+            if siteconf.has_option(section, "enabled") \
+               and siteconf.get(section, "enabled") == "1" \
+               and (not siteconf.has_option(section, "equalto") or not siteconf.get(section, "equalto")):
+                if siteconf.has_option(section, "name") and siteconf.get(section, "name"):
+                    repostr += ",name:%s" % siteconf.get(section, "name")
+                if siteconf.has_option(section, "baseurl") and siteconf.get(section, "baseurl"):
+                    repostr += ",baseurl:%s" % siteconf.get(section, "baseurl")
+                if siteconf.has_option(section, "mirrorlist") and siteconf.get(section, "mirrorlist"):
+                    repostr += ",mirrorlist:%s" % siteconf.get(section, "mirrorlist")
+                if siteconf.has_option(section, "includepkgs") and siteconf.get(section, "includepkgs"):
+                    repostr += ",includepkgs:%s" % siteconf.get(section, "includepkgs").replace(",", ";")
+                if siteconf.has_option(section, "excludepkgs") and siteconf.get(section, "excludepkgs"):
+                    repostr += ",excludepkgs:%s" % siteconf.get(section, "excludepkgs").replace(",", ";")
+                if siteconf.has_option(section, "cost") and siteconf.get(section, "cost"):
+                    repostr += ",cost:%s" % siteconf.get(section, "cost")
+                if siteconf.has_option(section, "save") and siteconf.get(section, "save"):
+                    repostr += ",save:"
+                if siteconf.has_option(section, "proxy") and siteconf.get(section, "proxy"):
+                    repostr += ",proxy:%s" % siteconf.get(section, "proxy")
+                if siteconf.has_option(section, "proxy_username") and siteconf.get(section, "proxy_username"):
+                    repostr += ",proxyuser:%s" % siteconf.get(section, "proxy_username")
+                if siteconf.has_option(section, "proxy_password") and siteconf.get(section, "proxy_password"):
+                    repostr += ",proxypasswd:%s" % siteconf.get(section, "proxy_password")
+            if repostr != "":
+                if repostr.find("name:") == -1:
+                    repostr = ",name:%s" % get_temp_reponame()
+                site_repos.append(repostr[1:])
+    return site_repos
+
+def get_uncompressed_data_from_url(url, filename, proxies):
+    filename = myurlgrab(url, filename, proxies)
+    suffix = None
+    if filename.endswith(".gz"):
+        suffix = ".gz"
+        gunzip = find_binary_path('gunzip')
+        subprocess.call([gunzip, "-f", filename])
+    elif filename.endswith(".bz2"):
+        suffix = ".bz2"
+        bunzip2 = find_binary_path('bunzip2')
+        subprocess.call([bunzip2, "-f", filename])
+    if suffix:
+        filename = filename.replace(suffix, "")
+    return filename
+
+def get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename):
+    url = str(baseurl + "/" + filename)
+    filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
+    return get_uncompressed_data_from_url(url,filename_tmp,proxies)
+
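+# For each repo string, fetch repomd.xml, then the primary (sqlite or xml),
+# patterns, comps and repo key files, caching everything under cachedir/<reponame>.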
+def get_metadata_from_repos(repostrs, cachedir):
+    if not cachedir:
+        CreatorError("No cache dir defined.")
+
+    my_repo_metadata = []
+    for repostr in repostrs:
+        reponame = None
+        baseurl = None
+        proxy = None
+        items = repostr.split(",")
+        for item in items:
+            subitems = item.split(":")
+            if subitems[0] == "name":
+                reponame = subitems[1]
+            if subitems[0] == "baseurl":
+                baseurl = item[8:]
+            if subitems[0] == "proxy":
+                proxy = item[6:]
+            if subitems[0] in ("http", "https", "ftp", "ftps", "file"):
+                baseurl = item
+        if not proxy:
+            proxy = get_proxy(baseurl)
+        proxies = None
+        if proxy:
+           proxies = {str(proxy.split(":")[0]):str(proxy)}
+        makedirs(cachedir + "/" + reponame)
+        url = str(baseurl + "/repodata/repomd.xml")
+        filename = str("%s/%s/repomd.xml" % (cachedir, reponame))
+        repomd = myurlgrab(url, filename, proxies)
+        try:
+            root = xmlparse(repomd)
+        except SyntaxError:
+            raise CreatorError("repomd.xml syntax error.")
+
+        ns = root.getroot().tag
+        ns = ns[0:ns.rindex("}")+1]
+
+        patterns = None
+        for elm in root.getiterator("%sdata" % ns):
+            if elm.attrib["type"] == "patterns":
+                patterns = elm.find("%slocation" % ns).attrib['href']
+                break
+
+        comps = None
+        for elm in root.getiterator("%sdata" % ns):
+            if elm.attrib["type"] == "group_gz":
+                comps = elm.find("%slocation" % ns).attrib['href']
+                break
+        if not comps:
+            for elm in root.getiterator("%sdata" % ns):
+                if elm.attrib["type"] == "group":
+                    comps = elm.find("%slocation" % ns).attrib['href']
+                    break
+
+        primary_type = None
+        for elm in root.getiterator("%sdata" % ns):
+            if elm.attrib["type"] == "primary_db":
+                primary_type=".sqlite"
+                break
+
+        if not primary_type:
+            for elm in root.getiterator("%sdata" % ns):
+                if elm.attrib["type"] == "primary":
+                    primary_type=".xml"
+                    break
+
+        if not primary_type:
+            continue
+
+        primary = elm.find("%slocation" % ns).attrib['href']
+        primary = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, primary)
+
+        if patterns:
+            patterns = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, patterns)
+
+        if comps:
+            comps = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, comps)
+
+        """ Get repo key """
+        try:
+            repokey = get_metadata_from_repo(baseurl, proxies, cachedir, reponame, "repodata/repomd.xml.key")
+        except CreatorError:
+            repokey = None
+            print "Warning: can't get %s/%s" % (baseurl, "repodata/repomd.xml.key")
+
+        my_repo_metadata.append({"name":reponame, "baseurl":baseurl, "repomd":repomd, "primary":primary, "cachedir":cachedir, "proxies":proxies, "patterns":patterns, "comps":comps, "repokey":repokey})
+    return my_repo_metadata
+
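+# Scan each repo's primary metadata (xml or sqlite) and return the list of
+# binary architectures it contains (src and noarch excluded).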
+def get_arch(repometadata):
+    archlist = []
+    for repo in repometadata:
+        if repo["primary"].endswith(".xml"):
+            root = xmlparse(repo["primary"])
+            ns = root.getroot().tag
+            ns = ns[0:ns.rindex("}")+1]
+            for elm in root.getiterator("%spackage" % ns):
+                if elm.find("%sarch" % ns).text not in ("noarch", "src"):
+                    arch = elm.find("%sarch" % ns).text
+                    if arch not in archlist:
+                        archlist.append(arch)
+        elif repo["primary"].endswith(".sqlite"):
+            con = sqlite.connect(repo["primary"])
+            for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
+                if row[0] not in archlist:
+                    archlist.append(row[0])
+
+            con.close()
+    return archlist
+
+
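+# Look a package up in the repo metadata and download it into the repo cache
+# directory; returns the local file path, or None if the package is not found.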
+def get_package(pkg, repometadata, arch = None):
+    ver = ""
+    target_repo = None
+    for repo in repometadata:
+        if repo["primary"].endswith(".xml"):
+            root = xmlparse(repo["primary"])
+            ns = root.getroot().tag
+            ns = ns[0:ns.rindex("}")+1]
+            for elm in root.getiterator("%spackage" % ns):
+                if elm.find("%sname" % ns).text == pkg:
+                    if elm.find("%sarch" % ns).text != "src":
+                        version = elm.find("%sversion" % ns)
+                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
+                        if tmpver > ver:
+                            ver = tmpver
+                            location = elm.find("%slocation" % ns)
+                            pkgpath = "%s" % location.attrib['href']
+                            target_repo = repo
+                        break
+        if repo["primary"].endswith(".sqlite"):
+            con = sqlite.connect(repo["primary"])
+            if not arch:
+                for row in con.execute("select version, release,location_href from packages where name = \"%s\" and arch != \"src\"" % pkg):
+                    tmpver = "%s-%s" % (row[0], row[1])
+                    if tmpver > ver:
+                        pkgpath = "%s" % row[2]
+                        target_repo = repo
+                    break
+            else:
+                for row in con.execute("select version, release,location_href from packages where name = \"%s\"" % pkg):
+                    tmpver = "%s-%s" % (row[0], row[1])
+                    if tmpver > ver:
+                        pkgpath = "%s" % row[2]
+                        target_repo = repo
+                    break
+            con.close()
+    if target_repo: 
+        makedirs("%s/%s/packages" % (target_repo["cachedir"], target_repo["name"]))
+        url = str(target_repo["baseurl"] + "/" + pkgpath)
+        filename = str("%s/%s/packages/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
+        pkg = myurlgrab(url, filename, target_repo["proxies"])
+        return pkg
+    else:
+        return None
+
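+# Given a binary rpm filename, look up its source rpm in the repo metadata and
+# return the source package name (or None).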
+def get_source_name(pkg, repometadata):
+
+    def get_bin_name(pkg):
+        m = re.match("(.*)-(.*)-(.*)\.(.*)\.rpm", pkg)
+        if m:
+            return m.group(1)
+        return None
+
+    def get_src_name(srpm):
+        m = re.match("(.*)-(\d+.*)-(\d+\.\d+).src.rpm", srpm)
+        if m:
+            return m.group(1)
+        return None
+
+    ver = ""
+    target_repo = None
+
+    pkg_name = get_bin_name(pkg)
+    if not pkg_name:
+        return None
+
+    for repo in repometadata:
+        if repo["primary"].endswith(".xml"):
+            root = xmlparse(repo["primary"])
+            ns = root.getroot().tag
+            ns = ns[0:ns.rindex("}")+1]
+            for elm in root.getiterator("%spackage" % ns):
+                if elm.find("%sname" % ns).text == pkg_name:
+                    if elm.find("%sarch" % ns).text != "src":
+                        version = elm.find("%sversion" % ns)
+                        tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
+                        if tmpver > ver:
+                            ver = tmpver
+                            fmt = elm.find("%sformat" % ns)
+                            if fmt:
+                                fns = fmt.getchildren()[0].tag
+                                fns = fns[0:fns.rindex("}")+1]
+                                pkgpath = fmt.find("%ssourcerpm" % fns).text
+                                target_repo = repo
+                        break
+
+        if repo["primary"].endswith(".sqlite"):
+            con = sqlite.connect(repo["primary"])
+            for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
+                tmpver = "%s-%s" % (row[0], row[1])
+                if tmpver > ver:
+                    pkgpath = "%s" % row[2]
+                    target_repo = repo
+                break
+            con.close()
+    if target_repo:
+        return get_src_name(pkgpath)
+    else:
+        return None
+
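+# Get the distro release number by downloading the <distro>-release package,
+# unpacking it with rpm2cpio | cpio and reading the third field of /etc/<distro>-release.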
+def get_release_no(repometadata, distro="meego"):
+    cpio = find_binary_path("cpio")
+    rpm2cpio = find_binary_path("rpm2cpio")
+    release_pkg = get_package("%s-release" % distro, repometadata)
+    if release_pkg:
+        tmpdir = mkdtemp()
+        oldcwd = os.getcwd()
+        os.chdir(tmpdir)
+        p1 = subprocess.Popen([rpm2cpio, release_pkg], stdout = subprocess.PIPE)
+        p2 = subprocess.Popen([cpio, "-idv"], stdin = p1.stdout, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
+        p2.communicate()
+        f = open("%s/etc/%s-release" % (tmpdir, distro), "r")
+        content = f.read()
+        f.close()
+        os.chdir(oldcwd)
+        shutil.rmtree(tmpdir, ignore_errors = True)
+        return content.split(" ")[2]
+    else:
+        return "UNKNOWN"
+
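+# Look for an "image-config" entry in each repo's repomd.xml, download the kickstart
+# files it references into the repo cache and return them as {"filename", "description"} dicts.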
+def get_kickstarts_from_repos(repometadata):
+    kickstarts = []
+    for repo in repometadata:
+        try:
+            root = xmlparse(repo["repomd"])
+        except SyntaxError:
+            raise CreatorError("repomd.xml syntax error.")
+
+        ns = root.getroot().tag
+        ns = ns[0:ns.rindex("}")+1]
+
+        for elm in root.getiterator("%sdata" % ns):
+            if elm.attrib["type"] == "image-config":
+                break
+
+        if elm.attrib["type"] != "image-config":
+            continue
+
+        location = elm.find("%slocation" % ns)
+        href = location.attrib["href"]
+        image_config = str(repo["baseurl"] + "/" + href)
+        # keep the compression suffix (e.g. ".gz") of the remote image-config file
+        suffix = os.path.splitext(href)[1] if href.endswith((".gz", ".bz2")) else ""
+        filename = str("%s/%s/image-config.xml%s" % (repo["cachedir"], repo["name"], suffix))
+
+        image_config = get_uncompressed_data_from_url(image_config,filename,repo["proxies"])
+
+        try:
+            root = xmlparse(image_config)
+        except SyntaxError:
+            raise CreatorError("image-config.xml syntax error.")
+
+        for elm in root.getiterator("config"):
+            path = elm.find("path").text
+            path = path.replace("images-config", "image-config")
+            description = elm.find("description").text
+            makedirs(os.path.dirname("%s/%s/%s" % (repo["cachedir"], repo["name"], path)))
+            url = path
+            if "http" not in path:
+                url = str(repo["baseurl"] + "/" + path)
+            filename = str("%s/%s/%s" % (repo["cachedir"], repo["name"], path))
+            path = myurlgrab(url, filename, repo["proxies"])
+            kickstarts.append({"filename":path,"description":description})
+    return kickstarts
+
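+# Interactively ask the user to pick one of the available kickstart files and return its path.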
+def select_ks(ksfiles):
+    print "Available kickstart files:"
+    i = 0
+    for ks in ksfiles:
+        i += 1
+        print "\t%d. %s (%s)" % (i, ks["description"], os.path.basename(ks["filename"]))
+    while True:
+        choice = raw_input("Please input your choice and press ENTER. [1..%d] ? " % i)
+        if choice.lower() == "q":
+            sys.exit(1)
+        if choice.isdigit():
+            choice = int(choice)
+            if choice >= 1 and choice <= i:
+                break
+
+    return ksfiles[choice-1]["filename"]
+
+
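+# Parse a patterns.xml file and return the packages required by the pattern
+# whose name or summary matches the given group.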
+def get_pkglist_in_patterns(group, patterns):
+    found = False
+    pkglist = []
+    try:
+        root = xmlparse(patterns)
+    except SyntaxError:
+        raise SyntaxError("%s syntax error." % patterns)
+
+    for elm in list(root.getroot()):
+        ns = elm.tag
+        ns = ns[0:ns.rindex("}")+1]
+        name = elm.find("%sname" % ns)
+        summary = elm.find("%ssummary" % ns)
+        if name.text == group or summary.text == group:
+            found = True
+            break
+
+    if not found:
+        return pkglist
+
+    found = False
+    for requires in list(elm):
+        if requires.tag.endswith("requires"):
+            found = True
+            break
+
+    if not found:
+        return pkglist
+
+    for pkg in list(requires):
+        pkgname = pkg.attrib["name"]
+        if pkgname not in pkglist:
+            pkglist.append(pkgname)
+
+    return pkglist
+
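+# Parse a comps.xml file and return the packages of the group whose id or name
+# matches the given group.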
+def get_pkglist_in_comps(group, comps):
+    found = False
+    pkglist = []
+    try:
+        root = xmlparse(comps)
+    except SyntaxError:
+        raise SyntaxError("%s syntax error." % comps)
+
+    for elm in root.getiterator("group"):
+        id = elm.find("id")
+        name = elm.find("name")
+        if id.text == group or name.text == group:
+            packagelist = elm.find("packagelist")
+            found = True
+            break
+
+    if not found:
+        return pkglist
+
+    for require in elm.getiterator("packagereq"):
+        pkgname = require.text
+        if pkgname not in pkglist:
+            pkglist.append(pkgname)
+
+    return pkglist
+
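+# Check whether a binary is statically linked, based on the output of file(1).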
+def is_statically_linked(binary):
+    ret = False
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    filecmd = find_binary_path("file")
+    args = [ filecmd, binary ]
+    file = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=dev_null)
+    output = file.communicate()[0]
+    os.close(dev_null)
+    if output.find(", statically linked, ") > 0:
+        ret = True
+    return ret
+
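+# Register a statically linked qemu-arm with binfmt_misc and copy it into the target
+# root, so ARM binaries can run inside the image chroot on an x86 host.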
+def setup_qemu_emulator(rootdir, arch):
+    # mount binfmt_misc if it doesn't exist
+    if not os.path.exists("/proc/sys/fs/binfmt_misc"):
+        modprobecmd = find_binary_path("modprobe")
+        subprocess.call([modprobecmd, "binfmt_misc"])
+    if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
+        mountcmd = find_binary_path("mount")
+        subprocess.call([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])
+
+    # qemu_emulator is a special case, we can't use find_binary_path
+    # qemu emulator should be a statically-linked executable file
+    qemu_emulator = "/usr/bin/qemu-arm"
+    if not os.path.exists(qemu_emulator) or not is_statically_linked(qemu_emulator):
+        qemu_emulator = "/usr/bin/qemu-arm-static"
+    if not os.path.exists(qemu_emulator):
+        raise CreatorError("Please install a statically-linked qemu-arm")
+    if not os.path.exists(rootdir + "/usr/bin"):
+        makedirs(rootdir + "/usr/bin")
+    shutil.copy(qemu_emulator, rootdir + qemu_emulator)
+
+    # disable selinux, selinux will block qemu emulator to run
+    if os.path.exists("/usr/sbin/setenforce"):
+        subprocess.call(["/usr/sbin/setenforce", "0"])
+
+    node = "/proc/sys/fs/binfmt_misc/arm"
+    if is_statically_linked(qemu_emulator) and os.path.exists(node):
+        return qemu_emulator
+
+    # unregister it if it has been registered and is a dynamically-linked executable
+    if not is_statically_linked(qemu_emulator) and os.path.exists(node):
+        qemu_unregister_string = "-1\n"
+        fd = open("/proc/sys/fs/binfmt_misc/arm", "w")
+        fd.write(qemu_unregister_string)
+        fd.close()
+
+    # register qemu emulator for interpreting other arch executable file
+    if not os.path.exists(node):
+        qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
+        fd = open("/proc/sys/fs/binfmt_misc/register", "w")
+        fd.write(qemu_arm_string)
+        fd.close()
+
+    return qemu_emulator
+
+def create_release(config, destdir, name, outimages, release):
+    """ TODO: This functionality should really be in creator.py inside the
+    ImageCreator class. """
+
+    # For virtual machine images, we have a subdir for it, this is unnecessary
+    # for release
+    thatsubdir = None
+    for i in range(len(outimages)):
+        file = outimages[i]
+        if not os.path.isdir(file) and os.path.dirname(file) != destdir:
+            thatsubdir = os.path.dirname(file)
+            newfile = os.path.join(destdir, os.path.basename(file))
+            shutil.move(file, newfile)
+            outimages[i] = newfile
+    if thatsubdir:
+        shutil.rmtree(thatsubdir, ignore_errors = True)
+
+    """ Create release directory and files """
+    os.system ("cp %s %s/%s.ks" % (config, destdir, name))
+    # When building a release we want to make sure the .ks 
+    # file generates the same build even when --release= is not used.
+    fd = open(config, "r")
+    kscont = fd.read()
+    fd.close()
+    kscont = kscont.replace("@BUILD_ID@",release)
+    fd = open("%s/%s.ks" % (destdir,name), "w")
+    fd.write(kscont)
+    fd.close()
+    outimages.append("%s/%s.ks" % (destdir,name))
+
+    # Using system + mv, because of * in filename.
+    os.system ("mv %s/*-pkgs.txt %s/%s.packages" % (destdir, destdir, name))
+    outimages.append("%s/%s.packages" % (destdir,name))
+
+    d = os.listdir(destdir)
+    for f in d:
+        if f.endswith(".iso"):
+            ff = f.replace(".iso", ".img")
+            os.rename("%s/%s" %(destdir, f ), "%s/%s" %(destdir, ff))
+            outimages.append("%s/%s" %(destdir, ff))
+        elif f.endswith(".usbimg"):
+            ff = f.replace(".usbimg", ".img")
+            os.rename("%s/%s" %(destdir, f ), "%s/%s" %(destdir, ff))
+            outimages.append("%s/%s" %(destdir, ff))
+
+    fd = open(destdir + "/MANIFEST", "w")
+    d = os.listdir(destdir)
+    for f in d:
+        if f == "MANIFEST":
+            continue
+        if os.path.exists("/usr/bin/md5sum"):
+            p = subprocess.Popen(["/usr/bin/md5sum", "-b", "%s/%s" %(destdir, f )],
+                             stdout=subprocess.PIPE)
+            (md5sum, errorstr) = p.communicate()
+            if p.returncode != 0:
+                logging.warning("Can't generate md5sum for image %s/%s" %(destdir, f ))
+            else:
+                md5sum = md5sum.split(" ")[0]
+                fd.write(md5sum+" "+f+"\n")
+
+    outimages.append("%s/MANIFEST" % destdir)
+    fd.close()
+
+    """ Update the file list. """
+    updated_list = []
+    for file in outimages:
+        if os.path.exists("%s" % file):
+            updated_list.append(file)
+
+    return updated_list
+
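+# Print the host distribution information (/etc/*-release, /etc/issue) and the running kernel version.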
+def get_local_distro():
+    print "Local linux distribution:"
+    for file in glob.glob("/etc/*-release"):
+        fd = open(file, "r")
+        content = fd.read()
+        fd.close()
+        print content
+    if os.path.exists("/etc/issue"):
+        fd = open("/etc/issue", "r")
+        content = fd.read()
+        fd.close()
+        print content
+    print "Local Kernel version: " + os.uname()[2]
+
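+# Abort when the creator binary is installed under both /usr/local/bin and /usr/bin.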
+def check_mic_installation(argv):
+    creator_name = os.path.basename(argv[0])
+    if os.path.exists("/usr/local/bin/" + creator_name) \
+        and os.path.exists("/usr/bin/" + creator_name):
+        raise CreatorError("There are two mic2 installations existing, this will result in some unpredictable errors, the reason is installation path of mic2 binary is different from  installation path of mic2 source on debian-based distros, please remove one of them to ensure it can work normally.")
+
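+# Download the source rpms of the given binary packages from the "-source" repos into
+# <instroot>/usr/src/SRPMS, reusing SRPMs already present in the cache.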
+def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
+
+    def get_source_repometadata(repometadata):
+        src_repometadata=[]
+        for repo in repometadata:
+            if repo["name"].endswith("-source"):
+                src_repometadata.append(repo)
+        if src_repometadata:
+            return src_repometadata
+        return None
+
+    def get_src_name(srpm):
+        m = re.match(r"(.*)-(\d+.*)-(\d+\.\d+)\.src\.rpm", srpm)
+        if m:
+            return m.group(1)
+        return None    
+
+    src_repometadata = get_source_repometadata(repometadata)
+
+    if not src_repometadata:
+        print "No source repo found"
+        return None
+
+    src_pkgs = []
+    lpkgs_dict = {}
+    lpkgs_path = []
+    for repo in src_repometadata:
+        cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
+        lpkgs_path += glob.glob(cachepath)
+    
+    for lpkg in lpkgs_path:
+        lpkg_name = get_src_name(os.path.basename(lpkg))
+        lpkgs_dict[lpkg_name] = lpkg
+    localpkgs = lpkgs_dict.keys()
+    
+    cached_count = 0
+    destdir = instroot+'/usr/src/SRPMS'
+    if not os.path.exists(destdir):
+        os.makedirs(destdir)
+    
+    srcpkgset = set()
+    for _pkg in pkgs:
+        srcpkg_name = get_source_name(_pkg, repometadata)
+        if not srcpkg_name:
+            return None
+        srcpkgset.add(srcpkg_name)
+    
+    for pkg in list(srcpkgset):
+        if pkg in localpkgs:
+            cached_count += 1
+            shutil.copy(lpkgs_dict[pkg], destdir)
+            src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
+        else:
+            src_pkg = get_package(pkg, src_repometadata, 'src')
+            if src_pkg:
+                shutil.copy(src_pkg, destdir)            
+                src_pkgs.append(src_pkg)
+    print '--------------------------------------------------'
+    print "%d source packages gotten from cache" %cached_count
+
+    return src_pkgs
+
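+# Decorator that stores the given value on the decorated function as its "optparser" attribute (set only once).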
+def add_optparser(arg):
+    def decorate(f):
+        if not hasattr(f, "optparser"):
+            f.optparser = arg
+        return f
+    return decorate
diff --git a/micng/utils/pkgmanagers/__init__.py b/micng/utils/pkgmanagers/__init__.py
new file mode 100644 (file)
index 0000000..c18877e
--- /dev/null
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+
+import os
+from micng.utils.errors import *
+
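+# Registry of the available package manager backends (yum, zypp, ...): backends register
+# themselves here and one of them is chosen as the default.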
+class pkgManager:
+    def __init__(self):
+        self.managers = {}
+        self.default_pkg_manager = None
+
+    def register_pkg_manager(self, name, manager):
+#        print "Registering package manager: %s" % name
+        if not self.managers.has_key(name):
+            self.managers[name] = manager
+        
+    def unregister_pkg_manager(self, name):
+        if self.managers.has_key(name):
+            del self.managers[name]
+
+    def set_default_pkg_manager(self, name):
+        if self.managers.has_key(name):
+            self.default_pkg_manager = self.managers[name]
+            print "Use package manager %s" % name
+
+    def get_default_pkg_manager(self):
+        if self.default_pkg_manager:
+            return self.default_pkg_manager
+        else:
+            if self.managers.has_key("zypp"):
+                print "Use package manager zypp"
+                return self.managers["zypp"]
+            elif self.managers.has_key("yum"):
+                print "Use package manager yum"
+                return self.managers["yum"]
+            else:
+                keys = self.managers.keys()
+                if keys:
+                    print "Use package manager %s" % keys[0]
+                    return self.managers[keys[0]]
+                else:
+                    return None
+
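+    # Import every sibling module of this package and register the (name, class) pair it
+    # exports as "_pkgmgr"; modules that fail to import are skipped silently.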
+    def load_pkg_managers(self):
+        mydir = os.path.dirname(os.path.realpath(__file__))
+        for file in os.listdir(mydir):
+            if os.path.isfile(mydir + "/" + file) and file.endswith(".py") and file != "__init__.py":
+                pkgmgrmod = file[:file.rfind(".py")]
+                try:
+                    exec("import micng.utils.pkgmanagers.%s as %s " % (pkgmgrmod, pkgmgrmod))
+                    exec("pkgmgr = %s._pkgmgr" % pkgmgrmod)
+                    self.register_pkg_manager(pkgmgr[0], pkgmgr[1])
+                except:
+                    continue
+        if not self.managers.keys():
+            raise CreatorError("No packag manager available")
diff --git a/micng/utils/pkgmanagers/yumpkgmgr.py b/micng/utils/pkgmanagers/yumpkgmgr.py
new file mode 100644 (file)
index 0000000..0f7e2ee
--- /dev/null
@@ -0,0 +1,448 @@
+#
+# yumpkgmgr.py : yum utilities
+#
+# Copyright 2007, Red Hat  Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+import glob
+import os
+import sys
+import logging
+
+import yum
+import rpmUtils
+import pykickstart.parser
+
+import urlparse
+import urllib2 as u2
+import tempfile
+import shutil
+import subprocess
+
+from micng.utils.errors import *
+from micng.utils.fs_related import *
+from micng.imager.BaseImageCreator import ImageCreator
+
+class MyYumRepository(yum.yumRepo.YumRepository):
+    def __init__(self, repoid):
+        yum.yumRepo.YumRepository.__init__(self, repoid)
+        self.sslverify = False
+
+    def _setupGrab(self):
+        self.sslverify = False
+        yum.yumRepo.YumRepository._setupGrab(self)
+
+    def __del__(self):
+        pass
+
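+# Image-creation wrapper around yum.YumBase: writes its own yum.conf, selects the kickstart
+# packages and groups, and drives download and installation into the image root.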
+class Yum(yum.YumBase):
+    def __init__(self, creator = None, recording_pkgs=None):
+        if not isinstance(creator, ImageCreator):
+            raise CreatorError("Invalid argument: creator")
+        yum.YumBase.__init__(self)
+        
+        self.creator = creator
+        
+        if self.creator.target_arch:
+            if rpmUtils.arch.arches.has_key(self.creator.target_arch):
+                self.arch.setup_arch(self.creator.target_arch)
+            else:
+                raise CreatorError("Invalid target arch: %s" % self.creator.target_arch)
+
+        self.__recording_pkgs = recording_pkgs
+        self.__pkgs_content = {}
+
+    def doFileLogSetup(self, uid, logfile):
+        # don't do the file log for the livecd as it can lead to open fds
+        # being left and an inability to clean up after ourself
+        pass
+
+    def close(self):
+        try:
+            os.unlink(self.conf.installroot + "/yum.conf")
+        except:
+            pass
+        self.closeRpmDB()
+        yum.YumBase.close(self)
+        self._delRepos()
+        self._delSacks()
+
+        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
+            for i in range(3, os.sysconf("SC_OPEN_MAX")):
+                try:
+                    os.close(i)
+                except:
+                    pass
+
+    def __del__(self):
+        pass
+
+    def _writeConf(self, confpath, installroot):
+        conf  = "[main]\n"
+        conf += "installroot=%s\n" % installroot
+        conf += "cachedir=/var/cache/yum\n"
+        conf += "plugins=0\n"
+        conf += "reposdir=\n"
+        conf += "failovermethod=priority\n"
+        conf += "http_caching=packages\n"
+        conf += "sslverify=0\n"
+
+        f = file(confpath, "w+")
+        f.write(conf)
+        f.close()
+
+        os.chmod(confpath, 0644)
+
+    def _cleanupRpmdbLocks(self, installroot):
+        # cleans up temporary files left by bdb so that differing
+        # versions of rpm don't cause problems
+        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
+            os.unlink(f)
+
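+    # Write a minimal yum.conf, clean stale rpmdb locks and initialize yum (config,
+    # transaction set, rpmdb, repos and package sacks) for the given install root.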
+    def setup(self, confpath, installroot):
+        self._writeConf(confpath, installroot)
+        self._cleanupRpmdbLocks(installroot)
+        self.doConfigSetup(fn = confpath, root = installroot)
+        self.conf.cache = 0
+        self.doTsSetup()
+        self.doRpmDBSetup()
+        self.doRepoSetup()
+        self.doSackSetup()
+
+    def selectPackage(self, pkg):
+        """Select a given package.  Can be specified with name.arch or name*"""
+        try:
+            self.install(pattern = pkg)
+            return None
+        except yum.Errors.InstallError, e:
+            return e
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def deselectPackage(self, pkg):
+        """Deselect package.  Can be specified as name.arch or name*"""
+        sp = pkg.rsplit(".", 2)
+        txmbrs = []
+        if len(sp) == 2:
+            txmbrs = self.tsInfo.matchNaevr(name=sp[0], arch=sp[1])
+
+        if len(txmbrs) == 0:
+            exact, match, unmatch = yum.packages.parsePackages(self.pkgSack.returnPackages(), [pkg], casematch=1)
+            for p in exact + match:
+                txmbrs.append(p)
+
+        if len(txmbrs) > 0:
+            for x in txmbrs:
+                self.tsInfo.remove(x.pkgtup)
+                # we also need to remove from the conditionals
+                # dict so that things don't get pulled back in as a result
+                # of them.  yes, this is ugly.  conditionals should die.
+                for req, pkgs in self.tsInfo.conditionals.iteritems():
+                    if x in pkgs:
+                        pkgs.remove(x)
+                        self.tsInfo.conditionals[req] = pkgs
+        else:
+            logging.warn("No such package %s to remove" %(pkg,))
+
+    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
+        try:
+            yum.YumBase.selectGroup(self, grp)
+            if include == pykickstart.parser.GROUP_REQUIRED:
+                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
+            elif include == pykickstart.parser.GROUP_ALL:
+                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
+            return None
+        except (yum.Errors.InstallError, yum.Errors.GroupsError), e:
+            return e
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def __checkAndDownloadURL(self, u2opener, url, savepath):
+        try:
+            if u2opener:
+                f = u2opener.open(url)
+            else:
+                f = u2.urlopen(url)
+        except u2.HTTPError, httperror:
+            if httperror.code in (404, 503):
+                return None
+            else:
+                raise CreatorError(httperror)
+        except OSError, oserr:
+            if oserr.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except IOError, oserr:
+            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except u2.URLError, err:
+            raise CreatorError(err)
+
+        # save to file
+        licf = open(savepath, "w")
+        licf.write(f.read())
+        licf.close()
+        f.close()
+
+        return savepath
+
+    def __pagerFile(self, savepath):
+        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
+            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
+        else:
+            pagers = ('less', 'more')
+
+        file_showed = None
+        for pager in pagers:
+            try:
+                subprocess.call([pager, savepath])
+            except OSError:
+                continue
+            else:
+                file_showed = True
+                break
+        if not file_showed:
+            f = open(savepath)
+            print f.read()
+            f.close()
+            raw_input('press <ENTER> to continue...')
+
+    def checkRepositoryEULA(self, name, repo):
+        """ This function is to check the LICENSE file if provided. """
+
+        # when proxy needed, make urllib2 follow it
+        proxy = repo.proxy
+        proxy_username = repo.proxy_username
+        proxy_password = repo.proxy_password
+
+        handlers = []
+        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
+        u2opener = None
+        if proxy:
+            if proxy_username:
+                proxy_netloc = urlparse.urlsplit(proxy).netloc
+                if proxy_password:
+                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
+                else:
+                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
+            else:
+                proxy_url = proxy
+
+            proxy_support = u2.ProxyHandler({'http': proxy_url,
+                                             'ftp': proxy_url})
+            handlers.append(proxy_support)
+
+        # download all remote files to one temp dir
+        baseurl = None
+        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
+
+        for url in repo.baseurl:
+            if not url.endswith('/'):
+                url += '/'
+            tmphandlers = handlers
+            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+            if scheme not in ("http", "https", "ftp", "ftps", "file"):
+                raise CreatorError("Error: invalid url %s" % url)
+            if '@' in host:
+                try:
+                    user_pass, host = host.split('@', 1)
+                    if ':' in user_pass:
+                        user, password = user_pass.split(':', 1)
+                except ValueError, e:
+                    raise CreatorError('Bad URL: %s' % url)
+                print "adding HTTP auth: %s, %s" %(user, password)
+                auth_handler.add_password(None, host, user, password)
+                tmphandlers.append(auth_handler)
+                url = scheme + "://" + host + path + parm + query + frag
+            if len(tmphandlers) != 0:
+                u2opener = u2.build_opener(*tmphandlers)
+            # try to download
+            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
+            repo_eula_path = self.__checkAndDownloadURL(
+                                    u2opener,
+                                    repo_eula_url,
+                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
+            if repo_eula_path:
+                # found
+                baseurl = url
+                break
+
+        if not baseurl:
+            return True
+
+        # show the license file
+        print 'For the software packages in this yum repo:'
+        print '    %s: %s' % (name, baseurl)
+        print 'There is an "End User License Agreement" file that need to be checked.'
+        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
+        raw_input('press <ENTER> to continue...')
+
+        self.__pagerFile(repo_eula_path)
+
+        # Asking for the "Accept/Decline"
+        accept = True
+        while accept:
+            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
+            if input_accept.upper() in ('YES', 'Y'):
+                break
+            elif input_accept.upper() in ('NO', 'N'):
+                accept = None
+                print 'Will not install pkgs from this repo.'
+
+        if not accept:
+            #cleanup
+            shutil.rmtree(repo_lic_dir)
+            return None
+
+        # try to find support_info.html for extra information
+        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
+        repo_info_path = self.__checkAndDownloadURL(
+                                u2opener,
+                                repo_info_url,
+                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
+        if repo_info_path:
+            print 'There is one more file in the repo with additional support information; please read it.'
+            raw_input('press <ENTER> to continue...')
+            self.__pagerFile(repo_info_path)
+
+        #cleanup
+        shutil.rmtree(repo_lic_dir)
+        return True
+
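+    # Build a MyYumRepository for a kickstart repo line: substitute $basearch/$arch in the URL,
+    # check the repo's LICENSE.txt (EULA), apply the yum.config.RepoConf defaults and add it to the repo set.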
+    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
+        def _varSubstitute(option):
+            # takes a variable and substitutes like yum configs do
+            option = option.replace("$basearch", rpmUtils.arch.getBaseArch())
+            option = option.replace("$arch", rpmUtils.arch.getCanonArch())
+            return option
+
+        repo = MyYumRepository(name)
+        repo.sslverify = False
+
+        """Set proxy"""
+        repo.proxy = proxy
+        repo.proxy_username = proxy_username
+        repo.proxy_password = proxy_password
+
+        if url:
+            repo.baseurl.append(_varSubstitute(url))
+
+        # check LICENSE files
+        if not self.checkRepositoryEULA(name, repo):
+            return None
+
+        if mirrorlist:
+            repo.mirrorlist = _varSubstitute(mirrorlist)
+        conf = yum.config.RepoConf()
+        for k, v in conf.iteritems():
+            if v or not hasattr(repo, k):
+                repo.setAttribute(k, v)
+        repo.basecachedir = self.conf.cachedir
+        repo.failovermethod = "priority"
+        repo.metadata_expire = 0
+        # Enable gpg check for verifying corrupt packages
+        repo.gpgcheck = 1
+        repo.enable()
+        repo.setup(0)
+        repo.setCallback(TextProgress())
+        self.repos.add(repo)
+        return repo
+
+    def installHasFile(self, file):
+        provides_pkg = self.whatProvides(file, None, None)
+        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
+        for p in dlpkgs:
+            for q in provides_pkg:
+                if (p == q):
+                    return True
+        return False
+
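+    # Resolve the transaction, check the total package size against "checksize", download any
+    # packages missing from the cache and run the rpm transaction inside the install root.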
+    def runInstall(self, checksize = 0):
+        os.environ["HOME"] = "/"
+        try:
+            (res, resmsg) = self.buildTransaction()
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" %(e,))
+        if res != 2:
+            raise CreatorError("Failed to build transaction : %s" % str.join("\n", resmsg))
+
+        dlpkgs = map(lambda x: x.po, filter(lambda txmbr: txmbr.ts_state in ("i", "u"), self.tsInfo.getMembers()))
+
+        # record the total size of installed pkgs
+        pkgs_total_size = sum(map(lambda x: int(x.size), dlpkgs))
+
+        # check needed size before actually download and install
+        if checksize and pkgs_total_size > checksize:
+            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
+
+        if self.__recording_pkgs:
+            # record all pkg and the content
+            for pkg in dlpkgs:
+                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name, pkg.printVer(), pkg.arch)
+                self.__pkgs_content[pkg_long_name] = pkg.files
+
+        total_count = len(dlpkgs)
+        cached_count = 0
+        print "Checking packages cache and packages integrity..."
+        for po in dlpkgs:
+            local = po.localPkg()
+            if not os.path.exists(local):
+                continue
+            if not self.verifyPkg(local, po, False):
+                print "Package %s is damaged: %s" % (os.path.basename(local), local)
+            else:
+                cached_count +=1
+        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
+        try:
+            self.downloadPkgs(dlpkgs)
+            # FIXME: sigcheck?
+    
+            self.initActionTs()
+            self.populateTs(keepold=0)
+            deps = self.ts.check()
+            if len(deps) != 0:
+                """ This isn't fatal, Ubuntu has this issue but it is ok. """
+                print deps
+                logging.warn("Dependency check failed!")
+            rc = self.ts.order()
+            if rc != 0:
+                raise CreatorError("ordering packages for installation failed!")
+    
+            # FIXME: callback should be refactored a little in yum
+            sys.path.append('/usr/share/yum-cli')
+            import callback
+            cb = callback.RPMInstallCallback()
+            cb.tsInfo = self.tsInfo
+            cb.filelog = False
+            ret = self.runTransaction(cb)
+            print ""
+            self._cleanupRpmdbLocks(self.conf.installroot)
+            return ret
+        except yum.Errors.RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except yum.Errors.YumBaseError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def getAllContent(self):
+        return self.__pkgs_content
+
+_pkgmgr = ["yum", Yum]
diff --git a/micng/utils/pkgmanagers/zypppkgmgr.py b/micng/utils/pkgmanagers/zypppkgmgr.py
new file mode 100644 (file)
index 0000000..49bc88f
--- /dev/null
@@ -0,0 +1,752 @@
+#!/usr/bin/python
+
+import os
+import sys
+import glob
+import re
+import zypp
+import rpm
+import shutil
+import tempfile
+import urlparse
+import urllib2 as u2
+from micng.utils.errors import *
+from micng.imager.BaseImageCreator import ImageCreator
+import pykickstart.parser
+from micng.utils.fs_related import *
+from micng.utils.misc import *
+from micng.utils.rpmmisc import *
+
+class RepositoryStub:
+    def __init__(self):
+        self.name = None
+        self.baseurl = []
+        self.mirrorlist = None
+        self.proxy = None
+        self.proxy_username = None
+        self.proxy_password = None
+        self.includepkgs = None
+        self.exclude = None
+
+        self.enabled = True
+        self.autorefresh = True
+        self.keeppackages = True
+
+class RepoError(CreatorError):
+    pass
+
+class RpmError(CreatorError):
+    pass
+
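+# libzypp-based backend: repos are managed through zypp.RepoManager, the package set is resolved
+# with the zypp pool/resolver, and the rpms are then downloaded and installed through a plain rpm
+# transaction set.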
+class Zypp:
+    def __init__(self, creator = None, recording_pkgs=None):
+        if not isinstance(creator, ImageCreator):
+            raise CreatorError("Invalid argument: creator")
+
+        self.__recording_pkgs = recording_pkgs
+        self.__pkgs_content = {}
+        self.creator = creator
+        self.repos = []
+        self.packages = []
+        self.patterns = []
+        self.localpkgs = {}
+        self.repo_manager = None
+        self.repo_manager_options = None
+        self.Z = None
+        self.ts = None
+        self.probFilterFlags = []
+        self.bin_rpm = find_binary_path("rpm")
+        self.incpkgs = []
+        self.excpkgs = []
+
+    def doFileLogSetup(self, uid, logfile):
+        # don't do the file log for the livecd as it can lead to open fds
+        # being left and an inability to clean up after ourself
+        pass
+
+    def closeRpmDB(self):
+        pass
+
+    def close(self):
+        try:
+            os.unlink(self.installroot + "/yum.conf")
+        except:
+            pass
+        self.closeRpmDB()
+        if not os.path.exists("/etc/fedora-release") and not os.path.exists("/etc/meego-release"):
+            for i in range(3, os.sysconf("SC_OPEN_MAX")):
+                try:
+                    os.close(i)
+                except:
+                    pass
+        if self.ts:
+            self.ts.closeDB()
+            self.ts = None
+
+    def __del__(self):
+        self.close()
+
+    def _writeConf(self, confpath, installroot):
+        conf  = "[main]\n"
+        conf += "installroot=%s\n" % installroot
+        conf += "cachedir=/var/cache/yum\n"
+        conf += "plugins=0\n"
+        conf += "reposdir=\n"
+        conf += "failovermethod=priority\n"
+        conf += "http_caching=packages\n"
+
+        f = file(confpath, "w+")
+        f.write(conf)
+        f.close()
+
+        os.chmod(confpath, 0644)
+
+    def _cleanupRpmdbLocks(self, installroot):
+        # cleans up temporary files left by bdb so that differing
+        # versions of rpm don't cause problems
+        for f in glob.glob(installroot + "/var/lib/rpm/__db*"):
+            os.unlink(f)
+
+    def setup(self, confpath, installroot):
+        self._writeConf(confpath, installroot)
+        self._cleanupRpmdbLocks(installroot)
+        self.installroot = installroot
+
+    def selectPackage(self, pkg):
+        """ Select a given package or package pattern, can be specified with name.arch or name* or *name """
+        if not self.Z:
+            self.__initialize_zypp()
+        
+        found = False
+        startx = pkg.startswith("*")
+        endx = pkg.endswith("*")
+        ispattern = startx or endx
+        sp = pkg.rsplit(".", 2)
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "package":
+                name = "%s" % item.name()
+                if not ispattern:
+                    if name in self.incpkgs or name in self.excpkgs:
+                        found = True
+                        break
+                    if len(sp) == 2:
+                        arch = "%s" % item.arch()
+                        if name == sp[0] and arch == sp[1]:
+                            found = True
+                            if name not in self.packages:
+                                self.packages.append(name)
+                                item.status().setToBeInstalled (zypp.ResStatus.USER)
+                            break
+                    else:
+                        if name == sp[0]:
+                            found = True
+                            if name not in self.packages:
+                                self.packages.append(name)
+                                item.status().setToBeInstalled (zypp.ResStatus.USER)
+                            break
+                else:
+                    if name in self.incpkgs or name in self.excpkgs:
+                        found = True
+                        continue
+                    if startx and name.endswith(sp[0][1:]):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+
+                    if endx and name.startswith(sp[0][:-1]):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+        if found:
+            return None
+        else:
+            e = CreatorError("Unable to find package: %s" % (pkg,))
+            return e
+
+    def deselectPackage(self, pkg):
+        """Deselect package.  Can be specified as name.arch or name*"""
+        
+        if not self.Z:
+            self.__initialize_zypp()
+        
+        startx = pkg.startswith("*")
+        endx = pkg.endswith("*")
+        ispattern = startx or endx
+        sp = pkg.rsplit(".", 2)
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "package":
+                name = "%s" % item.name()
+                if not ispattern:
+                    if len(sp) == 2:
+                        arch = "%s" % item.arch()
+                        if name == sp[0] and arch == sp[1]:
+                            if item.status().isToBeInstalled():
+                                item.status().resetTransact(zypp.ResStatus.USER)
+                            if name in self.packages:
+                                self.packages.remove(name)
+                            break
+                    else:
+                        if name == sp[0]:
+                            if item.status().isToBeInstalled():
+                                item.status().resetTransact(zypp.ResStatus.USER)
+                            if name in self.packages:
+                                self.packages.remove(name)
+                            break                             
+                else:
+                    if startx and name.endswith(sp[0][1:]):
+                        if item.status().isToBeInstalled():
+                            item.status().resetTransact(zypp.ResStatus.USER)
+                        if name in self.packages:
+                            self.packages.remove(name)
+
+                    if endx and name.startswith(sp[0][:-1]):
+                        if item.status().isToBeInstalled():
+                            item.status().resetTransact(zypp.ResStatus.USER)
+                        if name in self.packages:
+                            self.packages.remove(name)
+    
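+    # Mark every package from the include/exclude lists for installation: include packages are
+    # taken only from the repo aliased "...include", exclude packages only from repos not aliased "...exclude".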
+    def __selectIncpkgs(self):        
+        found = False
+        for pkg in self.incpkgs:
+            for item in self.Z.pool():
+                kind = "%s" % item.kind()
+                if kind == "package":
+                    name = "%s" % item.name()
+                    repoalias = "%s" % item.repoInfo().alias()
+                    if name == pkg and repoalias.endswith("include"):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+                        break         
+        if not found:
+            raise CreatorError("Unable to find package: %s" % (pkg,))
+    
+    def __selectExcpkgs(self):    
+        found = False        
+        for pkg in self.excpkgs:
+            for item in self.Z.pool():
+                kind = "%s" % item.kind()
+                if kind == "package":
+                    name = "%s" % item.name()
+                    repoalias = "%s" % item.repoInfo().alias()
+                    if name == pkg and not repoalias.endswith("exclude"):
+                        found = True
+                        if name not in self.packages:
+                            self.packages.append(name)
+                            item.status().setToBeInstalled (zypp.ResStatus.USER)
+                        break                     
+        if not found:
+            raise CreatorError("Unable to find package: %s" % (pkg,))
+
+        
+    def selectGroup(self, grp, include = pykickstart.parser.GROUP_DEFAULT):
+        if not self.Z:
+            self.__initialize_zypp()
+        found = False
+        for item in self.Z.pool():
+            kind = "%s" % item.kind()
+            if kind == "pattern":
+                summary = "%s" % item.summary()
+                name = "%s" % item.name()
+                if name == grp or summary == grp:
+                    found = True
+                    if name not in self.patterns:
+                        self.patterns.append(name)
+                        item.status().setToBeInstalled (zypp.ResStatus.USER)
+                    break
+                
+        if found:
+            if include == pykickstart.parser.GROUP_REQUIRED:
+                map(lambda p: self.deselectPackage(p), grp.default_packages.keys())
+            elif include == pykickstart.parser.GROUP_ALL:
+                map(lambda p: self.selectPackage(p), grp.optional_packages.keys())
+            return None
+        else:
+            e = CreatorError("Unable to find pattern: %s" % (grp,))
+            return e
+
+    def __checkAndDownloadURL(self, u2opener, url, savepath):
+        try:
+            if u2opener:
+                f = u2opener.open(url)
+            else:
+                f = u2.urlopen(url)
+        except u2.HTTPError, httperror:
+            if httperror.code in (404, 503):
+                return None
+            else:
+                raise CreatorError(httperror)
+        except OSError, oserr:
+            if oserr.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except IOError, oserr:
+            if hasattr(oserr, "reason") and oserr.reason.errno == 2:
+                return None
+            else:
+                raise CreatorError(oserr)
+        except u2.URLError, err:
+            raise CreatorError(err)
+
+        # save to file
+        licf = open(savepath, "w")
+        licf.write(f.read())
+        licf.close()
+        f.close()
+
+        return savepath
+
+    def __pagerFile(self, savepath):
+        if os.path.splitext(savepath)[1].upper() in ('.HTM', '.HTML'):
+            pagers = ('w3m', 'links', 'lynx', 'less', 'more')
+        else:
+            pagers = ('less', 'more')
+
+        file_showed = None
+        for pager in pagers:
+            try:
+                subprocess.call([pager, savepath])
+            except OSError:
+                continue
+            else:
+                file_showed = True
+                break
+        if not file_showed:
+            f = open(savepath)
+            print f.read()
+            f.close()
+            raw_input('press <ENTER> to continue...')
+
+    def checkRepositoryEULA(self, name, repo):
+        """ This function is to check the LICENSE file if provided. """
+
+        # when proxy needed, make urllib2 follow it
+        proxy = repo.proxy
+        proxy_username = repo.proxy_username
+        proxy_password = repo.proxy_password
+
+        handlers = []
+        auth_handler = u2.HTTPBasicAuthHandler(u2.HTTPPasswordMgrWithDefaultRealm())
+        u2opener = None
+        if proxy:
+            if proxy_username:
+                proxy_netloc = urlparse.urlsplit(proxy).netloc
+                if proxy_password:
+                    proxy_url = 'http://%s:%s@%s' % (proxy_username, proxy_password, proxy_netloc)
+                else:
+                    proxy_url = 'http://%s@%s' % (proxy_username, proxy_netloc)
+            else:
+                proxy_url = proxy
+
+            proxy_support = u2.ProxyHandler({'http': proxy_url,
+                                             'ftp': proxy_url})
+            handlers.append(proxy_support)
+
+        # download all remote files to one temp dir
+        baseurl = None
+        repo_lic_dir = tempfile.mkdtemp(prefix = 'repolic')
+
+        for url in repo.baseurl:
+            if not url.endswith('/'):
+                url += '/'
+            tmphandlers = handlers
+            (scheme, host, path, parm, query, frag) = urlparse.urlparse(url)
+            if scheme not in ("http", "https", "ftp", "ftps", "file"):
+                raise CreatorError("Error: invalid url %s" % url)
+            if '@' in host:
+                try:
+                    user_pass, host = host.split('@', 1)
+                    if ':' in user_pass:
+                        user, password = user_pass.split(':', 1)
+                except ValueError, e:
+                    raise CreatorError('Bad URL: %s' % url)
+                print "adding HTTP auth: %s, %s" %(user, password)
+                auth_handler.add_password(None, host, user, password)
+                tmphandlers.append(auth_handler)
+                url = scheme + "://" + host + path + parm + query + frag
+            if len(tmphandlers) != 0:
+                u2opener = u2.build_opener(*tmphandlers)
+            # try to download
+            repo_eula_url = urlparse.urljoin(url, "LICENSE.txt")
+            repo_eula_path = self.__checkAndDownloadURL(
+                                    u2opener,
+                                    repo_eula_url,
+                                    os.path.join(repo_lic_dir, repo.id + '_LICENSE.txt'))
+            if repo_eula_path:
+                # found
+                baseurl = url
+                break
+
+        if not baseurl:
+            return True
+
+        # show the license file
+        print 'For the software packages in this yum repo:'
+        print '    %s: %s' % (name, baseurl)
+        print 'There is an "End User License Agreement" file that need to be checked.'
+        print 'Please read the terms and conditions outlined in it and answer the followed qustions.'
+        raw_input('press <ENTER> to continue...')
+
+        self.__pagerFile(repo_eula_path)
+
+        # Asking for the "Accept/Decline"
+        accept = True
+        while accept:
+            input_accept = raw_input('Would you agree to the terms and conditions outlined in the above End User License Agreement? (Yes/No): ')
+            if input_accept.upper() in ('YES', 'Y'):
+                break
+            elif input_accept.upper() in ('NO', 'N'):
+                accept = None
+                print 'Will not install pkgs from this repo.'
+
+        if not accept:
+            #cleanup
+            shutil.rmtree(repo_lic_dir)
+            return None
+
+        # try to find support_info.html for extra information
+        repo_info_url = urlparse.urljoin(baseurl, "support_info.html")
+        repo_info_path = self.__checkAndDownloadURL(
+                                u2opener,
+                                repo_info_url,
+                                os.path.join(repo_lic_dir, repo.id + '_support_info.html'))
+        if repo_info_path:
+            print 'There is one more file in the repo with additional support information; please read it.'
+            raw_input('press <ENTER> to continue...')
+            self.__pagerFile(repo_info_path)
+
+        #cleanup
+        shutil.rmtree(repo_lic_dir)
+        return True
+
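+    # Register the repo with zypp's RepoManager (after the same EULA check as the yum backend) and
+    # build its metadata cache; repos carrying include/exclude package lists get an alias suffix
+    # that __selectIncpkgs/__selectExcpkgs match on later.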
+    def addRepository(self, name, url = None, mirrorlist = None, proxy = None, proxy_username = None, proxy_password = None, inc = None, exc = None):
+        if not self.repo_manager:
+            self.__initialize_repo_manager()
+
+        repo = RepositoryStub()
+        repo.name = name
+        repo.id = name
+        repo.proxy = proxy
+        repo.proxy_username = proxy_username
+        repo.proxy_password = proxy_password
+        repo.baseurl.append(url)
+        repo_alias = repo.id
+        if inc:
+            repo_alias = name + "include"
+            self.incpkgs = inc
+        if exc:
+            repo_alias = name + "exclude"
+            self.excpkgs = exc
+
+        # check LICENSE files
+        if not self.checkRepositoryEULA(name, repo):
+            return None
+
+        if mirrorlist:
+            repo.mirrorlist = mirrorlist
+
+        # Enable gpg check for verifying corrupt packages
+        repo.gpgcheck = 1
+        self.repos.append(repo)
+
+
+        try:
+            repo_info = zypp.RepoInfo()
+            repo_info.setAlias(repo_alias)
+            repo_info.setName(repo.name)
+            repo_info.setEnabled(repo.enabled)
+            repo_info.setAutorefresh(repo.autorefresh)
+            repo_info.setKeepPackages(repo.keeppackages)
+            repo_info.addBaseUrl(zypp.Url(repo.baseurl[0]))
+            self.repo_manager.addRepository(repo_info)
+            self.__build_repo_cache(name)
+        except RuntimeError, e:
+            raise CreatorError("%s" % (e,))
+
+        return repo
+
+    def installHasFile(self, file):
+        return False
+
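+    # Resolve the package set with zypp, check the total install size against "checksize",
+    # report cache hits, then download and install the resolved rpms.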
+    def runInstall(self, checksize = 0):
+        if self.incpkgs:
+            self.__selectIncpkgs()
+        if self.excpkgs:
+            self.__selectExcpkgs()
+        
+        os.environ["HOME"] = "/"
+        self.buildTransaction()
+
+        todo = zypp.GetResolvablesToInsDel(self.Z.pool())
+        installed_pkgs = todo._toInstall
+        dlpkgs = []
+        for item in installed_pkgs:
+            if not zypp.isKindPattern(item):
+                dlpkgs.append(item)
+
+        # record the total size of installed pkgs
+        pkgs_total_size = sum(map(lambda x: int(x.installSize()), dlpkgs))
+
+        # check needed size before actually download and install
+        if checksize and pkgs_total_size > checksize:
+            raise CreatorError("Size of specified root partition in kickstart file is too small to install all selected packages.")
+
+        if self.__recording_pkgs:
+            # record all pkg and the content
+            for pkg in dlpkgs:
+                pkg_long_name = "%s-%s.%s.rpm" % (pkg.name(), pkg.edition(), pkg.arch())
+                self.__pkgs_content[pkg_long_name] = {} #TBD: to get file list
+
+        total_count = len(dlpkgs)
+        cached_count = 0
+        localpkgs = self.localpkgs.keys()
+        print "Checking packages cache and packages integrity..."
+        for po in dlpkgs:
+            """ Check if it is cached locally """
+            if po.name() in localpkgs:
+                cached_count += 1
+            else:
+                local = self.getLocalPkgPath(po)
+                if os.path.exists(local):
+                    if self.checkPkg(local) != 0:
+                        os.unlink(local)
+                    else:
+                        cached_count += 1
+        print "%d packages to be installed, %d packages gotten from cache, %d packages to be downloaded" % (total_count, cached_count, total_count - cached_count)
+        try:
+            print "downloading packages..."
+            self.downloadPkgs(dlpkgs)
+            self.installPkgs(dlpkgs)
+    
+        except RepoError, e:
+            raise CreatorError("Unable to download from repo : %s" % (e,))
+        except RpmError, e:
+            raise CreatorError("Unable to install: %s" % (e,))
+
+    def getAllContent(self):
+        return self.__pkgs_content
+
+    def __initialize_repo_manager(self):
+        if self.repo_manager:
+            return
+
+        """ Clean up repo metadata """
+        shutil.rmtree(self.creator.cachedir + "/var", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/etc", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/raw", ignore_errors = True)
+        shutil.rmtree(self.creator.cachedir + "/solv", ignore_errors = True)
+        
+        zypp.KeyRing.setDefaultAccept( zypp.KeyRing.ACCEPT_UNSIGNED_FILE
+                                       | zypp.KeyRing.ACCEPT_VERIFICATION_FAILED
+                                       | zypp.KeyRing.ACCEPT_UNKNOWNKEY
+                                       | zypp.KeyRing.TRUST_KEY_TEMPORARILY
+                                     )
+        self.repo_manager_options = zypp.RepoManagerOptions(zypp.Pathname(self.creator._instroot))
+        self.repo_manager_options.knownReposPath = zypp.Pathname(self.creator.cachedir + "/etc/zypp/repos.d")
+        self.repo_manager_options.repoCachePath = zypp.Pathname(self.creator.cachedir + "/var/cache/zypp")
+        self.repo_manager_options.repoRawCachePath = zypp.Pathname(self.creator.cachedir + "/raw")
+        self.repo_manager_options.repoSolvCachePath = zypp.Pathname(self.creator.cachedir + "/solv")
+        self.repo_manager_options.repoPackagesCachePath = zypp.Pathname(self.creator.cachedir + "/packages")
+        
+        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
+
+
+    def __build_repo_cache(self, name):
+        repos = self.repo_manager.knownRepositories()
+        for repo in repos:
+            if not repo.enabled():
+                continue
+            reponame = "%s" % repo.name()
+            if reponame != name:
+                continue
+            if self.repo_manager.isCached( repo ):
+                return
+            #print "Retrieving repo metadata from %s ..." % repo.url()
+            self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
+
+
+    def __initialize_zypp(self):
+        if self.Z:
+            return
+
+        zconfig = zypp.ZConfig_instance()
+
+        """ Set system architecture """
+        if self.creator.target_arch and self.creator.target_arch.startswith("arm"):
+            arches = ["armv7l", "armv7nhl", "armv7hl"]
+            if self.creator.target_arch not in arches:
+                raise CreatorError("Invalid architecture: %s" % self.creator.target_arch)
+            arch_map = {}
+            if self.creator.target_arch == "armv7l":
+                arch_map["armv7l"] = zypp.Arch_armv7l()
+            elif self.creator.target_arch == "armv7nhl":
+                arch_map["armv7nhl"] = zypp.Arch_armv7nhl()
+            elif self.creator.target_arch == "armv7hl":
+                arch_map["armv7hl"] = zypp.Arch_armv7hl() 
+            zconfig.setSystemArchitecture(arch_map[self.creator.target_arch])
+
+        print "zypp architecture: %s" % zconfig.systemArchitecture()
+
+        """ repoPackagesCachePath is corrected by this """
+        self.repo_manager = zypp.RepoManager(self.repo_manager_options)
+        repos = self.repo_manager.knownRepositories()
+        for repo in repos:
+            if not repo.enabled():
+                continue
+            if not self.repo_manager.isCached( repo ):
+                print "Retrieving repo metadata from %s ..." % repo.url()
+                self.repo_manager.buildCache( repo, zypp.RepoManager.BuildIfNeeded )
+            else:
+                self.repo_manager.refreshMetadata(repo, zypp.RepoManager.BuildIfNeeded)
+            self.repo_manager.loadFromCache(repo)
+
+        self.Z = zypp.ZYppFactory_instance().getZYpp()
+        self.Z.initializeTarget( zypp.Pathname(self.creator._instroot) )
+        self.Z.target().load()
+
+
+    def buildTransaction(self):
+        if not self.Z.resolver().resolvePool():
+            print "Problem count: %d" % len(self.Z.resolver().problems())
+            for problem in self.Z.resolver().problems():
+                print "Problem: %s, %s" % (problem.description().decode("utf-8"), problem.details().decode("utf-8"))
+
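+    # expected layout in the zypp package cache:
+    #   <packagesPath>/<arch>/<name>-<version>-<release>.<arch>.rpm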
+    def getLocalPkgPath(self, po):
+        repoinfo = po.repoInfo()
+        name = po.name()
+        cacheroot = repoinfo.packagesPath()
+        arch =  po.arch()
+        edition = po.edition()
+        version = "%s-%s" % (edition.version(), edition.release())
+        pkgpath = "%s/%s/%s-%s.%s.rpm" % (cacheroot, arch, name, version, arch)
+        return pkgpath
+
+    def installLocal(self, pkg, po=None, updateonly=False):
+        if not self.ts:
+            self.__initialize_transaction()
+        pkgname = self.__get_pkg_name(pkg)
+        self.localpkgs[pkgname] = pkg
+        self.selectPackage(pkgname)
+
+    def __get_pkg_name(self, pkgpath):
+        h = readRpmHeader(self.ts, pkgpath)
+        return h["name"]
+
+    def downloadPkgs(self, package_objects):
+        localpkgs = self.localpkgs.keys()
+        for po in package_objects:
+            if po.name() in localpkgs:
+                continue
+            filename = self.getLocalPkgPath(po)
+            if os.path.exists(filename):
+                if self.checkPkg(filename) == 0:
+                    continue
+            dir = os.path.dirname(filename)
+            if not os.path.exists(dir):
+                makedirs(dir)
+            baseurl = po.repoInfo().baseUrls()[0].__str__()
+            proxy = self.get_proxy(po.repoInfo())
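+            # build a scheme -> proxy mapping (presumably the form myurlgrab expects)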
+            proxies = {}
+            if proxy:
+                proxies = {str(proxy.split(":")[0]):str(proxy)}
+          
+            location = zypp.asKindPackage(po).location()
+            location = location.filename().__str__()
+            if location.startswith("./"):
+                location = location[2:]
+            url = baseurl + "/%s" % location
+            try:
+                filename = myurlgrab(url, filename, proxies)
+            except CreatorError, e:
+                self.close()
+                raise CreatorError("%s" % e)
+
+    def installPkgs(self, package_objects):
+        if not self.ts:
+            self.__initialize_transaction()
+
+        # Set problem filter flags on the transaction set
+        probfilter = 0
+        for flag in self.probFilterFlags:
+            probfilter |= flag
+        self.ts.setProbFilter(probfilter)
+
+        localpkgs = self.localpkgs.keys()
+        for po in package_objects:
+            pkgname = po.name()
+            if pkgname in localpkgs:
+                rpmpath = self.localpkgs[pkgname]
+            else:
+                rpmpath = self.getLocalPkgPath(po)
+            if not os.path.exists(rpmpath):
+                # Maybe it is a local repo
+                baseurl = po.repoInfo().baseUrls()[0].__str__()
+                baseurl = baseurl.strip()
+                if baseurl.startswith("file:/"):
+                    rpmpath = baseurl[5:] + "/%s/%s" % (po.arch(), os.path.basename(rpmpath))
+            if not os.path.exists(rpmpath):
+                raise RpmError("Error: %s doesn't exist" % rpmpath)
+            h = readRpmHeader(self.ts, rpmpath)
+            self.ts.addInstall(h, rpmpath, 'u')
+
+        unresolved_dependencies = self.ts.check()
+        if not unresolved_dependencies:
+            self.ts.order()
+            cb = RPMInstallCallback(self.ts)
+            self.ts.run(cb.callback, '')
+            self.ts.closeDB()
+            self.ts = None
+        else:
+            print unresolved_dependencies
+            raise RepoError("Error: Unresolved dependencies, transaction failed.")
+
+    def __initialize_transaction(self):
+        if not self.ts:
+            self.ts = rpm.TransactionSet(self.creator._instroot)
+            # Set to not verify DSA signatures.
+            self.ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
+
+    def checkPkg(self, pkg):
+        ret = 1
+        if not os.path.exists(pkg):
+            return ret
+        ret = checkRpmIntegrity(self.bin_rpm, pkg)
+        if ret != 0:
+            print "Package %s is damaged: %s" % (os.path.basename(pkg), pkg)
+        return ret
+
+    def zypp_install(self):
+        policy = zypp.ZYppCommitPolicy()
+        policy.downloadMode(zypp.DownloadInAdvance)
+        policy.dryRun( False )
+        policy.syncPoolAfterCommit( False )
+        result = self.Z.commit( policy )
+        print result
+
+    def _add_prob_flags(self, *flags):
+        for flag in flags:
+            if flag not in self.probFilterFlags:
+                self.probFilterFlags.append(flag)
+
+    def get_proxy(self, repoinfo):
+        proxy = None
+        reponame = "%s" % repoinfo.name()
+        for repo in self.repos:
+            if repo.name == reponame:
+                proxy = repo.proxy
+                break
+        if proxy:
+            return proxy
+        else:
+            repourl = repoinfo.baseUrls()[0].__str__()
+            return get_proxy(repourl)
+
+_pkgmgr = ["zypp", Zypp]
+
diff --git a/micng/utils/rpmmisc.py b/micng/utils/rpmmisc.py
new file mode 100644 (file)
index 0000000..e1030c2
--- /dev/null
+++ b/micng/utils/rpmmisc.py
@@ -0,0 +1,406 @@
+import rpm, os, sys, re
+import locale
+import subprocess
+import logging
+
+class RPMInstallCallback:
+    """
+    command line callback class for callbacks from the RPM library.
+    """
+
+    def __init__(self, ts, output=1):
+        self.output = output
+        self.callbackfilehandles = {}
+        self.total_actions = 0
+        self.total_installed = 0
+        self.installed_pkg_names = []
+        self.total_removed = 0
+        self.mark = "+"
+        self.marks = 40
+        self.lastmsg = None
+        self.tsInfo = None # this needs to be set for anything else to work
+        self.ts = ts
+        self.logString = []
+
+    def _dopkgtup(self, hdr):
+        tmpepoch = hdr['epoch']
+        if tmpepoch is None: epoch = '0'
+        else: epoch = str(tmpepoch)
+
+        return (hdr['name'], hdr['arch'], epoch, hdr['version'], hdr['release'])
+
+    def _makeHandle(self, hdr):
+        handle = '%s:%s.%s-%s-%s' % (hdr['epoch'], hdr['name'], hdr['version'],
+          hdr['release'], hdr['arch'])
+
+        return handle
+
+    def _localprint(self, msg):
+        if self.output:
+            print msg
+
+    def _makefmt(self, percent, progress = True):
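+        # builds a format string for a progress line such as "\r  Installing: ++++        [ 3/10]"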
+        l = len(str(self.total_actions))
+        size = "%s.%s" % (l, l)
+        fmt_done = "[%" + size + "s/%" + size + "s]"
+        done = fmt_done % (self.total_installed + self.total_removed,
+                           self.total_actions)
+        marks = self.marks - (2 * l)
+        width = "%s.%s" % (marks, marks)
+        fmt_bar = "%-" + width + "s"
+        if progress:
+            bar = fmt_bar % (self.mark * int(marks * (percent / 100.0)), )
+            fmt = "\r  %-10.10s: " + bar + " " + done
+        else:
+            bar = fmt_bar % (self.mark * marks, )
+            fmt = "  %-10.10s: "  + bar + " " + done 
+        return fmt
+
+    def _logPkgString(self, hdr):
+        """return nice representation of the package for the log"""
+        (n,a,e,v,r) = self._dopkgtup(hdr)
+        if e == '0':
+            pkg = '%s.%s %s-%s' % (n, a, v, r)
+        else:
+            pkg = '%s.%s %s:%s-%s' % (n, a, e, v, r)
+
+        return pkg
+
+    def callback(self, what, bytes, total, h, user):
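+        # 'what' is the rpm callback event type; 'h' is the key passed to ts.addInstall()
+        # (the rpm file path in this code); 'bytes'/'total' carry the progress figures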
+        if what == rpm.RPMCALLBACK_TRANS_START:
+            if bytes == 6:
+                self.total_actions = total
+
+        elif what == rpm.RPMCALLBACK_TRANS_PROGRESS:
+            pass
+
+        elif what == rpm.RPMCALLBACK_TRANS_STOP:
+            pass
+
+        elif what == rpm.RPMCALLBACK_INST_OPEN_FILE:
+            self.lastmsg = None
+            hdr = None
+            if h is not None:
+                rpmloc = h
+                hdr = readRpmHeader(self.ts, h)
+                handle = self._makeHandle(hdr)
+                fd = os.open(rpmloc, os.O_RDONLY)
+                self.callbackfilehandles[handle]=fd
+                self.total_installed += 1
+                self.installed_pkg_names.append(hdr['name'])
+                return fd
+            else:
+                self._localprint("No header - huh?")
+
+        elif what == rpm.RPMCALLBACK_INST_CLOSE_FILE:
+            hdr = None
+            if h is not None:
+                rpmloc = h
+                hdr = readRpmHeader(self.ts, h)
+                handle = self._makeHandle(hdr)
+                os.close(self.callbackfilehandles[handle])
+                fd = 0
+
+                # log stuff
+                #pkgtup = self._dopkgtup(hdr)
+                self.logString.append(self._logPkgString(hdr))
+                
+
+        elif what == rpm.RPMCALLBACK_INST_PROGRESS:
+            if h is not None:
+                percent = (self.total_installed*100L)/self.total_actions
+                if self.output and (sys.stdout.isatty() or self.total_installed == self.total_actions):
+                    fmt = self._makefmt(percent)
+                    msg = fmt % ("Installing")
+                    if msg != self.lastmsg:
+                        sys.stdout.write(msg)
+                        sys.stdout.flush()
+                        self.lastmsg = msg
+                        if self.total_installed == self.total_actions:
+                             sys.stdout.write("\n")
+                             logging.info('\n'.join(self.logString))
+
+        elif what == rpm.RPMCALLBACK_UNINST_START:
+            pass
+
+        elif what == rpm.RPMCALLBACK_UNINST_PROGRESS:
+            pass
+
+        elif what == rpm.RPMCALLBACK_UNINST_STOP:
+            self.total_removed += 1
+            
+        elif what == rpm.RPMCALLBACK_REPACKAGE_START:
+            pass
+        elif what == rpm.RPMCALLBACK_REPACKAGE_STOP:
+            pass
+        elif what == rpm.RPMCALLBACK_REPACKAGE_PROGRESS:
+            pass
+
+def readRpmHeader(ts, filename):
+    """ Read an rpm header. """
+    fd = os.open(filename, os.O_RDONLY)
+    h = ts.hdrFromFdno(fd)
+    os.close(fd)
+    return h
+
+def splitFilename(filename):
+    """
+    Pass in a standard style rpm fullname
+
+    Return a name, version, release, epoch, arch, e.g.::
+        foo-1.0-1.i386.rpm returns foo, 1.0, 1, '', i386
+        1:bar-9-123a.ia64.rpm returns bar, 9, 123a, 1, ia64
+    """
+
+    if filename[-4:] == '.rpm':
+        filename = filename[:-4]
+
+    archIndex = filename.rfind('.')
+    arch = filename[archIndex+1:]
+
+    relIndex = filename[:archIndex].rfind('-')
+    rel = filename[relIndex+1:archIndex]
+
+    verIndex = filename[:relIndex].rfind('-')
+    ver = filename[verIndex+1:relIndex]
+
+    epochIndex = filename.find(':')
+    if epochIndex == -1:
+        epoch = ''
+    else:
+        epoch = filename[:epochIndex]
+
+    name = filename[epochIndex + 1:verIndex]
+    return name, ver, rel, epoch, arch
+
+def getCanonX86Arch(arch):
+    #
+    if arch == "i586":
+        f = open("/proc/cpuinfo", "r")
+        lines = f.readlines()
+        f.close()
+        for line in lines:
+            if line.startswith("model name") and line.find("Geode(TM)") != -1:
+                return "geode"
+        return arch
+    # only athlon vs i686 isn't handled with uname currently
+    if arch != "i686":
+        return arch
+
+    # if we're i686 and AuthenticAMD, then we should be an athlon
+    f = open("/proc/cpuinfo", "r")
+    lines = f.readlines()
+    f.close()
+    for line in lines:
+        if line.startswith("vendor") and line.find("AuthenticAMD") != -1:
+            return "athlon"
+        # i686 doesn't guarantee cmov, but we depend on it
+        elif line.startswith("flags") and line.find("cmov") == -1:
+            return "i586"
+
+    return arch
+
+def getCanonX86_64Arch(arch):
+    if arch != "x86_64":
+        return arch
+
+    vendor = None
+    f = open("/proc/cpuinfo", "r")
+    lines = f.readlines()
+    f.close()
+    for line in lines:
+        if line.startswith("vendor_id"):
+            vendor = line.split(':')[1]
+            break
+    if vendor is None:
+        return arch
+
+    if vendor.find("Authentic AMD") != -1 or vendor.find("AuthenticAMD") != -1:
+        return "amd64"
+    if vendor.find("GenuineIntel") != -1:
+        return "ia32e"
+    return arch
+
+def getCanonArch():
+    arch = os.uname()[4]
+
+    if (len(arch) == 4 and arch[0] == "i" and arch[2:4] == "86"):
+        return getCanonX86Arch(arch)
+
+    if arch == "x86_64":
+        return getCanonX86_64Arch(arch)
+
+    return arch
+
+# dict mapping arch -> ( multicompat, best personality, biarch personality )
+multilibArches = { "x86_64":  ( "athlon", "x86_64", "athlon" ),
+                   "sparc64v": ( "sparc", "sparcv9v", "sparc64v" ),
+                   "sparc64": ( "sparc", "sparcv9", "sparc64" ),
+                   "ppc64":   ( "ppc", "ppc", "ppc64" ),
+                   "s390x":   ( "s390", "s390x", "s390" ),
+                   }
+
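+# fallback chain: each arch maps to the next arch it is compatible with, ending at "noarch"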
+arches = {
+    # ia32
+    "athlon": "i686",
+    "i686": "i586",
+    "geode": "i586",
+    "i586": "i486",
+    "i486": "i386",
+    "i386": "noarch",
+
+    # amd64
+    "x86_64": "athlon",
+    "amd64": "x86_64",
+    "ia32e": "x86_64",
+
+    # ppc
+    "ppc64pseries": "ppc64",
+    "ppc64iseries": "ppc64",
+    "ppc64": "ppc",
+    "ppc": "noarch",
+
+    # s390{,x}
+    "s390x": "s390",
+    "s390": "noarch",
+
+    # sparc
+    "sparc64v": "sparcv9v",
+    "sparc64": "sparcv9",
+    "sparcv9v": "sparcv9",
+    "sparcv9": "sparcv8",
+    "sparcv8": "sparc",
+    "sparc": "noarch",
+
+    # alpha
+    "alphaev7":   "alphaev68",
+    "alphaev68":  "alphaev67",
+    "alphaev67":  "alphaev6",
+    "alphaev6":   "alphapca56",
+    "alphapca56": "alphaev56",
+    "alphaev56":  "alphaev5",
+    "alphaev5":   "alphaev45",
+    "alphaev45":  "alphaev4",
+    "alphaev4":   "alpha",
+    "alpha":      "noarch",
+
+    # arm
+    "armv7nhl": "armv7hl",
+    "armv7hl": "noarch",
+    "armv7l": "armv6l",
+    "armv6l": "armv5tejl",
+    "armv5tejl": "armv5tel",
+    "armv5tel": "noarch",
+
+    # super-h
+    "sh4a": "sh4",
+    "sh4": "noarch",
+    "sh3": "noarch",
+
+    #itanium
+    "ia64": "noarch",
+    }
+
+# canonical arch of the build host; used as the default by isMultiLibArch()
+canonArch = getCanonArch()
+
+def isMultiLibArch(arch=None):
+    """returns true if arch is a multilib arch, false if not"""
+    if arch is None:
+        arch = canonArch
+
+    if not arches.has_key(arch): # or we could check if it is noarch
+        return 0
+
+    if multilibArches.has_key(arch):
+        return 1
+
+    if multilibArches.has_key(arches[arch]):
+        return 1
+
+    return 0
+
+def getBaseArch():
+    myarch = getCanonArch()
+    if not arches.has_key(myarch):
+        return myarch
+
+    if isMultiLibArch(arch=myarch):
+        if multilibArches.has_key(myarch):
+            return myarch
+        else:
+            return arches[myarch]
+
+    if arches.has_key(myarch):
+        basearch = myarch
+        value = arches[basearch]
+        while value != 'noarch':
+            basearch = value
+            value = arches[basearch]
+
+        return basearch
+
+def checkRpmIntegrity(bin_rpm, package):
+    argv = [bin_rpm, "--checksig", "--nogpg", package]
+    dev_null = os.open("/dev/null", os.O_WRONLY)
+    try:
+        ret = subprocess.call(argv, stdout = dev_null, stderr = dev_null)
+    finally:
+        os.close(dev_null)
+    return ret 
+
+def checkSig(ts, package):
+    """Takes a transaction set and a package, checks its signatures,
+    return 0 if they are all fine
+    return 1 if the gpg key can't be found
+    return 2 if the header is in someway damaged
+    return 3 if the key is not trusted
+    return 4 if the pkg is not gpg or pgp signed"""
+
+    value = 0
+    currentflags = ts.setVSFlags(0)
+    fdno = os.open(package, os.O_RDONLY)
+    try:
+        hdr = ts.hdrFromFdno(fdno)
+    except rpm.error, e:
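+        # match both the misspelled and the corrected spelling of the rpm error message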
+        if str(e) == "public key not availaiable":
+            value = 1
+        if str(e) == "public key not available":
+            value = 1
+        if str(e) == "public key not trusted":
+            value = 3
+        if str(e) == "error reading package header":
+            value = 2
+    else:
+        error, siginfo = getSigInfo(hdr)
+        if error == 101:
+            os.close(fdno)
+            del hdr
+            value = 4
+        else:
+            del hdr
+
+    try:
+        os.close(fdno)
+    except OSError, e: # if we're not opened, don't scream about it
+        pass
+
+    ts.setVSFlags(currentflags) # put things back like they were before
+    return value
+
+def getSigInfo(hdr):
+    """checks the signature from an hdr, hands back signature information
+       and/or an error code"""
+
+    locale.setlocale(locale.LC_ALL, 'C')
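+    # query whichever signature header tag is present: DSA, RSA, GPG or PGP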
+    string = '%|DSAHEADER?{%{DSAHEADER:pgpsig}}:{%|RSAHEADER?{%{RSAHEADER:pgpsig}}:{%|SIGGPG?{%{SIGGPG:pgpsig}}:{%|SIGPGP?{%{SIGPGP:pgpsig}}:{(none)}|}|}|}|'
+    siginfo = hdr.sprintf(string)
+    if siginfo != '(none)':
+        error = 0
+        sigtype, sigdate, sigid = siginfo.split(',')
+    else:
+        error = 101
+        sigtype = 'MD5'
+        sigdate = 'None'
+        sigid = 'None'
+
+    infotuple = (sigtype, sigdate, sigid)
+    return error, infotuple
diff --git a/plugins/backend/yumpkgmgr.py b/plugins/backend/yumpkgmgr.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/plugins/backend/zypppkgmgr.py b/plugins/backend/zypppkgmgr.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/plugins/hook/_hook.py b/plugins/hook/_hook.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/plugins/imager/fs_plugin.py b/plugins/imager/fs_plugin.py
new file mode 100644 (file)
index 0000000..f90b94a
--- /dev/null
+++ b/plugins/imager/fs_plugin.py
@@ -0,0 +1,47 @@
+#!/usr/bin/python
+
+from micng.pluginbase.imager_plugin import ImagerPlugin
+from micng.imager.fs import *
+import micng.configmgr as configmgr
+try:
+    import argparse
+except ImportError:
+    import micng.utils.argparse as argparse
+
+class FsPlugin(ImagerPlugin):
+    """hello livecd
+    """
+    @classmethod
+    def do_options(self, parser):
+        parser.add_argument('ksfile', nargs='?', help='kickstart file')
+        parser.add_argument('--release', help='fs options test')
+
+    @classmethod
+    def do_create(self, args):
+        if args.release:
+            print "fs option release: ", args.release
+        if not args.ksfile:
+            print "please specify a kickstart file"
+            return
+#        print "ksfile", args.ksfile
+        self.configmgr = configmgr.getConfigMgr()
+        self.configmgr.setProperty('ksfile', args.ksfile)
+#        print "ksfile", self.configmgr.getProperty('ksfile')
+        self.ks = self.configmgr.getProperty('kickstart')
+        self.name = self.configmgr.getProperty('name')
+        fs = FsImageCreator(self.ks, self.name)
+        try:
+            fs.outdir = self.configmgr.getProperty('outdir')
+            fs.mount(None, self.configmgr.cache)
+            fs.install()
+            fs.configure(self.configmgr.repometadata)
+            fs.unmount()
+            fs.package(self.configmgr.outdir)
+            print "Finished"
+        except Exception, e:
+            print "failed to create image: %s" % e
+        finally:
+            fs.cleanup()
+
+
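+# imager plugin registration: [name, class], presumably discovered by micng.pluginmgr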
+mic_plugin = ["fs", FsPlugin]
diff --git a/plugins/imager/livecd_plugin.py b/plugins/imager/livecd_plugin.py
new file mode 100644 (file)
index 0000000..9edddb8
--- /dev/null
+++ b/plugins/imager/livecd_plugin.py
@@ -0,0 +1,71 @@
+#!/usr/bin/python
+from micng.pluginbase.imager_plugin import ImagerPlugin
+import micng.imager as imager
+import micng.configmgr as cfgmgr
+import micng.utils as utils
+import micng.utils.cmdln as cmdln
+import os, time
+
+class LivecdPlugin(ImagerPlugin):
+    @classmethod
+    def do_options(self, parser):
+        parser.add_argument("-vid", "--volumeid", type=str, default=None, help="Specify volume id")
+        parser.add_argument("ksfile", help="kickstart file")
+
+    @classmethod
+    def do_create(self, args):
+        if not args.ksfile:
+            print "please specify a kickstart file"
+            return
+
+        self.configmgr = cfgmgr.getConfigMgr()
+        self.configmgr.setProperty('ksfile', args.ksfile)
+
+        fs_label = utils.kickstart.build_name(
+                     args.ksfile,
+                     "%s-" % self.configmgr.name,
+                     maxlen = 32,
+                     suffix = "%s-%s" %(os.uname()[4], time.strftime("%Y%m%d%H%M")))
+        
+        creator = imager.livecd.LivecdImageCreator(
+                    self.configmgr.kickstart, self.configmgr.name, fs_label)
+        
+        creator.skip_compression = False
+        creator.skip_minimize = False
+            
+        creator.tmpdir = self.configmgr.tmpdir
+        creator._alt_initrd_name = None
+        creator._recording_pkgs = None
+        creator._include_src = False
+        creator._local_pkgs_path = None
+        creator._genchecksum = False
+        creator.distro_name = self.configmgr.name
+        creator.image_format = "livecd"
+    
+        
+        utils.kickstart.resolve_groups(creator, self.configmgr.repometadata, False)
+    
+        imgname = creator.name
+            
+        try:
+            creator.check_depend_tools()
+            creator.mount(None, self.configmgr.cache)
+            creator.install()
+    
+            creator.configure(self.configmgr.repometadata)
+            creator.unmount()
+            creator.package(self.configmgr.outdir)
+            outimage = creator.outimage
+                
+            creator.package_output("livecd", self.configmgr.outdir, "none")
+            creator.print_outimage_info()
+            outimage = creator.outimage
+            
+        except Exception, e:
+            raise Exception("failed to create image: %s" % e)
+        finally:
+            creator.cleanup()
+    
+        print "Finished."        
+
+mic_plugin = ["livecd", LivecdPlugin]
diff --git a/setup.py b/setup.py
new file mode 100644 (file)
index 0000000..3e1a699
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+import os, sys
+from distutils.core import setup
+#try:
+#    import setuptools
+#    # enable "setup.py develop", optional
+#except ImportError:
+#    pass
+
+MOD_NAME = 'micng'
+
+version_path = 'VERSION'
+if not os.path.isfile(version_path):
+    print 'No VERSION file in topdir, abort'
+    sys.exit(1)
+
+try:
+    # first line should be the version number
+    version = open(version_path).readline().strip()
+    if not version:
+        print 'VERSION file is invalid, abort'
+        sys.exit(1)
+
+    ver_file = open('%s/__version__.py' % MOD_NAME, 'w')
+    ver_file.write("VERSION = \"%s\"\n" % version)
+    ver_file.close()
+except IOError:
+    print 'WARNING: Cannot write version number file'
+    pass
+
+PACKAGES = [MOD_NAME,
+            MOD_NAME + '/utils',
+            MOD_NAME + '/utils/kscommands',
+            MOD_NAME + '/utils/pkgmanagers',
+            MOD_NAME + '/imager',
+            MOD_NAME + '/pluginbase',
+           ]
+setup(name=MOD_NAME,
+      version = version,
+      description = 'New MeeGo Image Creator',
+      author='Jian-feng Ding',
+      author_email='jian-feng.ding@intel.com',
+      url='https://meego.gitorious.org/meego-developer-tools/image-creator',
+      scripts=[
+          'tools/micng',
+          'tools/mic-image-create',
+          ],
+      packages = PACKAGES,
+)
+
diff --git a/tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks b/tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks
new file mode 100644 (file)
index 0000000..722a62e
--- /dev/null
+++ b/tests/meego-ivi-ia32-1.2.80.0.20110502.2.ks
@@ -0,0 +1,69 @@
+# -*-mic2-options-*- -f livecd -*-mic2-options-*-
+
+# 
+# Do not Edit! Generated by:
+# kickstarter.py
+# 
+
+lang en_US.UTF-8
+keyboard us
+timezone --utc America/Los_Angeles
+part / --size 2200 --ondisk sda --fstype=ext3
+rootpw meego 
+xconfig --startxonboot
+bootloader --timeout=0 --append="quiet"
+desktop --autologinuser=meego  --defaultdesktop=X-IVI --session="/usr/bin/startivi"
+user --name meego  --groups audio,video --password meego 
+
+repo --name=oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo/builds/1.2.80/1.2.80.0.20110503.2/repos/oss/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+repo --name=non-oss --baseurl=http://linux-ftp.jf.intel.com/pub/mirrors/MeeGo/builds/1.2.80/1.2.80.0.20110503.2/repos/non-oss/ia32/packages/ --save --debuginfo --source --gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-meego
+
+%packages --ignoremissing
+
+@MeeGo Core
+@MeeGo Compliance
+@IVI Desktop
+@MeeGo X Window System
+@MeeGo IVI Applications
+@MeeGo Base Development
+@X for IVI
+@Chinese Support
+@Japanese Support
+@Korean Support
+
+kernel-adaptation-intel-automotive
+
+meego-ux-daemon
+meegotouch-compositor
+-dsme
+-ngfd
+-mce
+-qmsystem
+%end
+
+%post
+# save a little bit of space at least...
+rm -f /boot/initrd*
+
+# Prelink can reduce boot time
+if [ -x /usr/sbin/prelink ]; then
+    /usr/sbin/prelink -aRqm
+fi
+
+rm -f /var/lib/rpm/__db*
+rpm --rebuilddb
+
+# Get rid of /etc/xdg/autostart/applauncherd.desktop line that causes IVI duicontrolpanel issues
+# until fixed upstream. BMC#13570
+sed -ri '/OnlyShowIn=X-MEEGO-HS;/d' /etc/xdg/autostart/applauncherd.desktop
+
+
+%end
+
+%post --nochroot
+if [ -n "$IMG_NAME" ]; then
+    echo "BUILD: $IMG_NAME" >> $INSTALL_ROOT/etc/meego-release
+fi
+
+
+%end
diff --git a/tests/micng.conf b/tests/micng.conf
new file mode 100644 (file)
index 0000000..69e77bf
--- /dev/null
+++ b/tests/micng.conf
@@ -0,0 +1,4 @@
+[main]
+cache=./cache
+outdir=.
+tmpdir=/var/tmp
diff --git a/tools/mic-image-create b/tools/mic-image-create
new file mode 100755 (executable)
index 0000000..49bf4b4
--- /dev/null
+++ b/tools/mic-image-create
@@ -0,0 +1,67 @@
+#!/usr/bin/python -t
+
+import sys, os, os.path, string
+import micng.utils.argparse as argparse
+import micng.configmgr as configmgr
+import micng.pluginmgr as pluginmgr
+
+class Creator(object):
+    name = 'create'
+
+    def __init__(self):
+        self.configmgr = configmgr.getConfigMgr()
+        self.pluginmgr = pluginmgr.PluginMgr()
+        self.pluginmgr.loadPlugins()
+        self.plugincmds = self.pluginmgr.getPluginByCateg('imager')
+
+    def main(self, argv=None):
+#        import pdb
+#        pdb.set_trace()
+        if os.getuid() != 0:
+            print "Please run the program as root"
+            return 1
+        prog = os.path.basename(sys.argv[0])
+        parser = argparse.ArgumentParser(
+                  usage='%s [COMMONOPT] <subcommand> [SUBOPT] ARGS' % prog,
+                  ) 
+        parser.add_argument('-k', '--cache', dest='cache', help='cache directory')
+        parser.add_argument('-o', '--outdir', dest='outdir', help='output directory')
+        parser.add_argument('-t', '--tmpdir', dest='tmpdir', help='temp directory')
+
+
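+        # each imager plugin contributes one subcommand: do_options() fills in the
+        # subparser and do_create() becomes its handler via set_defaults(func=...)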
+        subparsers = parser.add_subparsers(title='subcommands')
+        for subcmd, klass in self.plugincmds:
+            subcmd_help = 'create ' + subcmd + ' image'
+            subcmd_parser = subparsers.add_parser(
+                              subcmd, 
+                              usage=prog+' [COMMONOPT] '+subcmd+'  [SUBOPT] ARGS',
+                              help=subcmd_help
+                              )
+            if hasattr(klass, 'do_options'):
+                add_subopt = getattr(klass, 'do_options')
+                add_subopt(subcmd_parser)
+            if hasattr(klass, 'do_create'):
+                do_create = getattr(klass, 'do_create')
+                subcmd_parser.set_defaults(func=do_create)
+
+        if not argv:
+            parser.print_help()
+            return 1
+
+        args = parser.parse_args(argv)
+        if args.outdir:
+            self.configmgr.setProperty('outdir', args.outdir)
+        if args.tmpdir:
+            self.configmgr.setProperty('tmpdir', args.tmpdir)
+        if args.cache:
+            self.configmgr.setProperty('cache', args.cache)
+#        print 'outdir', self.configmgr.getProperty('outdir')
+#        print 'tmpdir', self.configmgr.getProperty('tmpdir')
+#        print 'cache', self.configmgr.getProperty('cache')
+        args.func(args)
+        return 0
+
+if __name__ == "__main__":
+    create = Creator()
+    ret = create.main(sys.argv[1:])
+    sys.exit(ret)
diff --git a/tools/micng b/tools/micng
new file mode 100755 (executable)
index 0000000..ea98dbd
--- /dev/null
+++ b/tools/micng
@@ -0,0 +1,35 @@
+#!/usr/bin/python -t
+
+import sys, os
+import subprocess
+import micng.utils.cmdln as cmdln
+
+class Mic(cmdln.Cmdln):
+    def run_subcmd(self, subcmd, opts, args):
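+        # dispatch the subcommand to the external mic-image-create tool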
+        creator = "mic-image-create"
+        tools = {
+                 "cr":creator, "create":creator,
+                }
+        
+        argv = [tools[subcmd]]
+        argv.extend(args)
+        subprocess.call(argv)        
+       
+    @cmdln.alias("cr")
+    def do_create(self, argv):
+        """${cmd_name}: create image
+
+           ${cmd_usage}
+           ${cmd_option_list}
+        """
+        self.run_subcmd("create", None, argv[1:])
+    
+    @cmdln.alias("cv")
+    def do_convert(self, argv):
+        """${cmd_name}: convert an image format to another one
+        """
+
+if __name__ == "__main__":
+    mic = Mic()
+    ret = mic.main()
+    sys.exit(ret)
diff --git a/tools/micng.ref b/tools/micng.ref
new file mode 100755 (executable)
index 0000000..5d2ad7b
--- /dev/null
+++ b/tools/micng.ref
@@ -0,0 +1,120 @@
+#!/usr/bin/python
+
+# Copyright (C) 2010 Intel Inc.  All rights reserved.
+# This program is free software; it may be used, copied, modified
+# and distributed under the terms of the GNU General Public Licence,
+# either version 2, or version 3 (at your option).
+
+import sys
+import mic3.cmdln as cmdln
+import optparse as _optparse
+
+try:
+    import mic3.__version__
+    VERSION = mic3.__version__.version
+except:
+    VERSION = 'unknown'
+
+class MIC3(cmdln.Cmdln):
+    """Usage: mic [GLOBALOPTS] SUBCOMMAND [OPTS] [ARGS...]
+    or: mic help SUBCOMMAND
+
+    MeeGo Image Tool.
+    Type 'mic help <subcommand>' for help on a specific subcommand.
+
+    ${command_list}
+    ${help_list}
+    global ${option_list}
+    For additional information, see
+    * http://www.meego.com/
+    """
+
+    name = 'mic'
+    version = VERSION
+
+    @cmdln.option("-v", "--verbose", action="store_true",
+                           help="print extra information")
+
+    def get_cmd_help(self, cmdname):
+        doc = self._get_cmd_handler(cmdname).__doc__
+        doc = self._help_reindent(doc)
+        doc = self._help_preprocess(doc, cmdname)
+        doc = doc.rstrip() + '\n' # trim down trailing space
+        return self._str(doc)
+
+    # create image
+    @cmdln.alias('cr')
+    @cmdln.option("-c", "--config", type="string", dest="config",
+                    help="Path to kickstart config file")
+
+    @cmdln.option("-f", "--format", type="string", dest="format",
+                    help="Image format, one of: fs, livecd, liveusb, loop, raw, nand, mrstnand, ubi, jffs2, vdi or vmdk")
+
+    @cmdln.option("-t", "--tmpdir", type="string",
+                      dest="tmpdir",
+                      help="Temporary directory to use (default: /var/tmp)")
+    @cmdln.option("-k", "--cache", type="string",
+                      dest="cachedir", default=None,
+                      help="Cache directory to use (default: private cache)")
+    @cmdln.option("-o", "--outdir", type="string",
+                      dest="outdir", default=None,
+                      help="Output directory to use (default: current work dir)")
+    @cmdln.option("", "--release", type="string",
+                      dest="release", default=None,
+                      help="Generate a MeeGo release with all necessary files for publishing.")
+    @cmdln.option("", "--genchecksum", action="store_true",
+                      dest="genchecksum", default=False,
+                      help="Generate checksum for image file if this option is provided")
+    @cmdln.option("-P", "--prefix", type="string",
+                      dest="prefix", default=None,
+                      help="Image name prefix (default: meego)")
+    @cmdln.option("-S", "--suffix", type="string",
+                      dest="suffix", default=None,
+                      help="Image name suffix (default: date stamp)")
+    @cmdln.option("-a", "--arch", type="string",
+                      dest="arch", default=None,
+                      help="Specify target arch of image, for example: arm")
+    @cmdln.option("", "--use-comps", action="store_true",
+                      dest="use_comps", default=False,
+                      help="Use comps instead of patterns if comps exists")
+    @cmdln.option("", "--record-pkgs", type="string",
+                      dest="record_pkgs", default=None,
+                      help="Record the installed packages, valid values: name, content")
+    @cmdln.option("", "--fstype", type="string",
+                      dest="fstype", default="vfat",
+                      help="File system type for the live USB image, ext3 or vfat (default: vfat).")
+    @cmdln.option("", "--overlay-size-mb", type="int", default=64,
+    @cmdln.option("", "--overlay-size-mb", type="int", default=64,
+                      help="Overlay size in MB; determines how much changed data can be saved on the live USB disk.")
+    @cmdln.option('-d', '--debug', action='store_true',
+                      help='Output debugging information')
+    @cmdln.option('-v', '--verbose', dest='verbose', action='store_true',
+                      help='Output verbose information')
+    @cmdln.option('', '--logfile', type="string", dest="file",
+                      help='Save debug information to FILE')
+    @cmdln.option("", "--save-kernel", action="store_true",
+                      dest="save_kernel", default=False,
+                      help="Save kernel image file into outdir")
+    @cmdln.option("", "--pkgmgr", type="string",
+                      help="Specify the package manager to use; currently zypper and yum are available.")
+    @cmdln.option("", "--volumeid", type="string", default=None,
+                      help="Specify volume id, valid only for livecd")
+    def do_create(self, subcmd, opts, *args):
+        """${cmd_name}: Create an image
+
+        This command is used to create various images, including
+        live CD, live USB, loop, raw/KVM/QEMU, VMWare/vmdk,
+        VirtualBox/vdi, Moorestown/mrstnand, jffs2 and ubi.
+
+        Examples:
+           mic create                         # create an image according to the default config
+           mic create --format=liveusb        # create a live USB image
+
+        ${cmd_usage}
+        ${cmd_option_list}
+        """
+
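+        # reference stub: just echo the parsed subcommand, options and arguments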
+        print subcmd, opts, args
+
+if __name__ == "__main__":
+    mic = MIC3()
+    sys.exit(mic.main(sys.argv))