Merge tag 'v1.6.0' into tizen_qemu_1.6
author SeokYeon Hwang <syeon.hwang@samsung.com>
Tue, 10 Sep 2013 03:18:05 +0000 (12:18 +0900)
committer SeokYeon Hwang <syeon.hwang@samsung.com>
Tue, 10 Sep 2013 03:18:05 +0000 (12:18 +0900)
Signed-off-by: SeokYeon Hwang <syeon.hwang@samsung.com>
Conflicts:
VERSION
arch_init.c
block/vmdk.c
configure
cpu-exec.c
cpus.c
exec.c
hw/9pfs/virtio-9p-device.c
hw/display/vga-pci.c
hw/pci-host/q35.c
hw/usb/dev-storage.c
hw/virtio/virtio-mmio.c
include/exec/cpu-defs.h
include/qemu/error-report.h
include/qemu/log.h
include/sysemu/kvm.h
include/sysemu/sysemu.h
pc-bios/bios.bin
qapi-schema.json
qemu-char.c
ui/input.c
util/oslib-win32.c
vl.c

46 files changed:
.gitignore
Makefile
Makefile.target
arch_init.c
block/raw-win32.c
block/vmdk.c
blockdev.c
configure
cpu-exec.c
cpus.c
exec.c
hw/9pfs/virtio-9p-device.c
hw/display/vga-pci.c
hw/display/vga.c
hw/i386/pc.c
hw/i386/pc_piix.c
hw/i386/pc_sysfw.c
hw/input/pl050.c
hw/pci/pci-hotplug-old.c
hw/pci/pci.c
hw/usb/dev-hid.c
hw/usb/dev-storage.c
hw/virtio/virtio-pci.c
include/exec/cpu-defs.h
include/exec/hwaddr.h
include/hw/i386/pc.h
include/qemu-common.h
include/sysemu/kvm.h
include/sysemu/sysemu.h
include/ui/console.h
main-loop.c
qapi-schema.json
qemu-char.c
qemu-options.hx
qom/object.c
target-i386/hax-all.c
target-i386/translate.c
tcg/i386/tcg-target.c
tcg/tcg.c
tcg/tcg.h
ui/input.c
util/oslib-posix.c
util/oslib-win32.c
util/qemu-sockets.c
vl.c
xen-all.c

diff --cc .gitignore
Simple merge
diff --cc Makefile
Simple merge
diff --cc Makefile.target
index ecfe538,9a49852..409b843
mode 100755,100644..100755
@@@ -121,18 -127,8 +127,18 @@@ LIBS+=$(libs_softmmu
  obj-$(CONFIG_XEN) += xen-all.o xen-mapcache.o
  obj-$(CONFIG_NO_XEN) += xen-stub.o
  
 +# HAX support
 +ifdef CONFIG_WIN32
 +obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-windows.o
 +obj-$(CONFIG_NO_HAX) += hax-stub.o
 +endif
 +ifdef CONFIG_DARWIN
 +obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-darwin.o
 +obj-$(CONFIG_NO_HAX) += hax-stub.o
 +endif
 +
  # Hardware support
- ifeq ($(TARGET_ARCH), sparc64)
+ ifeq ($(TARGET_NAME), sparc64)
  obj-y += hw/sparc64/
  else
  obj-y += hw/$(TARGET_BASE_ARCH)/
diff --cc arch_init.c
@@@ -1098,11 -1182,52 +1182,61 @@@ TargetInfo *qmp_query_target(Error **er
      return info;
  }
  
+ /* Stub function that gets run on the vcpu when it is brought out of the
+    VM to run inside qemu via async_run_on_cpu() */
+ static void mig_sleep_cpu(void *opq)
+ {
+     qemu_mutex_unlock_iothread();
+     g_usleep(30*1000);
+     qemu_mutex_lock_iothread();
+ }
+ /* To reduce the dirty rate, explicitly disallow the VCPUs from spending
+    much time in the VM. The migration thread will try to catch up.
+    The workload will experience a performance drop.
+ */
+ static void mig_throttle_cpu_down(CPUState *cpu, void *data)
+ {
+     async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
+ }
+ static void mig_throttle_guest_down(void)
+ {
+     qemu_mutex_lock_iothread();
+     qemu_for_each_cpu(mig_throttle_cpu_down, NULL);
+     qemu_mutex_unlock_iothread();
+ }
+ static void check_guest_throttling(void)
+ {
+     static int64_t t0;
+     int64_t        t1;
+     if (!mig_throttle_on) {
+         return;
+     }
+     if (!t0)  {
+         t0 = qemu_get_clock_ns(rt_clock);
+         return;
+     }
+     t1 = qemu_get_clock_ns(rt_clock);
+     /* If it has been more than 40 ms since the last time the guest
+      * was throttled then do it again.
+      */
+     if (40 < (t1-t0)/1000000) {
+         mig_throttle_guest_down();
+         t0 = t1;
+     }
+ }
++
 +int hax_available(void)
 +{
 +#ifdef CONFIG_HAX
 +    return 1;
 +#else
 +    return 0;
 +#endif
 +}
Simple merge
diff --cc block/vmdk.c
@@@ -508,7 -524,7 +524,8 @@@ static int vmdk_open_vmdk4(BlockDriverS
          return ret;
      }
      if (header.capacity == 0) {
 +        int64_t desc_offset = le64_to_cpu(header.desc_offset);
+         uint64_t desc_offset = le64_to_cpu(header.desc_offset);
          if (desc_offset) {
              return vmdk_open_desc_file(bs, flags, desc_offset << 9);
          }
diff --cc blockdev.c
@@@ -697,26 -700,17 +705,27 @@@ static DriveInfo *blockdev_init(QemuOpt
      bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
  
      if (ro && copy_on_read) {
-         error_report("warning: disabling copy_on_read on readonly drive");
+         error_report("warning: disabling copy_on_read on read-only drive");
      }
  
+     QINCREF(bs_opts);
      ret = bdrv_open(dinfo->bdrv, file, bs_opts, bdrv_flags, drv);
-     bs_opts = NULL;
  
      if (ret < 0) {
 +#ifdef CONFIG_MARU
 +        const char _msg[] = "Failed to load disk file from the following path. Check if the file is corrupted or missing.\n\n";
 +          char* err_msg = NULL;
 +        err_msg = maru_convert_path((char*)_msg, file);
 +        start_simple_client(err_msg);
 +        if (err_msg) {
 +            g_free(err_msg);
 +        }
 +#endif
 +
          if (ret == -EMEDIUMTYPE) {
              error_report("could not open disk image %s: not in %s format",
-                          file ?: dinfo->id, drv->format_name);
+                          file ?: dinfo->id, drv ? drv->format_name :
+                          qdict_get_str(bs_opts, "driver"));
          } else {
              error_report("could not open disk image %s: %s",
                           file ?: dinfo->id, strerror(-ret));
diff --cc configure
+++ b/configure
@@@ -179,7 -179,7 +179,8 @@@ xfs="
  vhost_net="no"
  vhost_scsi="no"
  kvm="no"
 +hax="no"
+ rdma=""
  gprof="no"
  debug_tcg="no"
  debug="no"
@@@ -229,25 -229,15 +230,26 @@@ rbd="
  smartcard_nss=""
  libusb=""
  usb_redir=""
 +opengl=""
 +efence="no"
 +yagl="no"
 +yagl_stats="no"
  glx=""
 +vigs="no"
  zlib="yes"
- guest_agent="yes"
+ guest_agent=""
  want_tools="yes"
  libiscsi=""
  coroutine=""
  seccomp=""
 +gl="yes"
 +
 +# for TIZEN-maru 
 +maru="no"
 +shm="no"
 +#
  glusterfs=""
+ glusterfs_discard="no"
  virtio_blk_data_plane=""
  gtk=""
  gtkabi="2.0"
@@@ -558,9 -553,7 +565,11 @@@ Haiku
    if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
      audio_possible_drivers="$audio_possible_drivers fmod"
    fi
-   libs_qga="-lrt $libs_qga"
-   QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers $QEMU_INCLUDES"
++
 +# fix linking error on Ubuntu 13.04
++#  libs_qga="-lrt $libs_qga"
++#  QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers $QEMU_INCLUDES"
+   QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers -I$(pwd)/linux-headers $QEMU_INCLUDES"
  ;;
  esac
  
@@@ -1183,16 -1095,9 +1145,16 @@@ echo "  --enable-bluez           enabl
  echo "  --disable-slirp          disable SLIRP userspace network connectivity"
  echo "  --disable-kvm            disable KVM acceleration support"
  echo "  --enable-kvm             enable KVM acceleration support"
 +echo "  --disable-gl             disable GL acceleration support"
 +
 +echo "  --disable-hax            disable HAX acceleration support"
 +echo "  --enable-hax             enable HAX acceleration support"
 +
 +echo "  --disable-gl             disable GL acceleration support"
 +echo "  --enable-gl              enable GL acceleration support"
+ echo "  --disable-rdma           disable RDMA-based migration support"
+ echo "  --enable-rdma            enable RDMA-based migration support"
  echo "  --enable-tcg-interpreter enable TCG with bytecode interpreter (TCI)"
- echo "  --disable-nptl           disable usermode NPTL support"
- echo "  --enable-nptl            enable usermode NPTL support"
  echo "  --enable-system          enable all system emulation targets"
  echo "  --disable-system         disable all system emulation targets"
  echo "  --enable-user            enable supported user emulation targets"
@@@ -3622,8 -3580,7 +3651,9 @@@ echo "Linux AIO support $linux_aio
  echo "ATTR/XATTR support $attr"
  echo "Install blobs     $blobs"
  echo "KVM support       $kvm"
 +echo "HAX support       $hax"
 +echo "GL support        $gl"
+ echo "RDMA support      $rdma"
  echo "TCG interpreter   $tcg_interpreter"
  echo "fdt support       $fdt"
  echo "preadv support    $preadv"
@@@ -4156,16 -4068,11 +4189,20 @@@ echo "CONFIG_TRACE_FILE=$trace_file" >
  if test "$trace_default" = "yes"; then
    echo "CONFIG_TRACE_DEFAULT=y" >> $config_host_mak
  fi
 +if test "$hax" = "yes" ; then
 +  if test "$mingw32" = "yes" ; then
 +    echo "CONFIG_HAX_BACKEND=y" >> $config_host_mak
 +  elif test "$darwin" = "yes" ; then
 +    echo "CONFIG_HAX_BACKEND=y" >> $config_host_mak
 +  else
 +    hax="no"
 +  fi
 +fi
  
+ if test "$rdma" = "yes" ; then
+   echo "CONFIG_RDMA=y" >> $config_host_mak
+ fi
  if test "$tcg_interpreter" = "yes"; then
    QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/tci $QEMU_INCLUDES"
  elif test "$ARCH" = "sparc64" ; then
diff --cc cpu-exec.c
@@@ -307,16 -293,11 +313,16 @@@ int cpu_exec(CPUArchState *env
                  }
              }
  
 +#ifdef CONFIG_HAX
 +            if (hax_enabled() && !hax_vcpu_exec(env))
 +                longjmp(env->jmp_env, 1);
 +#endif
 +
              next_tb = 0; /* force lookup of first TB */
              for(;;) {
 -                interrupt_request = cpu->interrupt_request;
 +                interrupt_request = need_handle_intr_request(cpu);
                  if (unlikely(interrupt_request)) {
-                     if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+                     if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                          /* Mask out external interrupts for this step. */
                          interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                      }
                              cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
                                                            0);
                              cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
-                             do_smm_enter(env);
 +#ifdef CONFIG_HAX
 +                            if (hax_enabled())
 +                                env->hax_vcpu->resync = 1;
 +#endif
+                             do_smm_enter(x86_env_get_cpu(env));
                              next_tb = 0;
                          } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                     !(env->hflags2 & HF2_NMI_MASK)) {
diff --cc cpus.c
--- 1/cpus.c
--- 2/cpus.c
+++ b/cpus.c
@@@ -74,7 -71,7 +72,7 @@@ static bool cpu_thread_is_idle(CPUStat
          return true;
      }
      if (!cpu->halted || qemu_cpu_has_work(cpu) ||
-         kvm_async_interrupts_enabled() || hax_enabled()) {
 -        kvm_halt_in_kernel()) {
++        kvm_halt_in_kernel() || hax_enabled()) {
          return false;
      }
      return true;
diff --cc exec.c
--- 1/exec.c
--- 2/exec.c
+++ b/exec.c
@@@ -31,7 -31,7 +31,8 @@@
  #include "hw/qdev.h"
  #include "qemu/osdep.h"
  #include "sysemu/kvm.h"
 +#include "sysemu/hax.h"
+ #include "sysemu/sysemu.h"
  #include "hw/xen/xen.h"
  #include "qemu/timer.h"
  #include "qemu/config-file.h"
@@@ -104,18 -101,7 +106,17 @@@ static int virtio_9p_device_init(VirtIO
      if (s->ops->init(&s->ctx) < 0) {
          fprintf(stderr, "Virtio-9p Failed to initialize fs-driver with id:%s"
                  " and export path:%s\n", s->fsconf.fsdev_id, s->ctx.fs_root);
 +#ifdef CONFIG_MARU
 +        const char _msg[] = "Failed to find the file sharing path. Check if the path is correct or not.\n\n";
 +        char* err_msg = NULL;
 +        err_msg = maru_convert_path((char*)_msg, s->ctx.fs_root);
 +              maru_register_exit_msg(MARU_EXIT_UNKNOWN, err_msg);
 +        if (err_msg) {
 +            g_free(err_msg);
 +        }
 +#endif
 +
-         return -1;
+         goto out;
      }
      if (v9fs_init_worker_threads() < 0) {
          fprintf(stderr, "worker thread initialization failed\n");
@@@ -150,12 -147,9 +150,13 @@@ static int pci_std_vga_initfn(PCIDevic
      VGACommonState *s = &d->vga;
  
      /* vga + console init */
-     vga_common_init(s);
 +#ifdef CONFIG_MARU
 +    maru_vga_common_init(s);
 +#else
-     vga_init(s, pci_address_space(dev), pci_address_space_io(dev), true);
+     vga_common_init(s, OBJECT(dev));
 +#endif
+     vga_init(s, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev),
+              true);
  
      s->con = graphic_console_init(DEVICE(dev), s->hw_ops, s);
  
Simple merge
diff --cc hw/i386/pc.c
  #include "hw/acpi/acpi.h"
  #include "hw/cpu/icc_bus.h"
  #include "hw/boards.h"
+ #include "hw/pci/pci_host.h"
  
 +#ifdef CONFIG_MARU
 +#include "../../tizen/src/maru_err_table.h"
 +#endif
  /* debug PC/ISA interrupts */
  //#define DEBUG_IRQ
  
Simple merge
Simple merge
Simple merge
@@@ -298,11 -295,7 +337,10 @@@ void pci_device_hot_add(Monitor *mon, c
                         PCI_FUNC(dev->devfn));
      } else
          monitor_printf(mon, "failed to add %s\n", opts);
 +#ifdef CONFIG_MARU
 +    return dev;
 +#endif
  }
- #endif
  
  static int pci_device_hot_remove(Monitor *mon, const char *pci_addr)
  {
diff --cc hw/pci/pci.c
Simple merge
Simple merge
@@@ -769,9 -746,7 +764,10 @@@ static void usb_msd_class_initfn_common
      uc->handle_reset   = usb_msd_handle_reset;
      uc->handle_control = usb_msd_handle_control;
      uc->handle_data    = usb_msd_handle_data;
 +#ifdef CONFIG_MARU
 +    uc->handle_destroy = usb_msd_handle_destroy;
 +#endif
+     set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
      dc->fw_name = "storage";
      dc->vmsd = &vmstate_usb_msd;
  }
Simple merge
@@@ -179,11 -176,6 +176,12 @@@ typedef struct CPUWatchpoint 
      sigjmp_buf jmp_env;                                                 \
      int exception_index;                                                \
                                                                          \
 +    /* for hax */                                                       \
 +    int hax_vcpu_dirty;                                                 \
 +    struct hax_vcpu_state *hax_vcpu;                                    \
 +                                                                        \
 +    CPUArchState *next_cpu; /* next CPU sharing TB cache */                 \
++
      /* user data */                                                     \
      void *opaque;                                                       \
                                                                          \
Simple merge
Simple merge
Simple merge
@@@ -249,21 -270,15 +270,23 @@@ int kvm_check_extension(KVMState *s, un
  
  uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
                                        uint32_t index, int reg);
- void kvm_cpu_synchronize_state(CPUArchState *env);
++
+ void kvm_cpu_synchronize_state(CPUState *cpu);
 +#ifdef CONFIG_HAX
- void hax_cpu_synchronize_state(CPUArchState *env);
++void hax_cpu_synchronize_state(CPUState *cpu);
 +#endif
 +
  /* generic hooks - to be moved/refactored once there are more users */
  
- static inline void cpu_synchronize_state(CPUArchState *env)
+ static inline void cpu_synchronize_state(CPUState *cpu)
  {
      if (kvm_enabled()) {
-         kvm_cpu_synchronize_state(env);
+         kvm_cpu_synchronize_state(cpu);
      }
-     hax_cpu_synchronize_state(env);
 +#ifdef CONFIG_HAX
++    hax_cpu_synchronize_state(cpu);
 +#endif
  }
  
  #if !defined(CONFIG_USER_ONLY)
@@@ -109,10 -103,6 +109,9 @@@ typedef enum 
  
  extern int vga_interface_type;
  #define xenfb_enabled (vga_interface_type == VGA_XENFB)
- #define qxl_enabled (vga_interface_type == VGA_QXL)
 +#ifdef CONFIG_MARU
 +#define maru_vga_enabled (vga_interface_type == VGA_MARU)
 +#endif
  
  extern int graphic_width;
  extern int graphic_height;
Simple merge
diff --cc main-loop.c
Simple merge
Simple merge
diff --cc qemu-char.c
@@@ -3516,11 -3557,8 +3572,11 @@@ QemuOptsList qemu_chardev_opts = 
  static CharDriverState *qmp_chardev_open_file(ChardevFile *file, Error **errp)
  {
      HANDLE out;
 +#ifdef CONFIG_MARU
 +    int open_flags, ret;
 +#endif
  
-     if (file->in) {
+     if (file->has_in) {
          error_setg(errp, "input file not supported");
          return NULL;
      }
diff --cc qemu-options.hx
Simple merge
diff --cc qom/object.c
@@@ -688,16 -688,10 +688,15 @@@ void object_ref(Object *obj
  
  void object_unref(Object *obj)
  {
 +// Workaround to avoid a QOM bug related to qbus_create_inplace()... see hw/qdev.c
 +    if(obj->ref == 0) { // Object already finalized...
 +         return;
 +    }
 +//
      g_assert(obj->ref > 0);
-     obj->ref--;
  
      /* parent always holds a reference to its children */
-     if (obj->ref == 0) {
+     if (atomic_fetch_dec(&obj->ref) == 1) {
          object_finalize(obj);
      }
  }
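
For context on the object_unref() hunk above: a minimal standalone sketch of the
finalize-once refcount pattern it switches to. This is hypothetical illustration
code (C11 atomics and made-up Obj/obj_unref names), not QEMU's atomic_fetch_dec()
helper; the point is that only the caller that sees the pre-decrement value 1 has
dropped the count to zero, so the finalizer runs exactly once even with
concurrent unrefs.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    atomic_uint ref;
} Obj;

static void obj_finalize(Obj *obj)
{
    printf("finalizing %p\n", (void *)obj);
    free(obj);
}

static void obj_unref(Obj *obj)
{
    /* fetch_sub returns the value *before* the decrement, so only the
     * caller that drops the count from 1 to 0 runs the finalizer. */
    if (atomic_fetch_sub(&obj->ref, 1) == 1) {
        obj_finalize(obj);
    }
}

int main(void)
{
    Obj *obj = malloc(sizeof(*obj));
    atomic_init(&obj->ref, 2);
    obj_unref(obj);   /* ref: 2 -> 1, no finalize */
    obj_unref(obj);   /* ref: 1 -> 0, finalize    */
    return 0;
}
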
index 7ef9063,0000000..e44e2c0
mode 100644,000000..100644
--- /dev/null
@@@ -1,1228 -1,0 +1,1228 @@@
- void hax_cpu_synchronize_state(CPUArchState *env)
 +/*
 + * QEMU KVM support
 + *
 + * Copyright IBM, Corp. 2008
 + *           Red Hat, Inc. 2008
 + *
 + * Authors:
 + *  Anthony Liguori   <aliguori@us.ibm.com>
 + *  Glauber Costa     <gcosta@redhat.com>
 + *
 + * Copyright (c) 2011 Intel Corporation
 + *  Written by:
 + *  Jiang Yunhong<yunhong.jiang@intel.com>
 + *  Xin Xiaohui<xiaohui.xin@intel.com>
 + *  Zhang Xiantao<xiantao.zhang@intel.com>
 + *
 + * This work is licensed under the terms of the GNU GPL, version 2 or later.
 + * See the COPYING file in the top-level directory.
 + *
 + */
 +
 +/*
 + * HAX common code for both windows and darwin
 + * some code from KVM side
 + */
 +
 +#include "hax-i386.h"
 +
 +#define HAX_EMUL_ONE    0x1
 +#define HAX_EMUL_REAL   0x2
 +#define HAX_EMUL_HLT    0x4
 +#define HAX_EMUL_EXITLOOP    0x5
 +
 +#define HAX_EMULATE_STATE_MMIO  0x1
 +#define HAX_EMULATE_STATE_REAL  0x2
 +#define HAX_EMULATE_STATE_NONE  0x3
 +#define HAX_EMULATE_STATE_INITIAL       0x4
 +
 +struct hax_state hax_global;
 +int ret_hax_init = 0;
 +static int hax_disabled = 1;
 +
 +int hax_support = -1;
 +
 +/* Called after hax_init */
 +int hax_enabled(void)
 +{
 +    return (!hax_disabled && hax_support);
 +}
 +
 +void hax_disable(int disable)
 +{
 +      hax_disabled = disable;
 +}
 +
 +/* Currently non-PG modes are emulated by QEMU */
 +int hax_vcpu_emulation_mode(CPUArchState *env)
 +{
 +    return !(env->cr[0] & CR0_PG_MASK);
 +}
 +
 +static int hax_prepare_emulation(CPUArchState *env)
 +{
 +    /* Flush all emulation states */
 +    tlb_flush(env, 1);
 +    tb_flush(env);
 +    /* Sync the vcpu state from hax kernel module */
 +    hax_vcpu_sync_state(env, 0);
 +    return 0;
 +}
 +
 +/*
 + * Check whether to break the translation block loop
 + * break the tb loop after one MMIO emulation, or after finishing emulation mode
 + */
 +static int hax_stop_tbloop(CPUArchState *env)
 +{
 +      switch (env->hax_vcpu->emulation_state)
 +      {
 +      case HAX_EMULATE_STATE_MMIO:
 +              if (env->hax_vcpu->resync) {
 +                      hax_prepare_emulation(env);     
 +                      env->hax_vcpu->resync = 0;
 +                      return 0;
 +              }
 +              return 1;
 +              break;
 +      case HAX_EMULATE_STATE_INITIAL:
 +        case HAX_EMULATE_STATE_REAL:
 +              if (!hax_vcpu_emulation_mode(env))
 +                      return 1;
 +            break;
 +        default:
 +            dprint("Invalid emulation state in hax_stop_tbloop state %x\n",
 +              env->hax_vcpu->emulation_state);
 +            break;
 +    }
 +
 +    return 0;
 +}
 +
 +int hax_stop_emulation(CPUArchState *env)
 +{
 +    if (hax_stop_tbloop(env))
 +    {
 +        env->hax_vcpu->emulation_state =  HAX_EMULATE_STATE_NONE;
 +        /*
 +         * QEMU emulation changes vcpu state,
 +         * Sync the vcpu state to HAX kernel module
 +         */
 +        hax_vcpu_sync_state(env, 1);
 +        return 1;
 +    }
 +
 +    return 0;
 +}
 +
 +int hax_stop_translate(CPUArchState *env)
 +{
 +    struct hax_vcpu_state *vstate;
 +
 +    vstate = env->hax_vcpu;
 +    assert(vstate->emulation_state);
 +    if (vstate->emulation_state == HAX_EMULATE_STATE_MMIO )
 +        return 1;
 +
 +    return 0;
 +}
 +
 +int valid_hax_tunnel_size(uint16_t size)
 +{
 +    return size >= sizeof(struct hax_tunnel);
 +}
 +
 +hax_fd hax_vcpu_get_fd(CPUArchState *env)
 +{
 +    struct hax_vcpu_state *vcpu = env->hax_vcpu;
 +    if (!vcpu)
 +        return HAX_INVALID_FD;
 +    return vcpu->fd;
 +}
 +
 +/* Current version */
 +uint32_t hax_cur_version = 0x1;
 +/* Minimum required HAX kernel version */
 +uint32_t hax_lest_version = 0x1;
 +
 +static int hax_get_capability(struct hax_state *hax)
 +{
 +      int ret;
 +      struct hax_capabilityinfo capinfo, *cap = &capinfo;
 +
 +      ret = hax_capability(hax, cap);
 +      if (ret)
 +              return ret;
 +
 +      if ( ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) ==
 +            HAX_CAP_STATUS_NOTWORKING ))
 +      {
 +              if (cap->winfo & HAX_CAP_FAILREASON_VT)
 +                      dprint("VT-x feature is not enabled, which will cause the HAX driver not to work.\n");
 +              else if (cap->winfo & HAX_CAP_FAILREASON_NX)
 +                      dprint("NX feature is not enabled, which will cause the HAX driver not to work.\n");
 +              return -ENXIO;
 +      }
 +
 +      if (cap->wstatus & HAX_CAP_MEMQUOTA)
 +      {
 +              if (cap->mem_quota < hax->mem_quota)
 +              {
 +                      dprint("The memory needed by this VM exceeds the driver limit.\n");
 +                      return -ENOSPC;
 +              }
 +      }
 +      return 0;
 +}
 +
 +static int hax_version_support(struct hax_state *hax)
 +{
 +    int ret;
 +    struct hax_module_version version;
 +
 +    ret = hax_mod_version(hax, &version);
 +    if (ret < 0)
 +        return 0;
 +
 +    if ( (hax_lest_version > version.cur_version) ||
 +         (hax_cur_version < version.compat_version) )
 +        return 0;
 +
 +    return 1;
 +}
 +
 +int hax_vcpu_create(int id)
 +{
 +    struct hax_vcpu_state *vcpu = NULL;
 +    int ret;
 +
 +    if (!hax_global.vm)
 +    {
 +        dprint("vcpu %x creation failed, vm is null\n", id);
 +        return -1;
 +    }
 +
 +    if (hax_global.vm->vcpus[id])
 +    {
 +        dprint("vcpu %x allocated already\n", id);
 +        return 0;
 +    }
 +
 +    vcpu = g_malloc(sizeof(struct hax_vcpu_state));
 +    if (!vcpu)
 +    {
 +        dprint("Failed to alloc vcpu state\n");
 +        return -ENOMEM;
 +    }
 +
 +    memset(vcpu, 0, sizeof(struct hax_vcpu_state));
 +
 +    ret = hax_host_create_vcpu(hax_global.vm->fd, id);
 +    if (ret)
 +    {
 +        dprint("Failed to create vcpu %x\n", id);
 +        goto error;
 +    }
 +
 +    vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
 +    if (hax_invalid_fd(vcpu->fd))
 +    {
 +        dprint("Failed to open the vcpu\n");
 +        ret = -ENODEV;
 +        goto error;
 +    }
 +
 +    hax_global.vm->vcpus[id] = vcpu;
 +
 +    ret = hax_host_setup_vcpu_channel(vcpu);
 +    if (ret)
 +    {
 +        dprint("Invalid hax tunnel size \n");
 +        ret = -EINVAL;
 +        goto error;
 +    }
 +    return 0;
 +
 +error:
 +    /* vcpu and tunnel will be closed automatically */
 +    if (vcpu && !hax_invalid_fd(vcpu->fd))
 +        hax_close_fd(vcpu->fd);
 +
 +    hax_global.vm->vcpus[id] = NULL;
 +    g_free(vcpu);
 +    return -1;
 +}
 +
 +int hax_vcpu_destroy(CPUArchState *env)
 +{
 +    struct hax_vcpu_state *vcpu = env->hax_vcpu;
 +
 +    if (!hax_global.vm)
 +    {
 +        dprint("vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
 +        return -1;
 +    }
 +
 +    if (!vcpu)
 +        return 0;
 +
 +    /*
 +     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
 +     * 2. closing the fd causes the hax module vcpu to be cleaned up
 +     */
 +    hax_close_fd(vcpu->fd);
 +    hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
 +    g_free(vcpu);
 +    return 0;
 +}
 +
 +int hax_init_vcpu(CPUArchState *env)
 +{
 +    int ret;
 +    CPUState *cpu = ENV_GET_CPU(env);
 +
 +    ret = hax_vcpu_create(cpu->cpu_index);
 +    if (ret < 0)
 +    {
 +        dprint("Failed to create HAX vcpu\n");
 +        exit(-1);
 +    }
 +
 +    env->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
 +    env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL;
 +    env->hax_vcpu_dirty = 1;
 +    qemu_register_reset(hax_reset_vcpu_state, env);
 +
 +    return ret;
 +}
 +
 +struct hax_vm *hax_vm_create(struct hax_state *hax)
 +{
 +    struct hax_vm *vm;
 +    int vm_id = 0, ret;
 +    char *vm_name = NULL;
 +
 +    if (hax_invalid_fd(hax->fd))
 +        return NULL;
 +
 +    if (hax->vm)
 +        return hax->vm;
 +
 +    vm = g_malloc(sizeof(struct hax_vm));
 +    if (!vm)
 +        return NULL;
 +    memset(vm, 0, sizeof(struct hax_vm));
 +    ret = hax_host_create_vm(hax, &vm_id);
 +    if (ret) {
 +        dprint("Failed to create vm %x\n", ret);
 +        goto error;
 +    }
 +    vm->id = vm_id;
 +    vm->fd = hax_host_open_vm(hax, vm_id);
 +    if (hax_invalid_fd(vm->fd))
 +    {
 +        dprint("Open the vm device error: %s\n", vm_name);
 +        goto error;
 +    }
 +
 +    hax->vm = vm;
 +    dprint("End of VM create, id %d\n", vm->id);
 +    return vm;
 +
 +error:
 +    g_free(vm);
 +    hax->vm = NULL;
 +    return NULL;
 +}
 +
 +int hax_vm_destroy(struct hax_vm *vm)
 +{
 +    int i;
 +
 +    for (i = 0; i < HAX_MAX_VCPU; i++)
 +        if (vm->vcpus[i])
 +        {
 +            dprint("VCPU should be cleaned before vm clean\n");
 +            return -1;
 +        }
 +    hax_close_fd(vm->fd);
 +    g_free(vm);
 +    hax_global.vm = NULL;
 +    return 0;
 +}
 +
 +static void
 +hax_region_add(MemoryListener *listener, MemoryRegionSection *section)
 +{
 +      hax_set_phys_mem(section);
 +}
 +
 +static void
 +hax_region_del(MemoryListener *listener, MemoryRegionSection *section)
 +{
 +      hax_set_phys_mem(section);
 +}
 +
 +
 +/* currently we fake the dirty bitmap sync, always dirty */
 +static void hax_log_sync(MemoryListener *listener, MemoryRegionSection *section)
 +{
 +    MemoryRegion *mr = section->mr;
 +    unsigned long c;
 +    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
 +                      HOST_LONG_BITS;
 +    unsigned long bitmap[len];
 +    int i, j;
 +
 +    for (i = 0; i < len; i++) {
 +          bitmap[i] = 1;
 +          c = leul_to_cpu(bitmap[i]);
 +          do {
 +              j = ffsl(c) - 1;
 +              c &= ~(1ul << j);
 +            memory_region_set_dirty(mr, (i * HOST_LONG_BITS + j) *
 +                      TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
 +          } while (c != 0);
 +    }
 +}
 +
 +static void hax_log_global_start(struct MemoryListener *listener)
 +{
 +}
 +
 +static void hax_log_global_stop(struct MemoryListener *listener)
 +{
 +}
 +
 +static void hax_log_start(MemoryListener *listener,
 +                           MemoryRegionSection *section)
 +{
 +}
 +
 +static void hax_log_stop(MemoryListener *listener,
 +                          MemoryRegionSection *section)
 +{
 +}
 +
 +static void hax_begin(MemoryListener *listener)
 +{
 +}
 +
 +static void hax_commit(MemoryListener *listener)
 +{
 +}
 +
 +static void hax_region_nop(MemoryListener *listener,
 +                      MemoryRegionSection *section)
 +{
 +}
 +
 +static MemoryListener hax_memory_listener = {
 +    .begin = hax_begin,
 +    .commit = hax_commit,
 +    .region_add = hax_region_add,
 +    .region_del = hax_region_del,
 +    .region_nop = hax_region_nop,
 +    .log_start = hax_log_start,
 +    .log_stop = hax_log_stop,
 +    .log_sync = hax_log_sync,
 +    .log_global_start = hax_log_global_start,
 +    .log_global_stop = hax_log_global_stop,
 +};
 +
 +static void hax_handle_interrupt(CPUArchState *env, int mask)
 +{
 +    CPUState *cpu = ENV_GET_CPU(env);
 +    cpu->interrupt_request |= mask;
 +
 +    if (!qemu_cpu_is_self(env)) {
 +        qemu_cpu_kick(env);
 +    }
 +}
 +
 +int hax_pre_init(uint64_t ram_size)
 +{
 +      struct hax_state *hax = NULL;
 +
 +      dprint("hax_disabled %d\n", hax_disabled);
 +      if (hax_disabled)
 +              return 0;
 +      hax = &hax_global;
 +      memset(hax, 0, sizeof(struct hax_state));
 +      hax->mem_quota = ram_size;
 +      dprint("ram_size %lx\n", ram_size);
 +      return 0;
 +}
 +
 +static int hax_init(void)
 +{
 +    struct hax_state *hax = NULL;
 +    int ret;
 +
 +    hax_support = 0;
 +
 +    hax = &hax_global;
 +
 +
 +    hax->fd = hax_mod_open();
 +    if (hax_invalid_fd(hax->fd))
 +    {
 +        hax->fd = 0;
 +        ret = -ENODEV;
 +        goto error;
 +    }
 +
 +    ret = hax_get_capability(hax);
 +
 +    if (ret)
 +    {
 +      if (ret != -ENOSPC)
 +          ret = -EINVAL;
 +      goto error;
 +    }
 +
 +    if (!hax_version_support(hax))
 +    {
 +        dprint("Incompatible HAX version. QEMU current version %x ", hax_cur_version);
 +        dprint("requires at least HAX version %x\n", hax_lest_version);
 +        ret = -EINVAL;
 +        goto error;
 +    }
 +
 +    hax->vm = hax_vm_create(hax);
 +    if (!hax->vm)
 +    {
 +        dprint("Failed to create HAX VM\n");
 +        ret = -EINVAL;
 +        goto error;
 +    }
 +
 +    memory_listener_register(&hax_memory_listener, NULL);
 +
 +    hax_support = 1;
 +
 +    return ret;
 +error:
 +    if (hax->vm)
 +        hax_vm_destroy(hax->vm);
 +    if (hax->fd)
 +        hax_mod_close(hax);
 +
 +    return ret;
 +}
 +
 +int hax_accel_init(void)
 +{
 +      if (hax_disabled) {
 +              dprint("HAX is disabled and emulator runs in emulation mode.\n");
 +              return 0;
 +      }
 +
 +      ret_hax_init = hax_init();
 +      if (ret_hax_init && (ret_hax_init != -ENOSPC)) {
 +              dprint("No accelerator found.\n");
 +          return ret_hax_init;
 +      } else {
 +              dprint("HAX is %s and emulator runs in %s mode.\n",
 +              !ret_hax_init ? "working" : "not working",
 +              !ret_hax_init ? "fast virt" : "emulation");
 +              return 0;
 +      }
 +}
 +
 +int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, int direction,
 +  int size, int count, void *buffer)
 +{
 +    uint8_t *ptr;
 +    int i;
 +
 +    if (!df)
 +        ptr = (uint8_t *)buffer;
 +    else
 +        ptr = buffer + size * count - size;
 +    for (i = 0; i < count; i++)
 +    {
 +        if (direction == HAX_EXIT_IO_IN) {
 +            switch (size) {
 +                case 1:
 +                    stb_p(ptr, cpu_inb(port));
 +                    break;
 +                case 2:
 +                    stw_p(ptr, cpu_inw(port));
 +                    break;
 +                case 4:
 +                    stl_p(ptr, cpu_inl(port));
 +                    break;
 +            }
 +        } else {
 +            switch (size) {
 +                case 1:
 +                    cpu_outb(port, ldub_p(ptr));
 +                    break;
 +                case 2:
 +                    cpu_outw(port, lduw_p(ptr));
 +                    break;
 +                case 4:
 +                    cpu_outl(port, ldl_p(ptr));
 +                    break;
 +            }
 +        }
 +        if (!df)
 +            ptr += size;
 +        else
 +            ptr -= size;
 +    }
 +
 +    return 0;
 +}
 +
 +static int hax_vcpu_interrupt(CPUArchState *env)
 +{
 +    struct hax_vcpu_state *vcpu = env->hax_vcpu;
 +    struct hax_tunnel *ht = vcpu->tunnel;
 +    CPUState *cpu = ENV_GET_CPU(env);
 +
 +    /*
 +     * Try to inject an interrupt if the guest can accept it
 +     * Unlike KVM, the HAX kernel checks the eflags itself, instead of qemu
 +     */
 +    if (ht->ready_for_interrupt_injection &&
 +      (cpu->interrupt_request & CPU_INTERRUPT_HARD))
 +    {
 +        int irq;
 +
 +        cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
 +        irq = cpu_get_pic_interrupt(env);
 +        if (irq >= 0) {
 +            hax_inject_interrupt(env, irq);
 +        }
 +    }
 +
 +    /* If we have an interrupt but the guest is not ready to receive an
 +     * interrupt, request an interrupt window exit.  This will
 +     * cause a return to userspace as soon as the guest is ready to
 +     * receive interrupts. */
 +    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD))
 +        ht->request_interrupt_window = 1;
 +    else
 +        ht->request_interrupt_window = 0;
 +    return 0;
 +}
 +
 +void hax_raise_event(CPUArchState *env)
 +{
 +    struct hax_vcpu_state *vcpu = env->hax_vcpu;
 +
 +    if (!vcpu)
 +        return;
 +    vcpu->tunnel->user_event_pending = 1;
 +}
 +
 +/*
 + * Ask the hax kernel module to run the CPU for us until:
 + * 1. The guest crashes or shuts down
 + * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO instruction
 + *    or enters emulation mode (non-PG mode)
 + * 3. The guest executes HLT
 + * 4. QEMU has a signal/event pending
 + * 5. An unknown VMX exit happens
 + */
 +extern void qemu_system_reset_request(void);
 +static int hax_vcpu_hax_exec(CPUArchState *env)
 +{
 +    int ret = 0;
 +    struct hax_vcpu_state *vcpu = env->hax_vcpu;
 +    struct hax_tunnel *ht = vcpu->tunnel;
 +    CPUState *cpu = ENV_GET_CPU(env);
 +
 +    if (hax_vcpu_emulation_mode(env))
 +    {
 +        dprint("Trying to vcpu execute at eip:%lx\n", env->eip);
 +        return  HAX_EMUL_EXITLOOP;
 +    }
 +
 +    
 +    //hax_cpu_synchronize_state(env);
 +
 +    do {
 +        int hax_ret;
 +
 +      
 +        if (cpu->exit_request) {
 +            ret = HAX_EMUL_EXITLOOP ;
 +            break;
 +        }
 +
 +#if 0
 +      if (env->hax_vcpu_dirty) {
 +              hax_vcpu_sync_state(env, 1);
 +              env->hax_vcpu_dirty = 0;
 +      }
 +#endif
 +
 +        hax_vcpu_interrupt(env);
 +
 +        hax_ret = hax_vcpu_run(vcpu);
 +
 +        /* Simply continue the vcpu_run if system call interrupted */
 +        if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
 +            dprint("io window interrupted\n");
 +            continue;
 +        }
 +
 +        if (hax_ret < 0)
 +        {
 +            dprint("vcpu run failed for vcpu  %x\n", vcpu->vcpu_id);
 +            abort();
 +        }
 +        switch (ht->_exit_status)
 +        {
 +            case HAX_EXIT_IO:
 +                {
 +                      ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
 +                      ht->pio._direction,
 +                      ht->pio._size, ht->pio._count, vcpu->iobuf);
 +                }
 +                break;
 +            case HAX_EXIT_MMIO:
 +                ret = HAX_EMUL_ONE;
 +                break;
 +            case HAX_EXIT_REAL:
 +                ret = HAX_EMUL_REAL;
 +                break;
 +                /* Guest state changed, currently only for shutdown */
 +            case HAX_EXIT_STATECHANGE:
 +              dprint("VCPU shutdown request\n");
 +                qemu_system_reset_request();
 +                hax_prepare_emulation(env);
 +                cpu_dump_state(env, stderr, fprintf, 0);
 +                ret = HAX_EMUL_EXITLOOP;
 +                break;
 +            case HAX_EXIT_UNKNOWN_VMEXIT:
 +                dprint("Unknown VMX exit %x from guest\n", ht->_exit_reason);
 +                qemu_system_reset_request();
 +                hax_prepare_emulation(env);
 +                cpu_dump_state(env, stderr, fprintf, 0);
 +                ret = HAX_EMUL_EXITLOOP;
 +                break;
 +            case HAX_EXIT_HLT:
 +                if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
 +                  !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
 +                    /* hlt instruction with interrupt disabled is shutdown */
 +                    env->eflags |= IF_MASK;
 +                    cpu->halted = 1;
 +                    env->exception_index = EXCP_HLT;
 +                    ret = HAX_EMUL_HLT;
 +                }
 +                break;
 +                /* for these exit types, simply continue running in the hax module */
 +            case HAX_EXIT_INTERRUPT:
 +            case HAX_EXIT_PAUSED:
 +                break;
 +            default:
 +                dprint("Unknown exit %x from hax\n", ht->_exit_status);
 +                qemu_system_reset_request();
 +                hax_prepare_emulation(env);
 +                cpu_dump_state(env, stderr, fprintf, 0);
 +                ret = HAX_EMUL_EXITLOOP;
 +                break;
 +        }
 +    }while (!ret);
 +
 +    if (cpu->exit_request) {
 +        cpu->exit_request = 0;
 +        env->exception_index = EXCP_INTERRUPT;
 +    }
 +    return ret;
 +}
 +
 +static void do_hax_cpu_synchronize_state(void *_env)
 +{
 +      CPUArchState *env = _env;
 +      if (!env->hax_vcpu_dirty) {
 +              hax_vcpu_sync_state(env, 0);
 +              env->hax_vcpu_dirty = 1;
 +      }
 +}
 +
-       if (!env->hax_vcpu_dirty) {
-               run_on_cpu(env, do_hax_cpu_synchronize_state, env);
++void hax_cpu_synchronize_state(CPUState *cpu)
 +{
++      if (!cpu->env.hax_vcpu_dirty) {
++              run_on_cpu(cpu, do_hax_cpu_synchronize_state, cpu);
 +      }
 +}
 +
 +void hax_cpu_synchronize_post_reset(CPUArchState *env)
 +{
 +      hax_vcpu_sync_state(env, 1);
 +      env->hax_vcpu_dirty = 0;
 +}
 +
 +void hax_cpu_synchronize_post_init(CPUArchState *env)
 +{
 +      hax_vcpu_sync_state(env, 1);
 +      env->hax_vcpu_dirty = 0;
 +}
 +
 +/*
 + * return 1 when need emulate, 0 when need exit loop
 + */
 +int hax_vcpu_exec(CPUArchState *env)
 +{
 +    int next = 0, ret = 0;
 +    struct hax_vcpu_state *vcpu;
 +
 +    if (env->hax_vcpu->emulation_state != HAX_EMULATE_STATE_NONE)
 +        return 1;
 +
 +    vcpu = env->hax_vcpu;
 +    next = hax_vcpu_hax_exec(env);
 +    switch (next)
 +    {
 +        case HAX_EMUL_ONE:
 +            ret = 1;
 +            env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_MMIO;
 +            hax_prepare_emulation(env);
 +            break;
 +        case HAX_EMUL_REAL:
 +            ret = 1;
 +            env->hax_vcpu->emulation_state =
 +              HAX_EMULATE_STATE_REAL;
 +            hax_prepare_emulation(env);
 +            break;
 +        case HAX_EMUL_HLT:
 +        case HAX_EMUL_EXITLOOP:
 +            break;
 +        default:
 +            dprint("Unknown hax vcpu exec return %x\n", next);
 +            abort();
 +    }
 +
 +    return ret;
 +}
 +
 +#define HAX_RAM_INFO_ROM 0x1
 +
 +static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
 +{
 +    memset(lhs, 0, sizeof(struct segment_desc_t ));
 +    lhs->selector = rhs->selector;
 +    lhs->base = rhs->base;
 +    lhs->limit = rhs->limit;
 +    lhs->type = 3;
 +    lhs->present = 1;
 +    lhs->dpl = 3;
 +    lhs->operand_size = 0;
 +    lhs->desc = 1;
 +    lhs->long_mode = 0;
 +    lhs->granularity = 0;
 +    lhs->available = 0;
 +}
 +
 +static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
 +{
 +    lhs->selector = rhs->selector;
 +    lhs->base = rhs->base;
 +    lhs->limit = rhs->limit;
 +    lhs->flags =
 +      (rhs->type << DESC_TYPE_SHIFT)
 +      | (rhs->present * DESC_P_MASK)
 +      | (rhs->dpl << DESC_DPL_SHIFT)
 +      | (rhs->operand_size << DESC_B_SHIFT)
 +      | (rhs->desc * DESC_S_MASK)
 +      | (rhs->long_mode << DESC_L_SHIFT)
 +      | (rhs->granularity * DESC_G_MASK)
 +      | (rhs->available * DESC_AVL_MASK);
 +}
 +
 +static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
 +{
 +    unsigned flags = rhs->flags;
 +
 +    memset(lhs, 0, sizeof(struct segment_desc_t));
 +    lhs->selector = rhs->selector;
 +    lhs->base = rhs->base;
 +    lhs->limit = rhs->limit;
 +    lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
 +    lhs->present = (flags & DESC_P_MASK) != 0;
 +    lhs->dpl = rhs->selector & 3;
 +    lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
 +    lhs->desc = (flags & DESC_S_MASK) != 0;
 +    lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
 +    lhs->granularity = (flags & DESC_G_MASK) != 0;
 +    lhs->available = (flags & DESC_AVL_MASK) != 0;
 +}
 +
 +static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
 +{
 +    target_ulong reg = *hax_reg;
 +
 +    if (set)
 +        *hax_reg = *qemu_reg;
 +    else
 +        *qemu_reg = reg;
 +}
 +
 +/* The sregs has been synced with HAX kernel already before this call */
 +static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
 +{
 +    get_seg(&env->segs[R_CS], &sregs->_cs);
 +    get_seg(&env->segs[R_DS], &sregs->_ds);
 +    get_seg(&env->segs[R_ES], &sregs->_es);
 +    get_seg(&env->segs[R_FS], &sregs->_fs);
 +    get_seg(&env->segs[R_GS], &sregs->_gs);
 +    get_seg(&env->segs[R_SS], &sregs->_ss);
 +
 +    get_seg(&env->tr, &sregs->_tr);
 +    get_seg(&env->ldt, &sregs->_ldt);
 +    env->idt.limit = sregs->_idt.limit;
 +    env->idt.base = sregs->_idt.base;
 +    env->gdt.limit = sregs->_gdt.limit;
 +    env->gdt.base = sregs->_gdt.base;
 +    return 0;
 +}
 +
 +static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
 +{
 +    if ((env->eflags & VM_MASK)) {
 +        set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
 +        set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
 +        set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
 +        set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
 +        set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
 +        set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
 +    } else {
 +        set_seg(&sregs->_cs, &env->segs[R_CS]);
 +        set_seg(&sregs->_ds, &env->segs[R_DS]);
 +        set_seg(&sregs->_es, &env->segs[R_ES]);
 +        set_seg(&sregs->_fs, &env->segs[R_FS]);
 +        set_seg(&sregs->_gs, &env->segs[R_GS]);
 +        set_seg(&sregs->_ss, &env->segs[R_SS]);
 +
 +        if (env->cr[0] & CR0_PE_MASK) {
 +            /* force ss cpl to cs cpl */
 +            sregs->_ss.selector = (sregs->_ss.selector & ~3) |
 +              (sregs->_cs.selector & 3);
 +            sregs->_ss.dpl = sregs->_ss.selector & 3;
 +        }
 +    }
 +
 +    set_seg(&sregs->_tr, &env->tr);
 +    set_seg(&sregs->_ldt, &env->ldt);
 +    sregs->_idt.limit = env->idt.limit;
 +    sregs->_idt.base = env->idt.base;
 +    sregs->_gdt.limit = env->gdt.limit;
 +    sregs->_gdt.base = env->gdt.base;
 +    return 0;
 +}
 +
 +/*
 + * After get the state from the kernel module, some
 + * qemu emulator state need be updated also
 + */
 +static int hax_setup_qemu_emulator(CPUArchState *env)
 +{
 +
 +#define HFLAG_COPY_MASK ~( \
 +  HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
 +  HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
 +  HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
 +  HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
 +
 +    uint32_t hflags;
 +
 +    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
 +    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
 +    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
 +      (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
 +    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
 +    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
 +      (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
 +
 +    if (env->efer & MSR_EFER_LMA) {
 +        hflags |= HF_LMA_MASK;
 +    }
 +
 +    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
 +        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
 +    } else {
 +        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
 +          (DESC_B_SHIFT - HF_CS32_SHIFT);
 +        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
 +          (DESC_B_SHIFT - HF_SS32_SHIFT);
 +        if (!(env->cr[0] & CR0_PE_MASK) ||
 +          (env->eflags & VM_MASK) ||
 +          !(hflags & HF_CS32_MASK)) {
 +            hflags |= HF_ADDSEG_MASK;
 +        } else {
 +            hflags |= ((env->segs[R_DS].base |
 +                  env->segs[R_ES].base |
 +                  env->segs[R_SS].base) != 0) <<
 +              HF_ADDSEG_SHIFT;
 +        }
 +    }
 +    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
 +    return 0;
 +}
 +
 +static int hax_sync_vcpu_register(CPUArchState *env, int set)
 +{
 +    struct vcpu_state_t regs;
 +    int ret;
 +    memset(&regs, 0, sizeof(struct vcpu_state_t));
 +
 +    if (!set)
 +    {
 +        ret = hax_sync_vcpu_state(env, &regs, 0);
 +        if (ret < 0)
 +            return -1;
 +    }
 +
 +    /*generic register */
 +    hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
 +    hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
 +    hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
 +    hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
 +    hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
 +    hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
 +    hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
 +    hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
 +
 +    hax_getput_reg(&regs._rflags, &env->eflags, set);
 +    hax_getput_reg(&regs._rip, &env->eip, set);
 +
 +    if (set)
 +    {
 +
 +        regs._cr0 = env->cr[0];
 +        regs._cr2 = env->cr[2];
 +        regs._cr3 = env->cr[3];
 +        regs._cr4 = env->cr[4];
 +        hax_set_segments(env, &regs);
 +    }
 +    else
 +    {
 +        env->cr[0] = regs._cr0;
 +        env->cr[2] = regs._cr2;
 +        env->cr[3] = regs._cr3;
 +        env->cr[4] = regs._cr4;
 +        hax_get_segments(env, &regs);
 +    }
 +
 +    if (set)
 +    {
 +        ret = hax_sync_vcpu_state(env, &regs, 1);
 +        if (ret < 0)
 +            return -1;
 +    }
 +    if (!set)
 +        hax_setup_qemu_emulator(env);
 +    return 0;
 +}
 +
 +static void hax_msr_entry_set(struct vmx_msr *item,
 +  uint32_t index, uint64_t value)
 +{
 +    item->entry = index;
 +    item->value = value;
 +}
 +
 +static int hax_get_msrs(CPUArchState *env)
 +{
 +    struct hax_msr_data md;
 +    struct vmx_msr *msrs = md.entries;
 +    int ret, i, n;
 +
 +    n = 0;
 +    msrs[n++].entry = MSR_IA32_SYSENTER_CS;
 +    msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
 +    msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
 +    msrs[n++].entry = MSR_IA32_TSC;
 +    md.nr_msr = n;
 +    ret = hax_sync_msr(env, &md, 0);
 +    if (ret < 0)
 +        return ret;
 +
 +    for (i = 0; i < md.done; i++) {
 +        switch (msrs[i].entry) {
 +            case MSR_IA32_SYSENTER_CS:
 +                env->sysenter_cs = msrs[i].value;
 +                break;
 +            case MSR_IA32_SYSENTER_ESP:
 +                env->sysenter_esp = msrs[i].value;
 +                break;
 +            case MSR_IA32_SYSENTER_EIP:
 +                env->sysenter_eip = msrs[i].value;
 +                break;
 +            case MSR_IA32_TSC:
 +                env->tsc = msrs[i].value;
 +                break;
 +        }
 +    }
 +
 +    return 0;
 +}
 +
 +static int hax_set_msrs(CPUArchState *env)
 +{
 +    struct hax_msr_data md;
 +    struct vmx_msr *msrs;
 +    msrs = md.entries;
 +    int n = 0;
 +
 +    memset(&md, 0, sizeof(struct hax_msr_data));
 +    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
 +    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
 +    hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
 +    hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
 +    md.nr_msr = n;
 +    md.done = 0;
 +
 +    return hax_sync_msr(env, &md, 1);
 +
 +}
 +
 +static int hax_get_fpu(CPUArchState *env)
 +{
 +    struct fx_layout fpu;
 +    int i, ret;
 +
 +    ret = hax_sync_fpu(env, &fpu, 0);
 +    if (ret < 0)
 +        return ret;
 +
 +    env->fpstt = (fpu.fsw >> 11) & 7;
 +    env->fpus = fpu.fsw;
 +    env->fpuc = fpu.fcw;
 +    for (i = 0; i < 8; ++i)
 +        env->fptags[i] = !((fpu.ftw >> i) & 1);
 +    memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));
 +
 +    memcpy(env->xmm_regs, fpu.mmx_1, sizeof(fpu.mmx_1));
 +    memcpy((XMMReg *)(env->xmm_regs) + 8, fpu.mmx_2, sizeof(fpu.mmx_2));
 +    env->mxcsr = fpu.mxcsr;
 +
 +    return 0;
 +}
 +
 +static int hax_set_fpu(CPUArchState *env)
 +{
 +    struct fx_layout fpu;
 +    int i;
 +
 +    memset(&fpu, 0, sizeof(fpu));
 +    fpu.fsw = env->fpus & ~(7 << 11);
 +    fpu.fsw |= (env->fpstt & 7) << 11;
 +    fpu.fcw = env->fpuc;
 +
 +    for (i = 0; i < 8; ++i)
 +        fpu.ftw |= (!env->fptags[i]) << i;
 +
 +    memcpy(fpu.st_mm, env->fpregs, sizeof (env->fpregs));
 +    memcpy(fpu.mmx_1, env->xmm_regs, sizeof (fpu.mmx_1));
 +    memcpy(fpu.mmx_2, (XMMReg *)(env->xmm_regs) + 8, sizeof (fpu.mmx_2));
 +
 +    fpu.mxcsr = env->mxcsr;
 +
 +    return hax_sync_fpu(env, &fpu, 1);
 +}
 +
 +int hax_arch_get_registers(CPUArchState *env)
 +{
 +    int ret;
 +
 +    ret = hax_sync_vcpu_register(env, 0);
 +    if (ret < 0)
 +        return ret;
 +
 +    ret = hax_get_fpu(env);
 +    if (ret < 0)
 +        return ret;
 +
 +    ret = hax_get_msrs(env);
 +    if (ret < 0)
 +        return ret;
 +
 +    return 0;
 +}
 +
 +static int hax_arch_set_registers(CPUArchState *env)
 +{
 +    int ret;
 +    ret = hax_sync_vcpu_register(env, 1);
 +
 +    if (ret < 0)
 +    {
 +        dprint("Failed to sync vcpu reg\n");
 +        return ret;
 +    }
 +    ret = hax_set_fpu(env);
 +    if (ret < 0)
 +    {
 +        dprint("FPU failed\n");
 +        return ret;
 +    }
 +    ret = hax_set_msrs(env);
 +    if (ret < 0)
 +    {
 +        dprint("MSR failed\n");
 +        return ret;
 +    }
 +
 +    return 0;
 +}
 +
 +void hax_vcpu_sync_state(CPUArchState *env, int modified)
 +{
 +    if (hax_enabled()) {
 +        if (modified)
 +            hax_arch_set_registers(env);
 +        else
 +            hax_arch_get_registers(env);
 +    }
 +}
 +
 +/*
 + * much simpler than kvm, at least in the first stage, because:
 + * we don't need to consider device pass-through or the framebuffer,
 + * and we may even be able to remove the BIOS entirely
 + */
 +int hax_sync_vcpus(void)
 +{
 +    if (hax_enabled())
 +    {
 +        CPUArchState *env;
 +
 +        env = first_cpu;
 +        if (!env)
 +            return 0;
 +
 +        for (; env != NULL; env = env->next_cpu) {
 +            int ret;
 +
 +            ret = hax_arch_set_registers(env);
 +            if (ret < 0)
 +            {
 +                dprint("Failed to sync HAX vcpu context\n");
 +                exit(1);
 +            }
 +        }
 +    }
 +
 +    return 0;
 +}
 +void hax_reset_vcpu_state(void *opaque)
 +{
 +      CPUArchState *env = opaque;
 +    for (env = first_cpu; env != NULL; env = env->next_cpu)
 +       {
 +               dprint("*********ReSet hax_vcpu->emulation_state \n");
 +               env->hax_vcpu->emulation_state  = HAX_EMULATE_STATE_INITIAL;
 +               env->hax_vcpu->tunnel->user_event_pending = 0;
 +               env->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
 +
 +       }
 +
 +}
 +
 +
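
One note on hax_handle_io() in the new file above: when the direction flag (df)
is set, string I/O walks the buffer backwards, which is why ptr starts at
buffer + size * count - size. A small hypothetical sketch (standalone, not code
from this tree) of the offsets visited:

#include <stdio.h>

int main(void)
{
    int size = 2, count = 3, df = 1;     /* df set: walk elements backwards */
    long offset = df ? (long)size * count - size : 0;

    for (int i = 0; i < count; i++) {
        printf("element %d starts at buffer offset %ld\n", i, offset);
        offset += df ? -size : size;     /* prints offsets 4, 2, 0 */
    }
    return 0;
}
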
Simple merge
Simple merge
diff --cc tcg/tcg.c
+++ b/tcg/tcg.c
  /* Forward declarations for functions declared in tcg-target.c and used here. */
  static void tcg_target_init(TCGContext *s);
  static void tcg_target_qemu_prologue(TCGContext *s);
 -static void patch_reloc(uint8_t *code_ptr, int type, 
 +static void patch_reloc(uint8_t *code_ptr, int type,
                          tcg_target_long value, tcg_target_long addend);
  
+ /* The CIE and FDE header definitions will be common to all hosts.  */
+ typedef struct {
+     uint32_t len __attribute__((aligned((sizeof(void *)))));
+     uint32_t id;
+     uint8_t version;
+     char augmentation[1];
+     uint8_t code_align;
+     uint8_t data_align;
+     uint8_t return_column;
+ } DebugFrameCIE;
+ typedef struct QEMU_PACKED {
+     uint32_t len __attribute__((aligned((sizeof(void *)))));
+     uint32_t cie_offset;
+     tcg_target_long func_start;
+     tcg_target_long func_len;
+ } DebugFrameFDEHeader;
  static void tcg_register_jit_int(void *buf, size_t size,
                                   void *debug_frame, size_t debug_frame_size)
      __attribute__((unused));
diff --cc tcg/tcg.h
Simple merge
diff --cc ui/input.c
  #include "qapi/error.h"
  #include "qmp-commands.h"
  #include "qapi-types.h"
+ #include "ui/keymaps.h"
 +//#include "tizen/src/debug_ch.h"
 +
 +//MULTI_DEBUG_CHANNEL(tizen, input);
 +
  struct QEMUPutMouseEntry {
      QEMUPutMouseEvent *qemu_put_mouse_event;
      void *qemu_put_mouse_event_opaque;
Simple merge
  #include "qemu/main-loop.h"
  #include "trace.h"
  #include "qemu/sockets.h"
 -
+ /* this must come after including "trace.h" */
+ #include <shlobj.h>
  
 +#ifdef CONFIG_MARU
 +typedef BOOL (WINAPI *LPFN_ISWOW64PROCESS) (HANDLE, PBOOL);
 +LPFN_ISWOW64PROCESS fnIsWow64Process;
 +
 +int is_wow64(void)
 +{
 +    int result = 0;
 +
 +    /* IsWow64Process is not available on all supported versions of Windows.
 +       Use GetModuleHandle to get a handle to the DLL that contains the function
 +       and GetProcAddress to get a pointer to the function if available. */
 +
 +    fnIsWow64Process = (LPFN_ISWOW64PROCESS) GetProcAddress(
 +        GetModuleHandle(TEXT("kernel32")),"IsWow64Process");
 +
 +    if (NULL != fnIsWow64Process) {
 +        if (!fnIsWow64Process(GetCurrentProcess(),&result)) {
 +            // handle error
 +            fprintf(stderr, "Cannot find 'IsWow64Process'\n");
 +        }
 +    }
 +    return result;
 +}
 +
 +bool get_java_path(char** java_path)
 +{
 +    HKEY hKeyNew;
 +    HKEY hKey;
 +    //char strJavaRuntimePath[JAVA_MAX_COMMAND_LENGTH] = {0};
 +    char strChoosenName[JAVA_MAX_COMMAND_LENGTH] = {0};
 +    char strSubKeyName[JAVA_MAX_COMMAND_LENGTH] = {0};
 +    char strJavaHome[JAVA_MAX_COMMAND_LENGTH] = {0};
 +    int index;
 +    DWORD dwSubKeyNameMax = JAVA_MAX_COMMAND_LENGTH;
 +    DWORD dwBufLen = JAVA_MAX_COMMAND_LENGTH;
 +
 +    RegOpenKeyEx(HKEY_LOCAL_MACHINE,
 +                "SOFTWARE\\JavaSoft\\Java Runtime Environment",
 +                0,
 +                KEY_QUERY_VALUE | KEY_ENUMERATE_SUB_KEYS | MY_KEY_WOW64_64KEY,
 +                &hKey);
 +    RegEnumKeyEx(hKey, 0, (LPSTR)strSubKeyName, &dwSubKeyNameMax,
 +                NULL, NULL, NULL, NULL);
 +    strcpy(strChoosenName, strSubKeyName);
 +
 +    index = 1;
 +    while (ERROR_SUCCESS ==
 +            RegEnumKeyEx(hKey, index, (LPSTR)strSubKeyName, &dwSubKeyNameMax,
 +            NULL, NULL, NULL, NULL)) {
 +        if (strcmp(strChoosenName, strSubKeyName) < 0) {
 +            strcpy(strChoosenName, strSubKeyName);
 +        }
 +        index++;
 +    }
 +
 +    RegOpenKeyEx(hKey, strChoosenName, 0,
 +                KEY_QUERY_VALUE | MY_KEY_WOW64_64KEY, &hKeyNew);
 +    RegQueryValueEx(hKeyNew, "JavaHome", NULL,
 +                    NULL, (LPBYTE)strJavaHome, &dwBufLen);
 +    RegCloseKey(hKey);
 +    if (strJavaHome[0] != '\0') {
 +        sprintf(*java_path, "\"%s\\bin\\java\"", strJavaHome);
 +        //strcpy(*java_path, strJavaHome);
 +        //strcat(*java_path, "\\bin\\java");
 +    } else {
 +        return false;
 +    }
 +
 +    return true;
 +}
 +#endif
 +
  void *qemu_oom_check(void *ptr)
  {
 +#ifdef CONFIG_MARU
 +    const char _msg[] = "Failed to allocate memory in qemu.";
 +    char cmd[JAVA_MAX_COMMAND_LENGTH] = { 0, };
 +    char *JAVA_EXEFILE_PATH = NULL;
 +    int len, ret;
 +#endif
 +
      if (ptr == NULL) {
          fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError());
 +#ifdef CONFIG_MARU
 +        JAVA_EXEFILE_PATH = malloc(JAVA_MAX_COMMAND_LENGTH);
 +        if (!JAVA_EXEFILE_PATH) {
 +            // TODO: print error message.
 +            return ptr;
 +        }
 +
 +        memset(JAVA_EXEFILE_PATH, 0, JAVA_MAX_COMMAND_LENGTH);
 +        if (is_wow64()) {
 +            if (!get_java_path(&JAVA_EXEFILE_PATH)) {
 +                strcpy(JAVA_EXEFILE_PATH, "java");
 +            }
 +        } else {
 +            strcpy(JAVA_EXEFILE_PATH, "java");
 +        }
 +        len = strlen(JAVA_EXEFILE_PATH) + strlen(JAVA_EXEOPTION) +
 +                  strlen(JAR_SKINFILE) + strlen(JAVA_SIMPLEMODE_OPTION) +
 +                  strlen(_msg) + 7;
 +        if (len > JAVA_MAX_COMMAND_LENGTH) {
 +            len = JAVA_MAX_COMMAND_LENGTH;
 +        }
 +
 +        snprintf(cmd, len, "%s %s %s %s=\"%s\"",
 +            JAVA_EXEFILE_PATH, JAVA_EXEOPTION, JAR_SKINFILE,
 +            JAVA_SIMPLEMODE_OPTION, _msg);
 +        ret = WinExec(cmd, SW_SHOW);
 +        if (ret < 32) {
 +            // TODO: error handling...
 +        }
 +
 +        /* for 64bit windows */
 +        free(JAVA_EXEFILE_PATH);
 +        JAVA_EXEFILE_PATH=0;
 +#endif
          abort();
      }
      return ptr;
Simple merge
diff --cc vl.c
--- 1/vl.c
--- 2/vl.c
+++ b/vl.c
@@@ -4488,14 -4308,8 +4503,12 @@@ int main(int argc, char **argv, char **
  
      qdev_machine_init();
  
 +#ifdef CONFIG_MARU
 +    // The returned value may point to a different address than the input variable.
 +    kernel_cmdline = prepare_maru_devices(kernel_cmdline);
 +#endif
      QEMUMachineInitArgs args = { .ram_size = ram_size,
-                                  .boot_device = (boot_devices[0] == '\0') ?
-                                                 machine->boot_order :
-                                                 boot_devices,
+                                  .boot_device = boot_order,
                                   .kernel_filename = kernel_filename,
                                   .kernel_cmdline = kernel_cmdline,
                                   .initrd_filename = initrd_filename,
diff --cc xen-all.c
+++ b/xen-all.c
@@@ -161,18 -161,18 +161,18 @@@ static void xen_ram_init(ram_addr_t ram
      ram_addr_t block_len;
  
      block_len = ram_size;
 -    if (ram_size >= HVM_BELOW_4G_RAM_END) {
 +    if (ram_size >= QEMU_BELOW_4G_RAM_END) {
          /* Xen does not allocate the memory continuously, and keep a hole at
 -         * HVM_BELOW_4G_MMIO_START of HVM_BELOW_4G_MMIO_LENGTH
 +         * QEMU_BELOW_4G_RAM_END of QEMU_BELOW_4G_MMIO_LENGTH
           */
 -        block_len += HVM_BELOW_4G_MMIO_LENGTH;
 +        block_len += QEMU_BELOW_4G_MMIO_LENGTH;
      }
-     memory_region_init_ram(&ram_memory, "xen.ram", block_len);
+     memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
      vmstate_register_ram_global(&ram_memory);
  
 -    if (ram_size >= HVM_BELOW_4G_RAM_END) {
 -        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
 -        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
 +    if (ram_size >= QEMU_BELOW_4G_RAM_END) {
 +        above_4g_mem_size = ram_size - QEMU_BELOW_4G_RAM_END;
 +        below_4g_mem_size = QEMU_BELOW_4G_RAM_END;
      } else {
          below_4g_mem_size = ram_size;
      }
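
For the xen_ram_init() hunk above, a small standalone sketch of the below/above
4G split and the MMIO hole added to block_len. The 0xe0000000 boundary and the
hole length are assumed example values, not constants taken from this tree:

#include <stdint.h>
#include <stdio.h>

#define EX_BELOW_4G_RAM_END   0xe0000000ULL   /* assumed example boundary */
#define EX_BELOW_4G_MMIO_LEN  (0x100000000ULL - EX_BELOW_4G_RAM_END)

int main(void)
{
    uint64_t ram_size = 4ULL << 30;           /* 4 GiB of guest RAM */
    uint64_t below_4g, above_4g, block_len = ram_size;

    if (ram_size >= EX_BELOW_4G_RAM_END) {
        block_len += EX_BELOW_4G_MMIO_LEN;    /* keep the MMIO hole free */
        below_4g = EX_BELOW_4G_RAM_END;
        above_4g = ram_size - EX_BELOW_4G_RAM_END;
    } else {
        below_4g = ram_size;
        above_4g = 0;
    }
    printf("below_4g = %#llx, above_4g = %#llx, block_len = %#llx\n",
           (unsigned long long)below_4g,
           (unsigned long long)above_4g,
           (unsigned long long)block_len);
    return 0;
}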