obj-$(CONFIG_XEN) += xen-all.o xen-mapcache.o
obj-$(CONFIG_NO_XEN) += xen-stub.o
+# HAX support
+ifdef CONFIG_WIN32
+obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-windows.o
+obj-$(CONFIG_NO_HAX) += hax-stub.o
+endif
+ifdef CONFIG_DARWIN
+obj-$(CONFIG_HAX) += target-i386/hax-all.o target-i386/hax-darwin.o
+obj-$(CONFIG_NO_HAX) += hax-stub.o
+endif
+
# Hardware support
- ifeq ($(TARGET_ARCH), sparc64)
+ ifeq ($(TARGET_NAME), sparc64)
obj-y += hw/sparc64/
else
obj-y += hw/$(TARGET_BASE_ARCH)/
return info;
}
+ /* Stub function that gets run on the vcpu when it is brought out of the
+    VM to run inside qemu via async_run_on_cpu() */
+ static void mig_sleep_cpu(void *opq)
+ {
+ qemu_mutex_unlock_iothread();
+ g_usleep(30*1000);
+ qemu_mutex_lock_iothread();
+ }
+
+ /* To reduce the dirty rate, explicitly prevent the VCPUs from spending
+    much time in the VM. The migration thread will try to catch up.
+    The workload will experience a performance drop.
+ */
+ static void mig_throttle_cpu_down(CPUState *cpu, void *data)
+ {
+ async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
+ }
+
+ static void mig_throttle_guest_down(void)
+ {
+ qemu_mutex_lock_iothread();
+ qemu_for_each_cpu(mig_throttle_cpu_down, NULL);
+ qemu_mutex_unlock_iothread();
+ }
+
+ static void check_guest_throttling(void)
+ {
+ static int64_t t0;
+ int64_t t1;
+
+ if (!mig_throttle_on) {
+ return;
+ }
+
+ if (!t0) {
+ t0 = qemu_get_clock_ns(rt_clock);
+ return;
+ }
+
+ t1 = qemu_get_clock_ns(rt_clock);
+
+ /* If it has been more than 40 ms since the last time the guest
+ * was throttled then do it again.
+ */
+ if (40 < (t1-t0)/1000000) {
+ mig_throttle_guest_down();
+ t0 = t1;
+ }
+ }
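+
+ /* Note: each throttled vCPU sleeps for 30 ms and is re-throttled at
+    most once per 40 ms window, so under sustained throttling a vCPU
+    runs roughly 10 ms out of every 40 ms (about a 25% duty cycle). */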
++
+int hax_available(void)
+{
+#ifdef CONFIG_HAX
+ return 1;
+#else
+ return 0;
+#endif
+}
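+
+/* Note: hax_available() only reports compile-time (CONFIG_HAX) support;
+ * whether HAX actually works at runtime is reported by hax_enabled(). */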
return ret;
}
if (header.capacity == 0) {
+        uint64_t desc_offset = le64_to_cpu(header.desc_offset);
if (desc_offset) {
return vmdk_open_desc_file(bs, flags, desc_offset << 9);
}
bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
if (ro && copy_on_read) {
- error_report("warning: disabling copy_on_read on readonly drive");
+ error_report("warning: disabling copy_on_read on read-only drive");
}
+ QINCREF(bs_opts);
ret = bdrv_open(dinfo->bdrv, file, bs_opts, bdrv_flags, drv);
- bs_opts = NULL;
if (ret < 0) {
+#ifdef CONFIG_MARU
+ const char _msg[] = "Failed to load disk file from the following path. Check if the file is corrupted or missing.\n\n";
+ char* err_msg = NULL;
+ err_msg = maru_convert_path((char*)_msg, file);
+ start_simple_client(err_msg);
+ if (err_msg) {
+ g_free(err_msg);
+ }
+#endif
+
if (ret == -EMEDIUMTYPE) {
error_report("could not open disk image %s: not in %s format",
- file ?: dinfo->id, drv->format_name);
+ file ?: dinfo->id, drv ? drv->format_name :
+ qdict_get_str(bs_opts, "driver"));
} else {
error_report("could not open disk image %s: %s",
file ?: dinfo->id, strerror(-ret));
vhost_net="no"
vhost_scsi="no"
kvm="no"
+hax="no"
+ rdma=""
gprof="no"
debug_tcg="no"
debug="no"
smartcard_nss=""
libusb=""
usb_redir=""
+opengl=""
+efence="no"
+yagl="no"
+yagl_stats="no"
glx=""
+vigs="no"
zlib="yes"
- guest_agent="yes"
+ guest_agent=""
want_tools="yes"
libiscsi=""
coroutine=""
seccomp=""
+gl="yes"
+
+# for TIZEN-maru
+maru="no"
+shm="no"
+#
glusterfs=""
+ glusterfs_discard="no"
virtio_blk_data_plane=""
gtk=""
gtkabi="2.0"
if [ "$cpu" = "i386" -o "$cpu" = "x86_64" ] ; then
audio_possible_drivers="$audio_possible_drivers fmod"
fi
- libs_qga="-lrt $libs_qga"
- QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers $QEMU_INCLUDES"
++
+# fix linking error on Ubuntu 13.04
++# libs_qga="-lrt $libs_qga"
++# QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers $QEMU_INCLUDES"
+ QEMU_INCLUDES="-I\$(SRC_PATH)/linux-headers -I$(pwd)/linux-headers $QEMU_INCLUDES"
;;
esac
echo " --disable-slirp disable SLIRP userspace network connectivity"
echo " --disable-kvm disable KVM acceleration support"
echo " --enable-kvm enable KVM acceleration support"
+echo " --disable-gl disable GL acceleration support"
+
+echo " --disable-hax disable HAX acceleration support"
+echo " --enable-hax enable HAX acceleration support"
+
+echo " --disable-gl disable GL acceleration support"
+echo " --enable-gl enable GL acceleration support"
+ echo " --disable-rdma disable RDMA-based migration support"
+ echo " --enable-rdma enable RDMA-based migration support"
echo " --enable-tcg-interpreter enable TCG with bytecode interpreter (TCI)"
- echo " --disable-nptl disable usermode NPTL support"
- echo " --enable-nptl enable usermode NPTL support"
echo " --enable-system enable all system emulation targets"
echo " --disable-system disable all system emulation targets"
echo " --enable-user enable supported user emulation targets"
echo "ATTR/XATTR support $attr"
echo "Install blobs $blobs"
echo "KVM support $kvm"
+echo "HAX support $hax"
+echo "GL support $gl"
+ echo "RDMA support $rdma"
echo "TCG interpreter $tcg_interpreter"
echo "fdt support $fdt"
echo "preadv support $preadv"
if test "$trace_default" = "yes"; then
echo "CONFIG_TRACE_DEFAULT=y" >> $config_host_mak
fi
+if test "$hax" = "yes" ; then
+ if test "$mingw32" = "yes" ; then
+ echo "CONFIG_HAX_BACKEND=y" >> $config_host_mak
+ elif test "$darwin" = "yes" ; then
+ echo "CONFIG_HAX_BACKEND=y" >> $config_host_mak
+ else
+ hax="no"
+ fi
+fi
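+# Example (a sketch, not upstream help text): on a mingw32 or darwin host,
+# HAX can be selected at configure time, e.g.:
+#   ./configure --target-list=i386-softmmu --enable-hax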
+ if test "$rdma" = "yes" ; then
+ echo "CONFIG_RDMA=y" >> $config_host_mak
+ fi
+
if test "$tcg_interpreter" = "yes"; then
QEMU_INCLUDES="-I\$(SRC_PATH)/tcg/tci $QEMU_INCLUDES"
elif test "$ARCH" = "sparc64" ; then
}
}
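+/* hax_vcpu_exec() returns 1 when QEMU must emulate the guest (MMIO or
+ * real mode) and 0 when the exec loop should be left (interrupt, halt,
+ * reset, ...); in the latter case we longjmp() out of cpu_exec(). */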
+#ifdef CONFIG_HAX
+ if (hax_enabled() && !hax_vcpu_exec(env))
+ longjmp(env->jmp_env, 1);
+#endif
+
next_tb = 0; /* force lookup of first TB */
for(;;) {
- interrupt_request = cpu->interrupt_request;
+ interrupt_request = need_handle_intr_request(cpu);
if (unlikely(interrupt_request)) {
- if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
+ if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
0);
cpu->interrupt_request &= ~CPU_INTERRUPT_SMI;
- do_smm_enter(env);
+#ifdef CONFIG_HAX
+ if (hax_enabled())
+ env->hax_vcpu->resync = 1;
+#endif
+ do_smm_enter(x86_env_get_cpu(env));
next_tb = 0;
} else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
!(env->hflags2 & HF2_NMI_MASK)) {
return true;
}
if (!cpu->halted || qemu_cpu_has_work(cpu) ||
- kvm_async_interrupts_enabled() || hax_enabled()) {
- kvm_halt_in_kernel()) {
++ kvm_halt_in_kernel() || hax_enabled()) {
return false;
}
return true;
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
+#include "sysemu/hax.h"
+ #include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
if (s->ops->init(&s->ctx) < 0) {
fprintf(stderr, "Virtio-9p Failed to initialize fs-driver with id:%s"
" and export path:%s\n", s->fsconf.fsdev_id, s->ctx.fs_root);
-
+#ifdef CONFIG_MARU
+ const char _msg[] = "Failed to find the file sharing path. Check if the path is correct or not.\n\n";
+ char* err_msg = NULL;
+ err_msg = maru_convert_path((char*)_msg, s->ctx.fs_root);
+ maru_register_exit_msg(MARU_EXIT_UNKNOWN, err_msg);
+ if (err_msg) {
+ g_free(err_msg);
+ }
+#endif
+
- return -1;
+ goto out;
}
if (v9fs_init_worker_threads() < 0) {
fprintf(stderr, "worker thread initialization failed\n");
VGACommonState *s = &d->vga;
/* vga + console init */
- vga_common_init(s);
+#ifdef CONFIG_MARU
+ maru_vga_common_init(s);
+#else
- vga_init(s, pci_address_space(dev), pci_address_space_io(dev), true);
+ vga_common_init(s, OBJECT(dev));
+#endif
+ vga_init(s, OBJECT(dev), pci_address_space(dev), pci_address_space_io(dev),
+ true);
s->con = graphic_console_init(DEVICE(dev), s->hw_ops, s);
#include "hw/acpi/acpi.h"
#include "hw/cpu/icc_bus.h"
#include "hw/boards.h"
+ #include "hw/pci/pci_host.h"
+#ifdef CONFIG_MARU
+#include "../../tizen/src/maru_err_table.h"
+#endif
/* debug PC/ISA interrupts */
//#define DEBUG_IRQ
--- /dev/null
+ /*
+ * QEMU PC System Firmware
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2011-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+ #include "sysemu/blockdev.h"
+ #include "qemu/error-report.h"
+ #include "hw/sysbus.h"
+ #include "hw/hw.h"
+ #include "hw/i386/pc.h"
+ #include "hw/boards.h"
+ #include "hw/loader.h"
+ #include "sysemu/sysemu.h"
+ #include "hw/block/flash.h"
+ #include "sysemu/kvm.h"
+
++#ifdef CONFIG_MARU
++#include "../../tizen/src/maru_err_table.h"
++#endif
++
+ #define BIOS_FILENAME "bios.bin"
+
+ typedef struct PcSysFwDevice {
+ SysBusDevice busdev;
+ uint8_t isapc_ram_fw;
+ } PcSysFwDevice;
+
+ static void pc_isa_bios_init(MemoryRegion *rom_memory,
+ MemoryRegion *flash_mem,
+ int ram_size)
+ {
+ int isa_bios_size;
+ MemoryRegion *isa_bios;
+ uint64_t flash_size;
+ void *flash_ptr, *isa_bios_ptr;
+
+ flash_size = memory_region_size(flash_mem);
+
+ /* map the last 128KB of the BIOS in ISA space */
+ isa_bios_size = flash_size;
+ if (isa_bios_size > (128 * 1024)) {
+ isa_bios_size = 128 * 1024;
+ }
+ isa_bios = g_malloc(sizeof(*isa_bios));
+ memory_region_init_ram(isa_bios, NULL, "isa-bios", isa_bios_size);
+ vmstate_register_ram_global(isa_bios);
+ memory_region_add_subregion_overlap(rom_memory,
+ 0x100000 - isa_bios_size,
+ isa_bios,
+ 1);
+
+ /* copy ISA rom image from top of flash memory */
+ flash_ptr = memory_region_get_ram_ptr(flash_mem);
+ isa_bios_ptr = memory_region_get_ram_ptr(isa_bios);
+ memcpy(isa_bios_ptr,
+ ((uint8_t*)flash_ptr) + (flash_size - isa_bios_size),
+ isa_bios_size);
+
+ memory_region_set_readonly(isa_bios, true);
+ }
+
+ static void pc_system_flash_init(MemoryRegion *rom_memory,
+ DriveInfo *pflash_drv)
+ {
+ BlockDriverState *bdrv;
+ int64_t size;
+ hwaddr phys_addr;
+ int sector_bits, sector_size;
+ pflash_t *system_flash;
+ MemoryRegion *flash_mem;
+
+ bdrv = pflash_drv->bdrv;
+ size = bdrv_getlength(pflash_drv->bdrv);
+ sector_bits = 12;
+ sector_size = 1 << sector_bits;
+
+ if ((size % sector_size) != 0) {
+ fprintf(stderr,
+ "qemu: PC system firmware (pflash) must be a multiple of 0x%x\n",
+ sector_size);
+ exit(1);
+ }
+
+ phys_addr = 0x100000000ULL - size;
+ system_flash = pflash_cfi01_register(phys_addr, NULL, "system.flash", size,
+ bdrv, sector_size, size >> sector_bits,
+ 1, 0x0000, 0x0000, 0x0000, 0x0000, 0);
+ flash_mem = pflash_cfi01_get_memory(system_flash);
+
+ pc_isa_bios_init(rom_memory, flash_mem, size);
+ }
+
+ static void old_pc_system_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
+ {
+ char *filename;
+ MemoryRegion *bios, *isa_bios;
+ int bios_size, isa_bios_size;
+ int ret;
+
+ /* BIOS load */
+ if (bios_name == NULL) {
+ bios_name = BIOS_FILENAME;
+ }
+ filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+ if (filename) {
+ bios_size = get_image_size(filename);
+ } else {
+ bios_size = -1;
+ }
+ if (bios_size <= 0 ||
+ (bios_size % 65536) != 0) {
+ goto bios_error;
+ }
+ bios = g_malloc(sizeof(*bios));
+ memory_region_init_ram(bios, NULL, "pc.bios", bios_size);
+ vmstate_register_ram_global(bios);
+ if (!isapc_ram_fw) {
+ memory_region_set_readonly(bios, true);
+ }
+ ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
+ if (ret != 0) {
+ bios_error:
+ fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
++#ifdef CONFIG_MARU
++ maru_register_exit_msg(MARU_EXIT_BIOS_FILE_EXCEPTION, bios_name);
++#endif
+ exit(1);
+ }
+ if (filename) {
+ g_free(filename);
+ }
+
+ /* map the last 128KB of the BIOS in ISA space */
+ isa_bios_size = bios_size;
+ if (isa_bios_size > (128 * 1024)) {
+ isa_bios_size = 128 * 1024;
+ }
+ isa_bios = g_malloc(sizeof(*isa_bios));
+ memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
+ bios_size - isa_bios_size, isa_bios_size);
+ memory_region_add_subregion_overlap(rom_memory,
+ 0x100000 - isa_bios_size,
+ isa_bios,
+ 1);
+ if (!isapc_ram_fw) {
+ memory_region_set_readonly(isa_bios, true);
+ }
+
+ /* map all the bios at the top of memory */
+ memory_region_add_subregion(rom_memory,
+ (uint32_t)(-bios_size),
+ bios);
+ }
+
+ void pc_system_firmware_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
+ {
+ DriveInfo *pflash_drv;
+
+ pflash_drv = drive_get(IF_PFLASH, 0, 0);
+
+ if (isapc_ram_fw || pflash_drv == NULL) {
+ /* When a pflash drive is not found, use rom-mode */
+ old_pc_system_rom_init(rom_memory, isapc_ram_fw);
+ return;
+ }
+
+ if (kvm_enabled() && !kvm_readonly_mem_enabled()) {
+ /* Older KVM cannot execute from device memory. So, flash memory
+ * cannot be used unless the readonly memory kvm capability is present. */
+ fprintf(stderr, "qemu: pflash with kvm requires KVM readonly memory support\n");
+ exit(1);
+ }
+
+ pc_system_flash_init(rom_memory, pflash_drv);
+ }
--- /dev/null
+ /*
+ * Deprecated PCI hotplug interface support
+ * This covers the old pci_add / pci_del command, whereas the more general
+ * device_add / device_del commands are now preferred.
+ *
+ * Copyright (c) 2004 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+ #include "hw/hw.h"
+ #include "hw/boards.h"
+ #include "hw/pci/pci.h"
+ #include "net/net.h"
+ #include "hw/i386/pc.h"
+ #include "monitor/monitor.h"
+ #include "hw/scsi/scsi.h"
+ #include "hw/virtio/virtio-blk.h"
+ #include "qemu/config-file.h"
+ #include "sysemu/blockdev.h"
+ #include "qapi/error.h"
+
+ static int pci_read_devaddr(Monitor *mon, const char *addr,
+ int *busp, unsigned *slotp)
+ {
+ int dom;
+
+ /* strip legacy tag */
+ if (!strncmp(addr, "pci_addr=", 9)) {
+ addr += 9;
+ }
+ if (pci_parse_devaddr(addr, &dom, busp, slotp, NULL)) {
+ monitor_printf(mon, "Invalid pci address\n");
+ return -1;
+ }
+ if (dom != 0) {
+ monitor_printf(mon, "Multiple PCI domains not supported, use device_add\n");
+ return -1;
+ }
+ return 0;
+ }
+
+ static PCIDevice *qemu_pci_hot_add_nic(Monitor *mon,
+ const char *devaddr,
+ const char *opts_str)
+ {
+ Error *local_err = NULL;
+ QemuOpts *opts;
+ PCIBus *root = pci_find_primary_bus();
+ PCIBus *bus;
+ int ret, devfn;
+
+ if (!root) {
+ monitor_printf(mon, "no primary PCI bus (if there are multiple"
+ " PCI roots, you must use device_add instead)");
+ return NULL;
+ }
+
+ bus = pci_get_bus_devfn(&devfn, root, devaddr);
+ if (!bus) {
+ monitor_printf(mon, "Invalid PCI device address %s\n", devaddr);
+ return NULL;
+ }
+ if (!((BusState*)bus)->allow_hotplug) {
+ monitor_printf(mon, "PCI bus doesn't support hotplug\n");
+ return NULL;
+ }
+
+ opts = qemu_opts_parse(qemu_find_opts("net"), opts_str ? opts_str : "", 0);
+ if (!opts) {
+ return NULL;
+ }
+
+ qemu_opt_set(opts, "type", "nic");
+
+ ret = net_client_init(opts, 0, &local_err);
+ if (error_is_set(&local_err)) {
+ qerror_report_err(local_err);
+ error_free(local_err);
+ return NULL;
+ }
+ if (nd_table[ret].devaddr) {
+ monitor_printf(mon, "Parameter addr not supported\n");
+ return NULL;
+ }
+ return pci_nic_init(&nd_table[ret], root, "rtl8139", devaddr);
+ }
+
+ static int scsi_hot_add(Monitor *mon, DeviceState *adapter,
+ DriveInfo *dinfo, int printinfo)
+ {
+ SCSIBus *scsibus;
+ SCSIDevice *scsidev;
+
+ scsibus = (SCSIBus *)
+ object_dynamic_cast(OBJECT(QLIST_FIRST(&adapter->child_bus)),
+ TYPE_SCSI_BUS);
+ if (!scsibus) {
+ error_report("Device is not a SCSI adapter");
+ return -1;
+ }
+
+ /*
+ * drive_init() tries to find a default for dinfo->unit. Doesn't
+ * work at all for hotplug though as we assign the device to a
+ * specific bus instead of the first bus with spare scsi ids.
+ *
+ * Ditch the calculated value and reload from option string (if
+ * specified).
+ */
+ dinfo->unit = qemu_opt_get_number(dinfo->opts, "unit", -1);
+ dinfo->bus = scsibus->busnr;
+ scsidev = scsi_bus_legacy_add_drive(scsibus, dinfo->bdrv, dinfo->unit,
+ false, -1, NULL, NULL);
+ if (!scsidev) {
+ return -1;
+ }
+ dinfo->unit = scsidev->id;
+
+ if (printinfo)
+ monitor_printf(mon, "OK bus %d, unit %d\n",
+ scsibus->busnr, scsidev->id);
+ return 0;
+ }
+
+ int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo)
+ {
+ int pci_bus;
+ unsigned slot;
+ PCIBus *root = pci_find_primary_bus();
+ PCIDevice *dev;
+ const char *pci_addr = qdict_get_str(qdict, "pci_addr");
+
+ switch (dinfo->type) {
+ case IF_SCSI:
+ if (!root) {
+ monitor_printf(mon, "no primary PCI bus (if there are multiple"
+ " PCI roots, you must use device_add instead)");
+ goto err;
+ }
+ if (pci_read_devaddr(mon, pci_addr, &pci_bus, &slot)) {
+ goto err;
+ }
+ dev = pci_find_device(root, pci_bus, PCI_DEVFN(slot, 0));
+ if (!dev) {
+ monitor_printf(mon, "no pci device with address %s\n", pci_addr);
+ goto err;
+ }
+ if (scsi_hot_add(mon, &dev->qdev, dinfo, 1) != 0) {
+ goto err;
+ }
+ break;
+ default:
+ monitor_printf(mon, "Can't hot-add drive to type %d\n", dinfo->type);
+ goto err;
+ }
+
+ return 0;
+ err:
+ return -1;
+ }
+
+ static PCIDevice *qemu_pci_hot_add_storage(Monitor *mon,
+ const char *devaddr,
+ const char *opts)
+ {
+ PCIDevice *dev;
+ DriveInfo *dinfo = NULL;
+ int type = -1;
+ char buf[128];
+ PCIBus *root = pci_find_primary_bus();
+ PCIBus *bus;
+ int devfn;
+
+ if (get_param_value(buf, sizeof(buf), "if", opts)) {
+ if (!strcmp(buf, "scsi"))
+ type = IF_SCSI;
+ else if (!strcmp(buf, "virtio")) {
+ type = IF_VIRTIO;
+ } else {
+ monitor_printf(mon, "type %s not a hotpluggable PCI device.\n", buf);
+ return NULL;
+ }
+ } else {
+ monitor_printf(mon, "no if= specified\n");
+ return NULL;
+ }
+
+ if (get_param_value(buf, sizeof(buf), "file", opts)) {
+ dinfo = add_init_drive(opts);
+ if (!dinfo)
+ return NULL;
+ if (dinfo->devaddr) {
+ monitor_printf(mon, "Parameter addr not supported\n");
+ return NULL;
+ }
+ } else {
+ dinfo = NULL;
+ }
+
+ if (!root) {
+ monitor_printf(mon, "no primary PCI bus (if there are multiple"
+ " PCI roots, you must use device_add instead)");
+ return NULL;
+ }
+ bus = pci_get_bus_devfn(&devfn, root, devaddr);
+ if (!bus) {
+ monitor_printf(mon, "Invalid PCI device address %s\n", devaddr);
+ return NULL;
+ }
+ if (!((BusState*)bus)->allow_hotplug) {
+ monitor_printf(mon, "PCI bus doesn't support hotplug\n");
+ return NULL;
+ }
+
+ switch (type) {
+ case IF_SCSI:
+ dev = pci_create(bus, devfn, "lsi53c895a");
+ if (qdev_init(&dev->qdev) < 0)
+ dev = NULL;
+ if (dev && dinfo) {
+ if (scsi_hot_add(mon, &dev->qdev, dinfo, 0) != 0) {
+ qdev_unplug(&dev->qdev, NULL);
+ dev = NULL;
+ }
+ }
+ break;
+ case IF_VIRTIO:
+ if (!dinfo) {
+ monitor_printf(mon, "virtio requires a backing file/device.\n");
+ return NULL;
+ }
+ dev = pci_create(bus, devfn, "virtio-blk-pci");
+ if (qdev_prop_set_drive(&dev->qdev, "drive", dinfo->bdrv) < 0) {
+ qdev_free(&dev->qdev);
+ dev = NULL;
+ break;
+ }
+ if (qdev_init(&dev->qdev) < 0)
+ dev = NULL;
+ break;
+ default:
+ dev = NULL;
+ }
+ return dev;
+ }
+
++#ifdef CONFIG_MARU
++static PCIDevice *qemu_pci_hot_add_keyboard(Monitor *mon,
++ const char *devaddr,
++ const char *opts)
++{
++    PCIDevice *dev;
++    PCIBus *root = pci_find_primary_bus();
++    PCIBus *bus;
++    int devfn;
++
++    if (!root) {
++        monitor_printf(mon, "no primary PCI bus (if there are multiple"
++                       " PCI roots, you must use device_add instead)");
++        return NULL;
++    }
++
++    bus = pci_get_bus_devfn(&devfn, root, devaddr);
++ if (!bus) {
++ monitor_printf(mon, "Invalid PCI device address %s\n", devaddr);
++ return NULL;
++ }
++
++ if (!((BusState*)bus)->allow_hotplug) {
++ monitor_printf(mon, "PCI bus doesn't support hotplug\n");
++ return NULL;
++ }
++
++ dev = pci_create(bus, devfn, "virtio-keyboard-pci");
++ if (qdev_init(&dev->qdev) < 0) {
++ dev = NULL;
++ }
++
++ return dev;
++}
++#endif /* CONFIG_MARU */
++
++#ifdef CONFIG_MARU
++void pci_device_hot_add(Monitor *mon, const QDict *qdict)
++{
++ do_pci_device_hot_add(mon, qdict);
++}
++
++PCIDevice *do_pci_device_hot_add(Monitor *mon, const QDict *qdict)
++#else
+ void pci_device_hot_add(Monitor *mon, const QDict *qdict)
++#endif
+ {
+ PCIDevice *dev = NULL;
+ const char *pci_addr = qdict_get_str(qdict, "pci_addr");
+ const char *type = qdict_get_str(qdict, "type");
+ const char *opts = qdict_get_try_str(qdict, "opts");
+
+ /* strip legacy tag */
+ if (!strncmp(pci_addr, "pci_addr=", 9)) {
+ pci_addr += 9;
+ }
+
+ if (!opts) {
+ opts = "";
+ }
+
+ if (!strcmp(pci_addr, "auto"))
+ pci_addr = NULL;
+
+ if (strcmp(type, "nic") == 0) {
+ dev = qemu_pci_hot_add_nic(mon, pci_addr, opts);
+ } else if (strcmp(type, "storage") == 0) {
+ dev = qemu_pci_hot_add_storage(mon, pci_addr, opts);
++#ifdef CONFIG_MARU
++ } else if (strcmp(type, "keyboard") == 0) {
++ dev = qemu_pci_hot_add_keyboard(mon, pci_addr, opts);
++#endif
+ } else {
+ monitor_printf(mon, "invalid type: %s\n", type);
+ }
+
+ if (dev) {
+ monitor_printf(mon, "OK root bus %s, bus %d, slot %d, function %d\n",
+ pci_root_bus_path(dev),
+ pci_bus_num(dev->bus), PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ } else
+ monitor_printf(mon, "failed to add %s\n", opts);
++#ifdef CONFIG_MARU
++ return dev;
++#endif
+ }
+
+ static int pci_device_hot_remove(Monitor *mon, const char *pci_addr)
+ {
+ PCIBus *root = pci_find_primary_bus();
+ PCIDevice *d;
+ int bus;
+ unsigned slot;
+ Error *local_err = NULL;
+
+ if (!root) {
+ monitor_printf(mon, "no primary PCI bus (if there are multiple"
+ " PCI roots, you must use device_del instead)");
+ return -1;
+ }
+
+ if (pci_read_devaddr(mon, pci_addr, &bus, &slot)) {
+ return -1;
+ }
+
+ d = pci_find_device(root, bus, PCI_DEVFN(slot, 0));
+ if (!d) {
+ monitor_printf(mon, "slot %d empty\n", slot);
+ return -1;
+ }
+
+ qdev_unplug(&d->qdev, &local_err);
+ if (error_is_set(&local_err)) {
+ monitor_printf(mon, "%s\n", error_get_pretty(local_err));
+ error_free(local_err);
+ return -1;
+ }
+
+ return 0;
+ }
+
+ void do_pci_device_hot_remove(Monitor *mon, const QDict *qdict)
+ {
+ pci_device_hot_remove(mon, qdict_get_str(qdict, "pci_addr"));
+ }
uc->handle_reset = usb_msd_handle_reset;
uc->handle_control = usb_msd_handle_control;
uc->handle_data = usb_msd_handle_data;
+#ifdef CONFIG_MARU
+ uc->handle_destroy = usb_msd_handle_destroy;
+#endif
+ set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
dc->fw_name = "storage";
dc->vmsd = &vmstate_usb_msd;
}
sigjmp_buf jmp_env; \
int exception_index; \
\
+ /* for hax */ \
+ int hax_vcpu_dirty; \
+ struct hax_vcpu_state *hax_vcpu; \
+ \
+ CPUArchState *next_cpu; /* next CPU sharing TB cache */ \
++
/* user data */ \
void *opaque; \
\
uint32_t kvm_arch_get_supported_cpuid(KVMState *env, uint32_t function,
uint32_t index, int reg);
- void kvm_cpu_synchronize_state(CPUArchState *env);
++
+ void kvm_cpu_synchronize_state(CPUState *cpu);
+
+#ifdef CONFIG_HAX
- void hax_cpu_synchronize_state(CPUArchState *env);
++void hax_cpu_synchronize_state(CPUState *cpu);
+#endif
+
/* generic hooks - to be moved/refactored once there are more users */
- static inline void cpu_synchronize_state(CPUArchState *env)
+ static inline void cpu_synchronize_state(CPUState *cpu)
{
if (kvm_enabled()) {
- kvm_cpu_synchronize_state(env);
+ kvm_cpu_synchronize_state(cpu);
}
- hax_cpu_synchronize_state(env);
+#ifdef CONFIG_HAX
++ hax_cpu_synchronize_state(cpu);
+#endif
}
#if !defined(CONFIG_USER_ONLY)
extern int vga_interface_type;
#define xenfb_enabled (vga_interface_type == VGA_XENFB)
- #define qxl_enabled (vga_interface_type == VGA_QXL)
+#ifdef CONFIG_MARU
+#define maru_vga_enabled (vga_interface_type == VGA_MARU)
+#endif
extern int graphic_width;
extern int graphic_height;
static CharDriverState *qmp_chardev_open_file(ChardevFile *file, Error **errp)
{
HANDLE out;
+#ifdef CONFIG_MARU
+ int open_flags, ret;
+#endif
- if (file->in) {
+ if (file->has_in) {
error_setg(errp, "input file not supported");
return NULL;
}
void object_unref(Object *obj)
{
+// Workaround to avoid a QOM bug related to qbus_create_inplace()... see hw/qdev.c
+    if (obj->ref == 0) { // Object already finalized...
+        return;
+    }
+//
g_assert(obj->ref > 0);
- obj->ref--;
/* parent always holds a reference to its children */
- if (obj->ref == 0) {
+ if (atomic_fetch_dec(&obj->ref) == 1) {
object_finalize(obj);
}
}
--- /dev/null
+/*
+ * QEMU KVM support
+ *
+ * Copyright IBM, Corp. 2008
+ * Red Hat, Inc. 2008
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ * Glauber Costa <gcosta@redhat.com>
+ *
+ * Copyright (c) 2011 Intel Corporation
+ * Written by:
+ * Jiang Yunhong<yunhong.jiang@intel.com>
+ * Xin Xiaohui<xiaohui.xin@intel.com>
+ * Zhang Xiantao<xiantao.zhang@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * HAX common code for both windows and darwin
+ * some code from KVM side
+ */
+
+#include "hax-i386.h"
+
+#define HAX_EMUL_ONE 0x1
+#define HAX_EMUL_REAL 0x2
+#define HAX_EMUL_HLT 0x4
+#define HAX_EMUL_EXITLOOP 0x5
+
+#define HAX_EMULATE_STATE_MMIO 0x1
+#define HAX_EMULATE_STATE_REAL 0x2
+#define HAX_EMULATE_STATE_NONE 0x3
+#define HAX_EMULATE_STATE_INITIAL 0x4
+
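+/* Note: the HAX_EMUL_* values are return codes of hax_vcpu_hax_exec(),
+ * while HAX_EMULATE_STATE_* track a vcpu's current emulation mode. */
+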
+struct hax_state hax_global;
+int ret_hax_init = 0;
+static int hax_disabled = 1;
+
+int hax_support = -1;
+
+/* Called after hax_init */
+int hax_enabled(void)
+{
+ return (!hax_disabled && hax_support);
+}
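+
+/* Note: hax_support stays -1 until hax_init() has probed the module, so
+ * hax_enabled() is only meaningful after hax_accel_init() has run. */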
+
+void hax_disable(int disable)
+{
+ hax_disabled = disable;
+}
+
+/* Currently non-PG modes are emulated by QEMU */
+int hax_vcpu_emulation_mode(CPUArchState *env)
+{
+ return !(env->cr[0] & CR0_PG_MASK);
+}
+
+static int hax_prepare_emulation(CPUArchState *env)
+{
+ /* Flush all emulation states */
+ tlb_flush(env, 1);
+ tb_flush(env);
+ /* Sync the vcpu state from hax kernel module */
+ hax_vcpu_sync_state(env, 0);
+ return 0;
+}
+
+/*
+ * Check whether to break out of the translation block loop:
+ * break after one MMIO emulation, or once emulation mode has finished
+ */
+static int hax_stop_tbloop(CPUArchState *env)
+{
+ switch (env->hax_vcpu->emulation_state)
+ {
+ case HAX_EMULATE_STATE_MMIO:
+ if (env->hax_vcpu->resync) {
+ hax_prepare_emulation(env);
+ env->hax_vcpu->resync = 0;
+ return 0;
+ }
+ return 1;
+ break;
+ case HAX_EMULATE_STATE_INITIAL:
+ case HAX_EMULATE_STATE_REAL:
+ if (!hax_vcpu_emulation_mode(env))
+ return 1;
+ break;
+ default:
+        dprint("Invalid emulation state in hax_stop_tbloop state %x\n",
+ env->hax_vcpu->emulation_state);
+ break;
+ }
+
+ return 0;
+}
+
+int hax_stop_emulation(CPUArchState *env)
+{
+ if (hax_stop_tbloop(env))
+ {
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_NONE;
+ /*
+         * QEMU emulation changes the vcpu state;
+         * sync the state back to the HAX kernel module
+ */
+ hax_vcpu_sync_state(env, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+int hax_stop_translate(CPUArchState *env)
+{
+ struct hax_vcpu_state *vstate;
+
+ vstate = env->hax_vcpu;
+ assert(vstate->emulation_state);
+ if (vstate->emulation_state == HAX_EMULATE_STATE_MMIO )
+ return 1;
+
+ return 0;
+}
+
+int valid_hax_tunnel_size(uint16_t size)
+{
+ return size >= sizeof(struct hax_tunnel);
+}
+
+hax_fd hax_vcpu_get_fd(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ if (!vcpu)
+ return HAX_INVALID_FD;
+ return vcpu->fd;
+}
+
+/* Current version */
+uint32_t hax_cur_version = 0x1;
+/* Minimum HAX kernel module version required */
+uint32_t hax_lest_version = 0x1;
+
+static int hax_get_capability(struct hax_state *hax)
+{
+ int ret;
+ struct hax_capabilityinfo capinfo, *cap = &capinfo;
+
+ ret = hax_capability(hax, cap);
+ if (ret)
+ return ret;
+
+ if ( ((cap->wstatus & HAX_CAP_WORKSTATUS_MASK) ==
+ HAX_CAP_STATUS_NOTWORKING ))
+ {
+        if (cap->winfo & HAX_CAP_FAILREASON_VT)
+            dprint("VT-x feature is not enabled, so the HAX driver will not work.\n");
+        else if (cap->winfo & HAX_CAP_FAILREASON_NX)
+            dprint("NX feature is not enabled, so the HAX driver will not work.\n");
+ return -ENXIO;
+ }
+
+ if (cap->wstatus & HAX_CAP_MEMQUOTA)
+ {
+ if (cap->mem_quota < hax->mem_quota)
+ {
+ dprint("The memory needed by this VM exceeds the driver limit.\n");
+ return -ENOSPC;
+ }
+ }
+ return 0;
+}
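+
+/* Note: hax->mem_quota was seeded with the guest RAM size in hax_pre_init(),
+ * so the HAX_CAP_MEMQUOTA check above rejects VMs larger than the driver's
+ * configured memory quota. */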
+
+static int hax_version_support(struct hax_state *hax)
+{
+ int ret;
+ struct hax_module_version version;
+
+ ret = hax_mod_version(hax, &version);
+ if (ret < 0)
+ return 0;
+
+ if ( (hax_lest_version > version.cur_version) ||
+ (hax_cur_version < version.compat_version) )
+ return 0;
+
+ return 1;
+}
+
+int hax_vcpu_create(int id)
+{
+ struct hax_vcpu_state *vcpu = NULL;
+ int ret;
+
+ if (!hax_global.vm)
+ {
+        dprint("failed to create vcpu %x, vm is null\n", id);
+ return -1;
+ }
+
+ if (hax_global.vm->vcpus[id])
+ {
+ dprint("vcpu %x allocated already\n", id);
+ return 0;
+ }
+
+ vcpu = g_malloc(sizeof(struct hax_vcpu_state));
+ if (!vcpu)
+ {
+ dprint("Failed to alloc vcpu state\n");
+ return -ENOMEM;
+ }
+
+ memset(vcpu, 0, sizeof(struct hax_vcpu_state));
+
+ ret = hax_host_create_vcpu(hax_global.vm->fd, id);
+ if (ret)
+ {
+ dprint("Failed to create vcpu %x\n", id);
+ goto error;
+ }
+
+ vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
+ if (hax_invalid_fd(vcpu->fd))
+ {
+ dprint("Failed to open the vcpu\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hax_global.vm->vcpus[id] = vcpu;
+
+ ret = hax_host_setup_vcpu_channel(vcpu);
+ if (ret)
+ {
+        dprint("Invalid hax tunnel size\n");
+ ret = -EINVAL;
+ goto error;
+ }
+ return 0;
+
+error:
+ /* vcpu and tunnel will be closed automatically */
+ if (vcpu && !hax_invalid_fd(vcpu->fd))
+ hax_close_fd(vcpu->fd);
+
+ hax_global.vm->vcpus[id] = NULL;
+ g_free(vcpu);
+ return -1;
+}
+
+int hax_vcpu_destroy(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+
+    if (!vcpu)
+        return 0;
+
+    if (!hax_global.vm)
+    {
+        dprint("failed to destroy vcpu %x, vm is null\n", vcpu->vcpu_id);
+        return -1;
+    }
+
+ /*
+     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
+     * 2. Closing the fd causes the hax module to clean up the vcpu
+ */
+ hax_close_fd(vcpu->fd);
+ hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
+ g_free(vcpu);
+ return 0;
+}
+
+int hax_init_vcpu(CPUArchState *env)
+{
+ int ret;
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ ret = hax_vcpu_create(cpu->cpu_index);
+ if (ret < 0)
+ {
+ dprint("Failed to create HAX vcpu\n");
+ exit(-1);
+ }
+
+ env->hax_vcpu = hax_global.vm->vcpus[cpu->cpu_index];
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL;
+ env->hax_vcpu_dirty = 1;
+ qemu_register_reset(hax_reset_vcpu_state, env);
+
+ return ret;
+}
+
+struct hax_vm *hax_vm_create(struct hax_state *hax)
+{
+ struct hax_vm *vm;
+ int vm_id = 0, ret;
+ char *vm_name = NULL;
+
+ if (hax_invalid_fd(hax->fd))
+ return NULL;
+
+ if (hax->vm)
+ return hax->vm;
+
+ vm = g_malloc(sizeof(struct hax_vm));
+ if (!vm)
+ return NULL;
+ memset(vm, 0, sizeof(struct hax_vm));
+ ret = hax_host_create_vm(hax, &vm_id);
+ if (ret) {
+ dprint("Failed to create vm %x\n", ret);
+ goto error;
+ }
+ vm->id = vm_id;
+ vm->fd = hax_host_open_vm(hax, vm_id);
+ if (hax_invalid_fd(vm->fd))
+ {
+        dprint("Failed to open the vm device: %s\n", vm_name);
+ goto error;
+ }
+
+ hax->vm = vm;
+ dprint("End of VM create, id %d\n", vm->id);
+ return vm;
+
+error:
+ g_free(vm);
+ hax->vm = NULL;
+ return NULL;
+}
+
+int hax_vm_destroy(struct hax_vm *vm)
+{
+ int i;
+
+ for (i = 0; i < HAX_MAX_VCPU; i++)
+ if (vm->vcpus[i])
+ {
+            dprint("VCPUs should be cleaned up before the vm is destroyed\n");
+ return -1;
+ }
+ hax_close_fd(vm->fd);
+ g_free(vm);
+ hax_global.vm = NULL;
+ return 0;
+}
+
+static void
+hax_region_add(MemoryListener *listener, MemoryRegionSection *section)
+{
+ hax_set_phys_mem(section);
+}
+
+static void
+hax_region_del(MemoryListener *listener, MemoryRegionSection *section)
+{
+ hax_set_phys_mem(section);
+}
+
+
+/* currently we fake the dirty bitmap sync, always dirty */
+static void hax_log_sync(MemoryListener *listener, MemoryRegionSection *section)
+{
+ MemoryRegion *mr = section->mr;
+ unsigned long c;
+ unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
+ HOST_LONG_BITS;
+ unsigned long bitmap[len];
+ int i, j;
+
+ for (i = 0; i < len; i++) {
+ bitmap[i] = 1;
+ c = leul_to_cpu(bitmap[i]);
+ do {
+ j = ffsl(c) - 1;
+ c &= ~(1ul << j);
+ memory_region_set_dirty(mr, (i * HOST_LONG_BITS + j) *
+ TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
+ } while (c != 0);
+ }
+}
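+
+/* Marking every page dirty on every sync is a conservative placeholder:
+ * it stays correct without a real dirty bitmap from the HAX module, at
+ * the cost of treating all guest RAM as dirty. */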
+
+static void hax_log_global_start(struct MemoryListener *listener)
+{
+}
+
+static void hax_log_global_stop(struct MemoryListener *listener)
+{
+}
+
+static void hax_log_start(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
+
+static void hax_log_stop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
+
+static void hax_begin(MemoryListener *listener)
+{
+}
+
+static void hax_commit(MemoryListener *listener)
+{
+}
+
+static void hax_region_nop(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+}
+
+static MemoryListener hax_memory_listener = {
+ .begin = hax_begin,
+ .commit = hax_commit,
+ .region_add = hax_region_add,
+ .region_del = hax_region_del,
+ .region_nop = hax_region_nop,
+ .log_start = hax_log_start,
+ .log_stop = hax_log_stop,
+ .log_sync = hax_log_sync,
+ .log_global_start = hax_log_global_start,
+ .log_global_stop = hax_log_global_stop,
+};
+
+static void hax_handle_interrupt(CPUArchState *env, int mask)
+{
+ CPUState *cpu = ENV_GET_CPU(env);
+ cpu->interrupt_request |= mask;
+
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+ }
+}
+
+int hax_pre_init(uint64_t ram_size)
+{
+ struct hax_state *hax = NULL;
+
+ dprint("hax_disabled %d\n", hax_disabled);
+ if (hax_disabled)
+ return 0;
+ hax = &hax_global;
+ memset(hax, 0, sizeof(struct hax_state));
+ hax->mem_quota = ram_size;
+    dprint("ram_size %" PRIx64 "\n", ram_size);
+ return 0;
+}
+
+static int hax_init(void)
+{
+ struct hax_state *hax = NULL;
+ int ret;
+
+ hax_support = 0;
+
+ hax = &hax_global;
+
+
+ hax->fd = hax_mod_open();
+ if (hax_invalid_fd(hax->fd))
+ {
+ hax->fd = 0;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ ret = hax_get_capability(hax);
+
+ if (ret)
+ {
+ if (ret != -ENOSPC)
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (!hax_version_support(hax))
+ {
+        dprint("Incompatible HAX version. QEMU current version %x ", hax_cur_version);
+        dprint("requires HAX version of at least %x\n", hax_lest_version);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax->vm = hax_vm_create(hax);
+ if (!hax->vm)
+ {
+ dprint("Failed to create HAX VM\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ memory_listener_register(&hax_memory_listener, NULL);
+
+ hax_support = 1;
+
+ return ret;
+error:
+ if (hax->vm)
+ hax_vm_destroy(hax->vm);
+ if (hax->fd)
+ hax_mod_close(hax);
+
+ return ret;
+}
+
+int hax_accel_init(void)
+{
+ if (hax_disabled) {
+ dprint("HAX is disabled and emulator runs in emulation mode.\n");
+ return 0;
+ }
+
+ ret_hax_init = hax_init();
+ if (ret_hax_init && (ret_hax_init != -ENOSPC)) {
+ dprint("No accelerator found.\n");
+ return ret_hax_init;
+ } else {
+ dprint("HAX is %s and emulator runs in %s mode.\n",
+ !ret_hax_init ? "working" : "not working",
+ !ret_hax_init ? "fast virt" : "emulation");
+ return 0;
+ }
+}
+
+int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, int direction,
+ int size, int count, void *buffer)
+{
+ uint8_t *ptr;
+ int i;
+
+ if (!df)
+ ptr = (uint8_t *)buffer;
+ else
+ ptr = buffer + size * count - size;
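+    /* ptr now points at the first element to transfer; the loop below
+     * walks forward (df clear) or backward (df set) through the buffer,
+     * matching the x86 direction-flag semantics for string I/O. */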
+ for (i = 0; i < count; i++)
+ {
+ if (direction == HAX_EXIT_IO_IN) {
+ switch (size) {
+ case 1:
+ stb_p(ptr, cpu_inb(port));
+ break;
+ case 2:
+ stw_p(ptr, cpu_inw(port));
+ break;
+ case 4:
+ stl_p(ptr, cpu_inl(port));
+ break;
+ }
+ } else {
+ switch (size) {
+ case 1:
+ cpu_outb(port, ldub_p(ptr));
+ break;
+ case 2:
+ cpu_outw(port, lduw_p(ptr));
+ break;
+ case 4:
+ cpu_outl(port, ldl_p(ptr));
+ break;
+ }
+ }
+ if (!df)
+ ptr += size;
+ else
+ ptr -= size;
+ }
+
+ return 0;
+}
+
+static int hax_vcpu_interrupt(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ /*
+     * Try to inject an interrupt if the guest can accept it.
+     * Unlike KVM, the HAX kernel module checks eflags itself, instead of qemu
+ */
+ if (ht->ready_for_interrupt_injection &&
+ (cpu->interrupt_request & CPU_INTERRUPT_HARD))
+ {
+ int irq;
+
+ cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ hax_inject_interrupt(env, irq);
+ }
+ }
+
+ /* If we have an interrupt but the guest is not ready to receive an
+ * interrupt, request an interrupt window exit. This will
+ * cause a return to userspace as soon as the guest is ready to
+ * receive interrupts. */
+ if ((cpu->interrupt_request & CPU_INTERRUPT_HARD))
+ ht->request_interrupt_window = 1;
+ else
+ ht->request_interrupt_window = 0;
+ return 0;
+}
+
+void hax_raise_event(CPUArchState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+
+ if (!vcpu)
+ return;
+ vcpu->tunnel->user_event_pending = 1;
+}
+
+/*
+ * Ask the hax kernel module to run the CPU for us until:
+ * 1. The guest crashes or shuts down
+ * 2. QEMU's emulation is needed, e.g. the guest executes an MMIO
+ *    instruction or enters emulation mode (non-PG mode)
+ * 3. The guest executes HLT
+ * 4. QEMU has a signal/event pending
+ * 5. An unknown VMX exit happens
+ */
+extern void qemu_system_reset_request(void);
+static int hax_vcpu_hax_exec(CPUArchState *env)
+{
+ int ret = 0;
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ if (hax_vcpu_emulation_mode(env))
+ {
+        dprint("Trying to execute vcpu at eip:" TARGET_FMT_lx "\n", env->eip);
+ return HAX_EMUL_EXITLOOP;
+ }
+
+
+ //hax_cpu_synchronize_state(env);
+
+ do {
+ int hax_ret;
+
+
+ if (cpu->exit_request) {
+ ret = HAX_EMUL_EXITLOOP ;
+ break;
+ }
+
+#if 0
+ if (env->hax_vcpu_dirty) {
+ hax_vcpu_sync_state(env, 1);
+ env->hax_vcpu_dirty = 0;
+ }
+#endif
+
+ hax_vcpu_interrupt(env);
+
+ hax_ret = hax_vcpu_run(vcpu);
+
+        /* Simply continue the vcpu_run if the system call was interrupted */
+ if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
+ dprint("io window interrupted\n");
+ continue;
+ }
+
+ if (hax_ret < 0)
+ {
+ dprint("vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
+ abort();
+ }
+ switch (ht->_exit_status)
+ {
+ case HAX_EXIT_IO:
+ {
+ ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
+ ht->pio._direction,
+ ht->pio._size, ht->pio._count, vcpu->iobuf);
+ }
+ break;
+ case HAX_EXIT_MMIO:
+ ret = HAX_EMUL_ONE;
+ break;
+ case HAX_EXIT_REAL:
+ ret = HAX_EMUL_REAL;
+ break;
+ /* Guest state changed, currently only for shutdown */
+ case HAX_EXIT_STATECHANGE:
+ dprint("VCPU shutdown request\n");
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ case HAX_EXIT_UNKNOWN_VMEXIT:
+ dprint("Unknown VMX exit %x from guest\n", ht->_exit_reason);
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ case HAX_EXIT_HLT:
+ if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ /* hlt instruction with interrupt disabled is shutdown */
+ env->eflags |= IF_MASK;
+ cpu->halted = 1;
+ env->exception_index = EXCP_HLT;
+ ret = HAX_EMUL_HLT;
+ }
+ break;
+ /* these situation will continue to hax module */
+ case HAX_EXIT_INTERRUPT:
+ case HAX_EXIT_PAUSED:
+ break;
+ default:
+            dprint("Unknown exit %x from hax\n", ht->_exit_status);
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ }
+    } while (!ret);
+
+ if (cpu->exit_request) {
+ cpu->exit_request = 0;
+ env->exception_index = EXCP_INTERRUPT;
+ }
+ return ret;
+}
+
+static void do_hax_cpu_synchronize_state(void *_env)
+{
+ CPUArchState *env = _env;
+ if (!env->hax_vcpu_dirty) {
+ hax_vcpu_sync_state(env, 0);
+ env->hax_vcpu_dirty = 1;
+ }
+}
+
-    if (!env->hax_vcpu_dirty) {
-        run_on_cpu(env, do_hax_cpu_synchronize_state, env);
++void hax_cpu_synchronize_state(CPUState *cpu)
+{
++    CPUArchState *env = cpu->env_ptr;
++
++    if (!env->hax_vcpu_dirty) {
++        run_on_cpu(cpu, do_hax_cpu_synchronize_state, env);
+    }
+}
+
+void hax_cpu_synchronize_post_reset(CPUArchState *env)
+{
+ hax_vcpu_sync_state(env, 1);
+ env->hax_vcpu_dirty = 0;
+}
+
+void hax_cpu_synchronize_post_init(CPUArchState *env)
+{
+ hax_vcpu_sync_state(env, 1);
+ env->hax_vcpu_dirty = 0;
+}
+
+/*
+ * Returns 1 when emulation is needed, 0 when the exec loop should be left
+ */
+int hax_vcpu_exec(CPUArchState *env)
+{
+ int next = 0, ret = 0;
+ struct hax_vcpu_state *vcpu;
+
+ if (env->hax_vcpu->emulation_state != HAX_EMULATE_STATE_NONE)
+ return 1;
+
+ vcpu = env->hax_vcpu;
+ next = hax_vcpu_hax_exec(env);
+ switch (next)
+ {
+ case HAX_EMUL_ONE:
+ ret = 1;
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_MMIO;
+ hax_prepare_emulation(env);
+ break;
+ case HAX_EMUL_REAL:
+ ret = 1;
+ env->hax_vcpu->emulation_state =
+ HAX_EMULATE_STATE_REAL;
+ hax_prepare_emulation(env);
+ break;
+ case HAX_EMUL_HLT:
+ case HAX_EMUL_EXITLOOP:
+ break;
+ default:
+ dprint("Unknown hax vcpu exec return %x\n", next);
+ abort();
+ }
+
+ return ret;
+}
+
+#define HAX_RAM_INFO_ROM 0x1
+
+static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ memset(lhs, 0, sizeof(struct segment_desc_t ));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->operand_size = 0;
+ lhs->desc = 1;
+ lhs->long_mode = 0;
+ lhs->granularity = 0;
+ lhs->available = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->flags =
+ (rhs->type << DESC_TYPE_SHIFT)
+ | (rhs->present * DESC_P_MASK)
+ | (rhs->dpl << DESC_DPL_SHIFT)
+ | (rhs->operand_size << DESC_B_SHIFT)
+ | (rhs->desc * DESC_S_MASK)
+ | (rhs->long_mode << DESC_L_SHIFT)
+ | (rhs->granularity * DESC_G_MASK)
+ | (rhs->available * DESC_AVL_MASK);
+}
+
+static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = rhs->selector & 3;
+ lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
+ lhs->desc = (flags & DESC_S_MASK) != 0;
+ lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
+ lhs->granularity = (flags & DESC_G_MASK) != 0;
+ lhs->available = (flags & DESC_AVL_MASK) != 0;
+}
+
+static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
+{
+ target_ulong reg = *hax_reg;
+
+ if (set)
+ *hax_reg = *qemu_reg;
+ else
+ *qemu_reg = reg;
+}
+
+/* The sregs have been synced with the HAX kernel module before this call */
+static int hax_get_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ get_seg(&env->segs[R_CS], &sregs->_cs);
+ get_seg(&env->segs[R_DS], &sregs->_ds);
+ get_seg(&env->segs[R_ES], &sregs->_es);
+ get_seg(&env->segs[R_FS], &sregs->_fs);
+ get_seg(&env->segs[R_GS], &sregs->_gs);
+ get_seg(&env->segs[R_SS], &sregs->_ss);
+
+ get_seg(&env->tr, &sregs->_tr);
+ get_seg(&env->ldt, &sregs->_ldt);
+ env->idt.limit = sregs->_idt.limit;
+ env->idt.base = sregs->_idt.base;
+ env->gdt.limit = sregs->_gdt.limit;
+ env->gdt.base = sregs->_gdt.base;
+ return 0;
+}
+
+static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
+{
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_seg(&sregs->_es, &env->segs[R_ES]);
+ set_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_seg(&sregs->_ss, &env->segs[R_SS]);
+
+ if (env->cr[0] & CR0_PE_MASK) {
+ /* force ss cpl to cs cpl */
+ sregs->_ss.selector = (sregs->_ss.selector & ~3) |
+ (sregs->_cs.selector & 3);
+ sregs->_ss.dpl = sregs->_ss.selector & 3;
+ }
+ }
+
+ set_seg(&sregs->_tr, &env->tr);
+ set_seg(&sregs->_ldt, &env->ldt);
+ sregs->_idt.limit = env->idt.limit;
+ sregs->_idt.base = env->idt.base;
+ sregs->_gdt.limit = env->gdt.limit;
+ sregs->_gdt.base = env->gdt.base;
+ return 0;
+}
+
+/*
+ * After getting the state from the kernel module, some
+ * qemu emulator state needs to be updated as well
+ */
+static int hax_setup_qemu_emulator(CPUArchState *env)
+{
+
+#define HFLAG_COPY_MASK ~( \
+ HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+ uint32_t hflags;
+
+ hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+ hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+ (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+ hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+ hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
+ (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->efer & MSR_EFER_LMA) {
+ hflags |= HF_LMA_MASK;
+ }
+
+ if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+ hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ } else {
+ hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_CS32_SHIFT);
+ hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) ||
+ (env->eflags & VM_MASK) ||
+ !(hflags & HF_CS32_MASK)) {
+ hflags |= HF_ADDSEG_MASK;
+ } else {
+ hflags |= ((env->segs[R_DS].base |
+ env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) <<
+ HF_ADDSEG_SHIFT;
+ }
+ }
+ env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ return 0;
+}
+
+static int hax_sync_vcpu_register(CPUArchState *env, int set)
+{
+ struct vcpu_state_t regs;
+ int ret;
+    memset(&regs, 0, sizeof(struct vcpu_state_t));
+
+ if (!set)
+ {
+        ret = hax_sync_vcpu_state(env, &regs, 0);
+ if (ret < 0)
+ return -1;
+ }
+
+    /* generic registers */
+    hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
+    hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
+    hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
+    hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
+    hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
+    hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
+    hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
+    hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
+
+    hax_getput_reg(&regs._rflags, &env->eflags, set);
+    hax_getput_reg(&regs._rip, &env->eip, set);
+
+ if (set)
+ {
+
+ regs._cr0 = env->cr[0];
+ regs._cr2 = env->cr[2];
+ regs._cr3 = env->cr[3];
+ regs._cr4 = env->cr[4];
+        hax_set_segments(env, &regs);
+ }
+ else
+ {
+ env->cr[0] = regs._cr0;
+ env->cr[2] = regs._cr2;
+ env->cr[3] = regs._cr3;
+ env->cr[4] = regs._cr4;
+        hax_get_segments(env, &regs);
+ }
+
+ if (set)
+ {
+        ret = hax_sync_vcpu_state(env, &regs, 1);
+ if (ret < 0)
+ return -1;
+ }
+ if (!set)
+ hax_setup_qemu_emulator(env);
+ return 0;
+}
+
+static void hax_msr_entry_set(struct vmx_msr *item,
+ uint32_t index, uint64_t value)
+{
+ item->entry = index;
+ item->value = value;
+}
+
+static int hax_get_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs = md.entries;
+ int ret, i, n;
+
+ n = 0;
+ msrs[n++].entry = MSR_IA32_SYSENTER_CS;
+ msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
+ msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
+ msrs[n++].entry = MSR_IA32_TSC;
+ md.nr_msr = n;
+ ret = hax_sync_msr(env, &md, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < md.done; i++) {
+ switch (msrs[i].entry) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].value;
+ break;
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].value;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int hax_set_msrs(CPUArchState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs;
+ msrs = md.entries;
+ int n = 0;
+
+ memset(&md, 0, sizeof(struct hax_msr_data));
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+ md.nr_msr = n;
+ md.done = 0;
+
+ return hax_sync_msr(env, &md, 1);
+
+}
+
+static int hax_get_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i, ret;
+
+ ret = hax_sync_fpu(env, &fpu, 0);
+ if (ret < 0)
+ return ret;
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ for (i = 0; i < 8; ++i)
+ env->fptags[i] = !((fpu.ftw >> i) & 1);
+ memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));
+
+ memcpy(env->xmm_regs, fpu.mmx_1, sizeof(fpu.mmx_1));
+ memcpy((XMMReg *)(env->xmm_regs) + 8, fpu.mmx_2, sizeof(fpu.mmx_2));
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
+
+static int hax_set_fpu(CPUArchState *env)
+{
+ struct fx_layout fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof(fpu));
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+
+ for (i = 0; i < 8; ++i)
+ fpu.ftw |= (!env->fptags[i]) << i;
+
+ memcpy(fpu.st_mm, env->fpregs, sizeof (env->fpregs));
+ memcpy(fpu.mmx_1, env->xmm_regs, sizeof (fpu.mmx_1));
+ memcpy(fpu.mmx_2, (XMMReg *)(env->xmm_regs) + 8, sizeof (fpu.mmx_2));
+
+ fpu.mxcsr = env->mxcsr;
+
+ return hax_sync_fpu(env, &fpu, 1);
+}
+
+int hax_arch_get_registers(CPUArchState *env)
+{
+ int ret;
+
+ ret = hax_sync_vcpu_register(env, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = hax_get_fpu(env);
+ if (ret < 0)
+ return ret;
+
+ ret = hax_get_msrs(env);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hax_arch_set_registers(CPUArchState *env)
+{
+ int ret;
+ ret = hax_sync_vcpu_register(env, 1);
+
+ if (ret < 0)
+ {
+ dprint("Failed to sync vcpu reg\n");
+ return ret;
+ }
+ ret = hax_set_fpu(env);
+ if (ret < 0)
+ {
+ dprint("FPU failed\n");
+ return ret;
+ }
+ ret = hax_set_msrs(env);
+ if (ret < 0)
+ {
+ dprint("MSR failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void hax_vcpu_sync_state(CPUArchState *env, int modified)
+{
+ if (hax_enabled()) {
+ if (modified)
+ hax_arch_set_registers(env);
+ else
+ hax_arch_get_registers(env);
+ }
+}
+
+/*
+ * Much simpler than kvm, at least in the first stage, because
+ * we don't need to consider device pass-through or the framebuffer,
+ * and we may even be able to remove the bios entirely
+ */
+int hax_sync_vcpus(void)
+{
+ if (hax_enabled())
+ {
+ CPUArchState *env;
+
+ env = first_cpu;
+ if (!env)
+ return 0;
+
+ for (; env != NULL; env = env->next_cpu) {
+ int ret;
+
+ ret = hax_arch_set_registers(env);
+ if (ret < 0)
+ {
+ dprint("Failed to sync HAX vcpu context\n");
+ exit(1);
+ }
+ }
+ }
+
+ return 0;
+}
+void hax_reset_vcpu_state(void *opaque)
+{
+ CPUArchState *env = opaque;
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ {
+        dprint("Reset hax_vcpu->emulation_state\n");
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL;
+ env->hax_vcpu->tunnel->user_event_pending = 0;
+ env->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
+
+ }
+
+}
+
+
/* Forward declarations for functions declared in tcg-target.c and used here. */
static void tcg_target_init(TCGContext *s);
static void tcg_target_qemu_prologue(TCGContext *s);
-static void patch_reloc(uint8_t *code_ptr, int type,
+static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value, tcg_target_long addend);
+ /* The CIE and FDE header definitions will be common to all hosts. */
+ typedef struct {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t id;
+ uint8_t version;
+ char augmentation[1];
+ uint8_t code_align;
+ uint8_t data_align;
+ uint8_t return_column;
+ } DebugFrameCIE;
+
+ typedef struct QEMU_PACKED {
+ uint32_t len __attribute__((aligned((sizeof(void *)))));
+ uint32_t cie_offset;
+ tcg_target_long func_start;
+ tcg_target_long func_len;
+ } DebugFrameFDEHeader;
+
static void tcg_register_jit_int(void *buf, size_t size,
void *debug_frame, size_t debug_frame_size)
__attribute__((unused));
#include "qapi/error.h"
#include "qmp-commands.h"
#include "qapi-types.h"
+ #include "ui/keymaps.h"
+
+//#include "tizen/src/debug_ch.h"
+
+//MULTI_DEBUG_CHANNEL(tizen, input);
+
struct QEMUPutMouseEntry {
QEMUPutMouseEvent *qemu_put_mouse_event;
void *qemu_put_mouse_event_opaque;
#include "qemu/main-loop.h"
#include "trace.h"
#include "qemu/sockets.h"
-
+ /* this must come after including "trace.h" */
+ #include <shlobj.h>
+#ifdef CONFIG_MARU
+typedef BOOL (WINAPI *LPFN_ISWOW64PROCESS) (HANDLE, PBOOL);
+LPFN_ISWOW64PROCESS fnIsWow64Process;
+
+int is_wow64(void)
+{
+ int result = 0;
+
+ /* IsWow64Process is not available on all supported versions of Windows.
+ Use GetModuleHandle to get a handle to the DLL that contains the function
+ and GetProcAddress to get a pointer to the function if available. */
+
+ fnIsWow64Process = (LPFN_ISWOW64PROCESS) GetProcAddress(
+ GetModuleHandle(TEXT("kernel32")),"IsWow64Process");
+
+ if (NULL != fnIsWow64Process) {
+        if (!fnIsWow64Process(GetCurrentProcess(), &result)) {
+            // IsWow64Process exists but the call failed
+            fprintf(stderr, "IsWow64Process call failed\n");
+ }
+ }
+ return result;
+}
+
+bool get_java_path(char** java_path)
+{
+ HKEY hKeyNew;
+ HKEY hKey;
+ //char strJavaRuntimePath[JAVA_MAX_COMMAND_LENGTH] = {0};
+ char strChoosenName[JAVA_MAX_COMMAND_LENGTH] = {0};
+ char strSubKeyName[JAVA_MAX_COMMAND_LENGTH] = {0};
+ char strJavaHome[JAVA_MAX_COMMAND_LENGTH] = {0};
+ int index;
+ DWORD dwSubKeyNameMax = JAVA_MAX_COMMAND_LENGTH;
+ DWORD dwBufLen = JAVA_MAX_COMMAND_LENGTH;
+
+ RegOpenKeyEx(HKEY_LOCAL_MACHINE,
+ "SOFTWARE\\JavaSoft\\Java Runtime Environment",
+ 0,
+ KEY_QUERY_VALUE | KEY_ENUMERATE_SUB_KEYS | MY_KEY_WOW64_64KEY,
+ &hKey);
+ RegEnumKeyEx(hKey, 0, (LPSTR)strSubKeyName, &dwSubKeyNameMax,
+ NULL, NULL, NULL, NULL);
+ strcpy(strChoosenName, strSubKeyName);
+
+ index = 1;
+ while (ERROR_SUCCESS ==
+ RegEnumKeyEx(hKey, index, (LPSTR)strSubKeyName, &dwSubKeyNameMax,
+ NULL, NULL, NULL, NULL)) {
+ if (strcmp(strChoosenName, strSubKeyName) < 0) {
+ strcpy(strChoosenName, strSubKeyName);
+ }
+ index++;
+ }
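+
+    /* strChoosenName now holds the lexicographically greatest subkey name,
+     * used here as an approximation of the newest installed JRE version. */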
+
+ RegOpenKeyEx(hKey, strChoosenName, 0,
+ KEY_QUERY_VALUE | MY_KEY_WOW64_64KEY, &hKeyNew);
+ RegQueryValueEx(hKeyNew, "JavaHome", NULL,
+ NULL, (LPBYTE)strJavaHome, &dwBufLen);
+ RegCloseKey(hKey);
+ if (strJavaHome[0] != '\0') {
+ sprintf(*java_path, "\"%s\\bin\\java\"", strJavaHome);
+ //strcpy(*java_path, strJavaHome);
+ //strcat(*java_path, "\\bin\\java");
+ } else {
+ return false;
+ }
+
+ return true;
+}
+#endif
+
void *qemu_oom_check(void *ptr)
{
+#ifdef CONFIG_MARU
+ const char _msg[] = "Failed to allocate memory in qemu.";
+ char cmd[JAVA_MAX_COMMAND_LENGTH] = { 0, };
+ char *JAVA_EXEFILE_PATH = NULL;
+ int len, ret;
+#endif
+
if (ptr == NULL) {
fprintf(stderr, "Failed to allocate memory: %lu\n", GetLastError());
+#ifdef CONFIG_MARU
+ JAVA_EXEFILE_PATH = malloc(JAVA_MAX_COMMAND_LENGTH);
+ if (!JAVA_EXEFILE_PATH) {
+ // TODO: print error message.
+ return ptr;
+ }
+
+ memset(JAVA_EXEFILE_PATH, 0, JAVA_MAX_COMMAND_LENGTH);
+ if (is_wow64()) {
+ if (!get_java_path(&JAVA_EXEFILE_PATH)) {
+ strcpy(JAVA_EXEFILE_PATH, "java");
+ }
+ } else {
+ strcpy(JAVA_EXEFILE_PATH, "java");
+ }
+ len = strlen(JAVA_EXEFILE_PATH) + strlen(JAVA_EXEOPTION) +
+ strlen(JAR_SKINFILE) + strlen(JAVA_SIMPLEMODE_OPTION) +
+ strlen(_msg) + 7;
+ if (len > JAVA_MAX_COMMAND_LENGTH) {
+ len = JAVA_MAX_COMMAND_LENGTH;
+ }
+
+ snprintf(cmd, len, "%s %s %s %s=\"%s\"",
+ JAVA_EXEFILE_PATH, JAVA_EXEOPTION, JAR_SKINFILE,
+ JAVA_SIMPLEMODE_OPTION, _msg);
+ ret = WinExec(cmd, SW_SHOW);
+ if (ret < 32) {
+ // TODO: error handling...
+ }
+
+ /* for 64bit windows */
+ free(JAVA_EXEFILE_PATH);
+    JAVA_EXEFILE_PATH = NULL;
+#endif
abort();
}
return ptr;
qdev_machine_init();
+#ifdef CONFIG_MARU
+    // The returned pointer may differ from the input pointer.
+ kernel_cmdline = prepare_maru_devices(kernel_cmdline);
+#endif
QEMUMachineInitArgs args = { .ram_size = ram_size,
- .boot_device = (boot_devices[0] == '\0') ?
- machine->boot_order :
- boot_devices,
+ .boot_device = boot_order,
.kernel_filename = kernel_filename,
.kernel_cmdline = kernel_cmdline,
.initrd_filename = initrd_filename,
ram_addr_t block_len;
block_len = ram_size;
- if (ram_size >= HVM_BELOW_4G_RAM_END) {
+ if (ram_size >= QEMU_BELOW_4G_RAM_END) {
/* Xen does not allocate the memory continuously, and keep a hole at
- * HVM_BELOW_4G_MMIO_START of HVM_BELOW_4G_MMIO_LENGTH
+ * QEMU_BELOW_4G_RAM_END of QEMU_BELOW_4G_MMIO_LENGTH
*/
- block_len += HVM_BELOW_4G_MMIO_LENGTH;
+ block_len += QEMU_BELOW_4G_MMIO_LENGTH;
}
- memory_region_init_ram(&ram_memory, "xen.ram", block_len);
+ memory_region_init_ram(&ram_memory, NULL, "xen.ram", block_len);
vmstate_register_ram_global(&ram_memory);
- if (ram_size >= HVM_BELOW_4G_RAM_END) {
- above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
- below_4g_mem_size = HVM_BELOW_4G_RAM_END;
+ if (ram_size >= QEMU_BELOW_4G_RAM_END) {
+ above_4g_mem_size = ram_size - QEMU_BELOW_4G_RAM_END;
+ below_4g_mem_size = QEMU_BELOW_4G_RAM_END;
} else {
below_4g_mem_size = ram_size;
}