CPU_FOREACH(cpu) {
cpu_synchronize_post_reset(cpu);
#ifdef CONFIG_HAX
- if (hax_enabled())
+ if (hax_enabled() && hax_ug_platform())
hax_cpu_synchronize_post_reset(cpu);
#endif
}
CPU_FOREACH(cpu) {
cpu_synchronize_post_init(cpu);
#ifdef CONFIG_HAX
- if (hax_enabled())
+ if (hax_enabled() && hax_ug_platform())
hax_cpu_synchronize_post_init(cpu);
#endif
}
{
CPUState *cpu = arg;
int r;
-
qemu_thread_get_self(cpu->thread);
qemu_mutex_lock(&qemu_global_mutex);
if (!exit_request)
cpu_signal(0);
- cpu->exit_request = 1;
+ if (hax_enabled() && hax_ug_platform())
+ cpu->exit_request = 1;
#endif
#else /* _WIN32 */
if (!qemu_cpu_is_self(cpu)) {
}
cpu_signal(0);
- cpu->exit_request = 1;
+ if(hax_enabled() && hax_ug_platform())
+ cpu->exit_request = 1;
if (ResumeThread(cpu->hThread) == (DWORD)-1) {
fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
- if ((hax_enabled() || !tcg_enabled()) && !cpu->thread_kicked) {
+#ifdef CONFIG_HAX
+ if (((hax_enabled() && hax_ug_platform()) || !tcg_enabled()) && !cpu->thread_kicked) {
+#else
+ if (!tcg_enabled() && !cpu->thread_kicked) {
+#endif
qemu_cpu_kick_thread(cpu);
cpu->thread_kicked = true;
}
void qemu_mutex_lock_iothread(void)
{
- if (hax_enabled() || !tcg_enabled()) {
+#ifdef CONFIG_HAX
+ if ((hax_enabled() && hax_ug_platform()) || !tcg_enabled()) {
+#else
+ if (!tcg_enabled()) {
+#endif
qemu_mutex_lock(&qemu_global_mutex);
} else {
iothread_requesting_mutex = true;
cpu->nr_cores = smp_cores;
cpu->nr_threads = smp_threads;
cpu->stopped = true;
+
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
#ifdef CONFIG_HAX
- } else if (hax_enabled()) {
+ } else if (hax_enabled() && hax_ug_platform()) {
qemu_hax_start_vcpu(cpu);
#endif
} else if (tcg_enabled()) {
#define HAX_EMULATE_STATE_NONE 0x3
#define HAX_EMULATE_STATE_INITIAL 0x4
+#define HAX_NON_UG_PLATFORM 0x0
+#define HAX_UG_PLATFORM 0x1
+
static void hax_vcpu_sync_state(CPUArchState *env, int modified);
static int hax_arch_get_registers(CPUArchState *env);
static int hax_handle_io(CPUArchState *env, uint32_t df, uint16_t port, int dir, int size, int cnt, void *buf);
static int hax_disabled = 1;
int hax_support = -1;
+int ug_support = 0;
/* Called after hax_init */
int hax_enabled(void)
hax_disabled = disable;
}
+/* Called after hax_init: returns nonzero iff the host CPU advertised the
+ * Unrestricted Guest (HAX_CAP_UG) capability during capability probing. */
+int hax_ug_platform(void)
+{
+    return (ug_support);
+}
+
/* Currently non-PG modes are emulated by QEMU */
int hax_vcpu_emulation_mode(CPUState *cpu)
{
- // Tcg is single-thread, so we need haxm to run smp.
- // If the host has no UG, we always run tcg.
-
- if (hax_enabled())
- return 0;
- else
- return 1;
+ CPUArchState *env = (CPUArchState *)(cpu->env_ptr);
+ /* Return nonzero (emulate in QEMU) while paging is disabled, i.e.
+  * CR0.PG is clear -- the "non-PG modes are emulated" case above.
+  * With paging on, the vcpu can run under HAX. */
+ return !(env->cr[0] & CR0_PG_MASK);
}
static int hax_prepare_emulation(CPUArchState *env)
return -ENXIO;
}
- if (!(cap->winfo & HAX_CAP_UG))
- {
- dprint("UG feature is not available on platform needed to support HAXM.\n");
- return -ENXIO;
- }
+ if ((cap->winfo & HAX_CAP_UG))
+ ug_support = 1;
if (cap->wstatus & HAX_CAP_MEMQUOTA)
{
{
int irq;
- irq = cpu_get_pic_interrupt(env);
+ irq = cpu_get_pic_interrupt(env);
if (irq >= 0) {
hax_inject_interrupt(env, irq);
cpu->interrupt_request &= ~CPU_INTERRUPT_HARD;
- }
+ }
}
/* If we have an interrupt but the guest is not ready to receive an
* 5. An unknown VMX exit happens
*/
extern void qemu_system_reset_request(void);
-static int hax_vcpu_hax_exec(CPUArchState *env)
+static int hax_vcpu_hax_exec(CPUArchState *env, int ug_platform)
{
int ret = 0;
CPUState *cpu = ENV_GET_CPU(env);
struct hax_vcpu_state *vcpu = cpu->hax_vcpu;
struct hax_tunnel *ht = vcpu->tunnel;
- if (hax_vcpu_emulation_mode(cpu))
+ if(!ug_platform)
{
- dprint("Trying to vcpu execute at eip:%lx\n", env->eip);
- return HAX_EMUL_EXITLOOP;
- }
+ if (hax_vcpu_emulation_mode(cpu))
+ {
+ dprint("Trying to vcpu execute at eip:%lx\n", env->eip);
+ return HAX_EMUL_EXITLOOP;
+ }
- cpu->halted = 0;
+ cpu->halted = 0;
- if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
- apic_poll_irq(x86_cpu->apic_state);
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
}
+ else /* UG platform */
+ {
+ if (!hax_enabled())
+ {
+ dprint("Trying to vcpu execute at eip:%lx\n", env->eip);
+ return HAX_EMUL_EXITLOOP;
+ }
- if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
- fprintf(stderr, "\nhax_vcpu_hax_exec: handling INIT for %d \n", cpu->cpu_index);
- do_cpu_init(x86_cpu);
- hax_vcpu_sync_state(env, 1);
- }
- if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
- fprintf(stderr, "hax_vcpu_hax_exec: handling SIPI for %d \n", cpu->cpu_index);
- hax_vcpu_sync_state(env, 0);
- do_cpu_sipi(x86_cpu);
- hax_vcpu_sync_state(env, 1);
+ cpu->halted = 0;
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+ cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(x86_cpu->apic_state);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ fprintf(stderr, "\nUG hax_vcpu_hax_exec: handling INIT for %d \n", cpu->cpu_index);
+ do_cpu_init(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ fprintf(stderr, "UG hax_vcpu_hax_exec: handling SIPI for %d \n", cpu->cpu_index);
+ hax_vcpu_sync_state(env, 0);
+ do_cpu_sipi(x86_cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
}
//hax_cpu_synchronize_state(cpu);
#endif
hax_vcpu_interrupt(env);
- qemu_mutex_unlock_iothread();
- hax_ret = hax_vcpu_run(vcpu);
- qemu_mutex_lock_iothread();
+ if (!ug_platform)
+ {
+ hax_ret = hax_vcpu_run(vcpu);
+ }
+ else /* UG platform */
+ {
+ qemu_mutex_unlock_iothread();
+ hax_ret = hax_vcpu_run(vcpu);
+ qemu_mutex_lock_iothread();
+ current_cpu = cpu;
+ }
-#ifdef CONFIG_DARWIN
- current_cpu = cpu;
-#endif
/* Simply continue the vcpu_run if system call interrupted */
if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
dprint("io window interrupted\n");
return 1;
vcpu = cpu->hax_vcpu;
- next = hax_vcpu_hax_exec(env);
+ next = hax_vcpu_hax_exec(env, HAX_NON_UG_PLATFORM);
switch (next)
{
case HAX_EMUL_ONE:
break;
}
- why = hax_vcpu_hax_exec(env);
+ why = hax_vcpu_hax_exec(env, HAX_UG_PLATFORM);
if ((why != HAX_EMUL_HLT) && (why != HAX_EMUL_EXITLOOP))
{