}
}
+/*
+ * Block until this vcpu has work to do, then run the common per-vcpu
+ * I/O event processing.  Structure mirrors qemu_kvm_wait_io_event().
+ * Caller holds qemu_global_mutex; qemu_cond_wait() drops it while
+ * sleeping on cpu->halt_cond and re-acquires it before returning.
+ */
+static void qemu_hax_wait_io_event(CPUState *cpu)
+{
+ while (cpu_thread_is_idle(cpu)) {
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
+ }
+
+ qemu_wait_io_event_common(cpu);
+}
+
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
while (cpu_thread_is_idle(cpu)) {
return NULL;
}
+/*
+ * Per-vcpu thread entry point for HAX, created by qemu_hax_start_vcpu().
+ * Initializes the HAX vcpu, signals creation back to the starter, then
+ * loops forever: run the guest while cpu_can_run(), hand EXCP_DEBUG to
+ * the gdbstub, and sleep in qemu_hax_wait_io_event() when idle.
+ * Runs with qemu_global_mutex held except while waiting.
+ */
+static void *qemu_hax_cpu_thread_fn(void *arg)
+{
+ CPUState *cpu = arg;
+ int r;
+
+ qemu_thread_get_self(cpu->thread);
+ qemu_mutex_lock(&qemu_global_mutex);
+
+ cpu->thread_id = qemu_get_thread_id();
+ cpu->created = true;
+ current_cpu = cpu;
+
+ hax_init_vcpu(cpu->env_ptr);
+ /* Wake qemu_hax_start_vcpu(), which waits on cpu->created. */
+ qemu_cond_signal(&qemu_cpu_cond);
+
+ while (1) {
+ if (cpu_can_run(cpu)) {
+ r = hax_smp_cpu_exec(cpu->env_ptr);
+ if (r == EXCP_DEBUG) {
+ cpu_handle_guest_debug(cpu);
+ }
+ }
+ qemu_hax_wait_io_event(cpu);
+ }
+ /* Not reached: the loop above never exits. */
+ return NULL;
+}
+
static void qemu_cpu_kick_thread(CPUState *cpu)
{
#ifndef _WIN32
}
cpu_signal(0);
+ cpu->exit_request = 1;
if (ResumeThread(cpu->hThread) == (DWORD)-1) {
fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
- if (!tcg_enabled() && !cpu->thread_kicked) {
+ if ((hax_enabled() || !tcg_enabled()) && !cpu->thread_kicked) {
qemu_cpu_kick_thread(cpu);
cpu->thread_kicked = true;
}
void qemu_mutex_lock_iothread(void)
{
- if (!tcg_enabled()) {
+ if (hax_enabled() || !tcg_enabled()) {
qemu_mutex_lock(&qemu_global_mutex);
} else {
iothread_requesting_mutex = true;
}
}
+/*
+ * Create the per-vcpu HAX thread and block until it has signalled
+ * cpu->created (see qemu_hax_cpu_thread_fn).  On Windows the native
+ * thread handle is cached in cpu->hThread, which qemu_cpu_kick_thread()
+ * needs for SuspendThread/ResumeThread-based kicking.
+ * Caller holds qemu_global_mutex (dropped while waiting on the cond).
+ */
+static void qemu_hax_start_vcpu(CPUState *cpu)
+{
+ char thread_name[VCPU_THREAD_NAME_SIZE];
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
+ cpu->halt_cond = g_malloc0(sizeof(QemuCond));
+ qemu_cond_init(cpu->halt_cond);
+
+ snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HAX",
+ cpu->cpu_index);
+
+ qemu_thread_create(cpu->thread, thread_name, qemu_hax_cpu_thread_fn,
+ cpu, QEMU_THREAD_JOINABLE);
+#ifdef _WIN32
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
+#endif
+ while (!cpu->created) {
+ qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
+ }
+}
+
static void qemu_kvm_start_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
cpu->stopped = true;
if (kvm_enabled()) {
qemu_kvm_start_vcpu(cpu);
+ } else if (hax_enabled()) {
+ qemu_hax_start_vcpu(cpu);
} else if (tcg_enabled()) {
qemu_tcg_init_vcpu(cpu);
} else {
* TODO: proper implementations via Win32 .tls sections and
* POSIX pthread_getspecific.
*/
-#ifdef __linux__
+#if defined (__linux__) || defined(_WIN32)
#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
#define DEFINE_TLS(type, x) __thread __typeof__(type) tls__##x
#define tls_var(x) tls__##x
int hax_init_vcpu(CPUArchState *env);
int hax_vcpu_exec(CPUArchState *env);
+int hax_smp_cpu_exec(CPUArchState *env);
void hax_vcpu_sync_state(CPUArchState *env, int modified);
//extern void hax_cpu_synchronize_state(CPUArchState *env);
//extern void hax_cpu_synchronize_post_reset(CPUArchState *env);
/* Currently non-PG modes are emulated by QEMU */
int hax_vcpu_emulation_mode(CPUArchState *env)
{
-    return !(env->cr[0] & CR0_PG_MASK);
+    /*
+     * TCG is single-threaded, so SMP guests must stay on HAX.  HAX in
+     * turn requires host UG support (rejected at capability-check time
+     * otherwise), so when HAX is enabled the guest never needs to fall
+     * back to emulation, regardless of paging mode.
+     */
+    if (hax_enabled()) {
+        return 0;
+    }
+    return 1;
}
static int hax_prepare_emulation(CPUArchState *env)
return -ENXIO;
}
+ if (!(cap->winfo & HAX_CAP_UG))
+ {
+ dprint("UG feature is not available on platform needed to support HAXM.\n");
+ return -ENXIO;
+ }
+
if (cap->wstatus & HAX_CAP_MEMQUOTA)
{
if (cap->mem_quota < hax->mem_quota)
.log_global_stop = hax_log_global_stop,
};
-#if 0
-static void hax_handle_interrupt(CPUArchState *env, int mask)
+/*
+ * cpu_interrupt() callback for HAX (installed via cpu_interrupt_handler
+ * during setup): record the pending interrupt mask and, when the request
+ * comes from a different thread, kick the target vcpu thread so it
+ * notices the interrupt promptly.
+ */
+static void hax_handle_interrupt(CPUState *cpu, int mask)
{
- CPUState *cpu = ENV_GET_CPU(env);
cpu->interrupt_request |= mask;
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
}
}
-#endif
int hax_pre_init(uint64_t ram_size)
{
qversion.cur_version = hax_cur_version;
qversion.least_version = hax_lest_version;
hax_notify_qemu_version(hax->vm->fd, &qversion);
+ cpu_interrupt_handler = hax_handle_interrupt;
hax_support = 1;
return ret;
return HAX_EMUL_EXITLOOP;
}
-
+ if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+ fprintf(stderr, "\nhax_vcpu_hax_exec: handling INIT for %d \n", cpu->cpu_index);
+ do_cpu_init(cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+ if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+ fprintf(stderr, "hax_vcpu_hax_exec: handling SIPI for %d \n", cpu->cpu_index);
+ hax_vcpu_sync_state(env, 0);
+ do_cpu_sipi(cpu);
+ hax_vcpu_sync_state(env, 1);
+ }
+
//hax_cpu_synchronize_state(env);
do {
#endif
hax_vcpu_interrupt(env);
-
+ qemu_mutex_unlock_iothread();
hax_ret = hax_vcpu_run(vcpu);
+ qemu_mutex_lock_iothread();
/* Simply continue the vcpu_run if system call interrupted */
if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
return ret;
}
+/*
+ * SMP-capable outer run loop for one vcpu, called from
+ * qemu_hax_cpu_thread_fn().  Repeatedly enters the guest via
+ * hax_vcpu_hax_exec() until a pending exception/interrupt
+ * (cpu->exception_index >= EXCP_INTERRUPT) asks us to return to the
+ * caller; that exception code is consumed and returned.
+ */
+int hax_smp_cpu_exec(CPUArchState *env)
+{
+    CPUState *cpu = ENV_GET_CPU(env);
+    int why;
+    /* Initialized to quiet -Wmaybe-uninitialized; always set before break. */
+    int ret = 0;
+
+    while (1) {
+        if (cpu->exception_index >= EXCP_INTERRUPT) {
+            /* Consume the pending exception and hand it to the caller. */
+            ret = cpu->exception_index;
+            cpu->exception_index = -1;
+            break;
+        }
+
+        why = hax_vcpu_hax_exec(env);
+
+        /* Only HLT and EXITLOOP are expected here; anything else is a bug. */
+        if (why != HAX_EMUL_HLT && why != HAX_EMUL_EXITLOOP) {
+            dprint("Unknown hax vcpu return %x\n", why);
+            abort();
+        }
+    }
+
+    return ret;
+}
+
#define HAX_RAM_INFO_ROM 0x1
static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
#define HAX_CAP_FAILREASON_NX 0x2
#define HAX_CAP_MEMQUOTA 0x2
+#define HAX_CAP_UG 0x4
struct hax_capabilityinfo
{