4 * Copyright (c) 2011 Intel Corporation
 * Jiang Yunhong <yunhong.jiang@intel.com>
8 * This work is licensed under the terms of the GNU GPL, version 2 or later.
15 * return 0 when success, -1 when driver not loaded,
16 * other negative value for other failure
18 static int hax_open_device(hax_fd *fd)
26 hDevice = CreateFile( "\\\\.\\HAX",
27 GENERIC_READ | GENERIC_WRITE,
31 FILE_ATTRIBUTE_NORMAL,
34 if (hDevice == INVALID_HANDLE_VALUE)
36 dprint("Failed to open the HAX device!\n");
37 errNum = GetLastError();
38 if (errNum == ERROR_FILE_NOT_FOUND)
43 dprint("device fd:%d\n", *fd);
47 /* This need changes after more investigation on driver loading*/
48 hax_fd hax_mod_open(void)
53 ret = hax_open_device(&fd);
55 dprint("Open HAX device failed\n");
60 int hax_populate_ram(uint64_t va, uint32_t size)
63 struct hax_alloc_ram_info info;
67 if (!hax_global.vm || !hax_global.vm->fd)
69 dprint("Allocate memory before vm create?\n");
76 hDeviceVM = hax_global.vm->fd;
78 ret = DeviceIoControl(hDeviceVM,
79 HAX_VM_IOCTL_ALLOC_RAM,
86 dprint("Failed to allocate %x memory\n", size);
95 * much simpler than kvm, at least in first stage because:
96 * We don't need consider the device pass-through, we don't need
97 * consider the framebuffer, and we may even remove the bios at all
100 int hax_set_phys_mem(MemoryRegionSection *section)
102 struct hax_set_ram_info info, *pinfo = &info;
103 MemoryRegion *mr = section->mr;
104 hwaddr start_addr = section->offset_within_address_space;
105 ram_addr_t size = int128_get64(section->size);
110 /* We only care for the RAM and ROM */
111 if (!memory_region_is_ram(mr)) {
115 if ( (start_addr & ~TARGET_PAGE_MASK) || (size & ~TARGET_PAGE_MASK)) {
117 "set_phys_mem %x %lx requires page aligned addr and size\n",
122 info.pa_start = start_addr;
124 info.va = (uint64_t)(intptr_t)(memory_region_get_ram_ptr(mr) +
125 section->offset_within_region);
126 info.flags = memory_region_is_rom(mr) ? 1 : 0;
128 hDeviceVM = hax_global.vm->fd;
130 ret = DeviceIoControl(hDeviceVM,
131 HAX_VM_IOCTL_SET_RAM,
132 pinfo, sizeof(*pinfo),
135 (LPOVERLAPPED) NULL);
143 int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
146 HANDLE hDevice = hax->fd; //handle to hax module
150 if (hax_invalid_fd(hDevice)) {
151 dprint("Invalid fd for hax device!\n");
155 ret = DeviceIoControl(hDevice,
156 HAX_IOCTL_CAPABILITY,
160 (LPOVERLAPPED) NULL);
163 err = GetLastError();
164 if (err == ERROR_INSUFFICIENT_BUFFER ||
165 err == ERROR_MORE_DATA)
166 dprint("hax capability is too long to hold.\n");
167 dprint("Failed to get Hax capability:%d\n", err);
173 int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
176 HANDLE hDevice = hax->fd; //handle to hax module
180 if (hax_invalid_fd(hDevice)) {
181 dprint("Invalid fd for hax device!\n");
185 ret = DeviceIoControl(hDevice,
188 version, sizeof(*version),
190 (LPOVERLAPPED) NULL);
193 err = GetLastError();
194 if (err == ERROR_INSUFFICIENT_BUFFER ||
195 err == ERROR_MORE_DATA)
196 dprint("hax module verion is too long to hold.\n");
197 dprint("Failed to get Hax module version:%d\n", err);
203 static char *hax_vm_devfs_string(int vm_id)
207 if (vm_id > MAX_VM_ID)
209 dprint("Too big VM id\n");
213 name = g_strdup("\\\\.\\hax_vmxx");
216 sprintf(name, "\\\\.\\hax_vm%02d", vm_id);
221 static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
225 if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID)
227 dprint("Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
230 name = g_strdup("\\\\.\\hax_vmxx_vcpuxx");
233 sprintf(name, "\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id);
238 int hax_host_create_vm(struct hax_state *hax, int *vmid)
244 if (hax_invalid_fd(hax->fd))
250 ret = DeviceIoControl(hax->fd,
253 &vm_id, sizeof(vm_id),
255 (LPOVERLAPPED) NULL);
257 dprint("error code:%d", GetLastError());
264 hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
266 char *vm_name = NULL;
269 vm_name = hax_vm_devfs_string(vm_id);
271 dprint("Incorrect name\n");
272 return INVALID_HANDLE_VALUE;
275 hDeviceVM = CreateFile(vm_name,
276 GENERIC_READ | GENERIC_WRITE,
280 FILE_ATTRIBUTE_NORMAL,
282 if (hDeviceVM == INVALID_HANDLE_VALUE)
283 dprint("Open the vm devcie error:%s, ec:%d\n", vm_name, GetLastError());
289 int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
293 if (hax_invalid_fd(vm_fd))
295 ret = DeviceIoControl(vm_fd,
296 HAX_VM_IOCTL_NOTIFY_QEMU_VERSION,
297 qversion, sizeof(struct hax_qemu_version),
300 (LPOVERLAPPED) NULL);
303 dprint("Failed to notify qemu API version\n");
309 int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
314 ret = DeviceIoControl(vm_fd,
315 HAX_VM_IOCTL_VCPU_CREATE,
316 &vcpuid, sizeof(vcpuid),
319 (LPOVERLAPPED) NULL);
322 dprint("Failed to create vcpu %x\n", vcpuid);
329 hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
331 char *devfs_path = NULL;
334 devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
337 dprint("Failed to get the devfs\n");
338 return INVALID_HANDLE_VALUE;
341 hDeviceVCPU = CreateFile( devfs_path,
342 GENERIC_READ | GENERIC_WRITE,
346 FILE_ATTRIBUTE_NORMAL,
349 if (hDeviceVCPU == INVALID_HANDLE_VALUE)
350 dprint("Failed to open the vcpu devfs\n");
355 int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
357 hax_fd hDeviceVCPU = vcpu->fd;
359 struct hax_tunnel_info info;
362 ret = DeviceIoControl(hDeviceVCPU,
363 HAX_VCPU_IOCTL_SETUP_TUNNEL,
367 (LPOVERLAPPED) NULL);
370 dprint("Failed to setup the hax tunnel\n");
374 if (!valid_hax_tunnel_size(info.size))
376 dprint("Invalid hax tunnel size %x\n", info.size);
380 vcpu->tunnel = (struct hax_tunnel *)(intptr_t)(info.va);
381 vcpu->iobuf = (unsigned char *)(intptr_t)(info.io_va);
385 int hax_vcpu_run(struct hax_vcpu_state* vcpu)
388 HANDLE hDeviceVCPU = vcpu->fd;
391 ret = DeviceIoControl(hDeviceVCPU,
396 (LPOVERLAPPED) NULL);
403 int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
410 fd = hax_vcpu_get_fd(env);
411 if (hax_invalid_fd(fd))
417 ret = DeviceIoControl(hDeviceVCPU,
418 HAX_VCPU_IOCTL_SET_FPU,
422 (LPOVERLAPPED) NULL);
424 ret = DeviceIoControl(hDeviceVCPU,
425 HAX_VCPU_IOCTL_GET_FPU,
429 (LPOVERLAPPED) NULL);
436 int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
443 fd = hax_vcpu_get_fd(env);
444 if (hax_invalid_fd(fd))
449 ret = DeviceIoControl(hDeviceVCPU,
450 HAX_VCPU_IOCTL_SET_MSRS,
454 (LPOVERLAPPED) NULL);
456 ret = DeviceIoControl(hDeviceVCPU,
457 HAX_VCPU_IOCTL_GET_MSRS,
461 (LPOVERLAPPED) NULL);
468 int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
475 fd = hax_vcpu_get_fd(env);
476 if (hax_invalid_fd(fd))
482 ret = DeviceIoControl(hDeviceVCPU,
484 state, sizeof(*state),
487 (LPOVERLAPPED) NULL);
489 ret = DeviceIoControl(hDeviceVCPU,
492 state, sizeof(*state),
494 (LPOVERLAPPED) NULL);
501 int hax_inject_interrupt(CPUArchState *env, int vector)
508 fd = hax_vcpu_get_fd(env);
509 if (hax_invalid_fd(fd))
514 ret = DeviceIoControl(hDeviceVCPU,
515 HAX_VCPU_IOCTL_INTERRUPT,
516 &vector, sizeof(vector),
519 (LPOVERLAPPED) NULL);