Since HAX_VM_IOCTL_ALLOC_RAM takes a 32-bit size, it cannot handle
RAM blocks of 4GB or larger, which is why HAXM can only run guests
with less than 4GB of RAM. Solve this problem by utilizing the new
HAXM API, HAX_VM_IOCTL_ADD_RAMBLOCK, which takes a 64-bit size, to
register RAM blocks with the HAXM kernel module. The new API is
first added in HAXM 7.0.0, and its availability can be confirmed
by the presence of the HAX_CAP_64BIT_RAMBLOCK capability flag.
When the guest RAM size reaches 7GB, QEMU will ask HAXM to set up a
memory mapping that covers a 4GB region, which will fail, because
HAX_VM_IOCTL_SET_RAM also takes a 32-bit size. Work around this
limitation by splitting the large mapping into small ones and
calling HAX_VM_IOCTL_SET_RAM multiple times.
Bug: https://bugs.launchpad.net/qemu/+bug/1735576
Change-Id: Ide245bffe3ea78eb602a8637eea99537acccfedd
Signed-off-by: Yu Ning <yu.ning@intel.com>
Message-Id: <1515752555-12784-1-git-send-email-yu.ning@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit 7a5235c9e679c58be41c7f0d2f4092ded8bd01f2)
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
#ifdef CONFIG_HAX
/*
- * In Hax, the qemu allocate the virtual address, and HAX kernel
- * populate the memory with physical memory. Currently we have no
- * paging, so user should make sure enough free memory in advance
+ * We must register each RAM block with the HAXM kernel module, or
+ * hax_set_ram() will fail for any mapping into the RAM block:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_alloc_ram
+ *
+ * Old versions of the HAXM kernel module (< 6.2.0) used to preallocate all
+ * host physical pages for the RAM block as part of this registration
+ * process, hence the name hax_populate_ram().
*/
if (hax_enabled()) {
int ret = hax_populate_ram((uint64_t)(uintptr_t)new_block->host,
void hax_cpu_synchronize_post_reset(CPUState *cpu);
void hax_cpu_synchronize_post_init(CPUState *cpu);
void hax_cpu_synchronize_state(CPUState *cpu);
-int hax_populate_ram(uint64_t va, uint32_t size);
+int hax_populate_ram(uint64_t va, uint64_t size);
int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags);
int hax_vcpu_emulation_mode(CPUState *cpu);
int hax_stop_emulation(CPUState *cpu);
if ((cap->winfo & HAX_CAP_UG))
ug_support = 1;
+ hax->supports_64bit_ramblock = !!(cap->winfo & HAX_CAP_64BIT_RAMBLOCK);
+
if (cap->wstatus & HAX_CAP_MEMQUOTA)
{
if (cap->mem_quota < hax->mem_quota)
unsigned int delta;
void *host_ptr;
int flags;
+ uint32_t max_mapping_size;
/* We only care about RAM and ROM */
if (!memory_region_is_ram(mr)) {
host_ptr = memory_region_get_ram_ptr(mr) + section->offset_within_region
+ delta;
flags = memory_region_is_rom(mr) ? 1 : 0;
- hax_slot_register(start_pa, size, (uintptr_t) host_ptr, flags);
+
+ /*
+ * The kernel module interface uses 32-bit sizes:
+ * https://github.com/intel/haxm/blob/master/API.md#hax_vm_ioctl_set_ram
+ *
+ * If the mapping size is longer than 32 bits, we can't process it in one
+ * call into the kernel. Instead, we split the mapping into smaller ones,
+ * and call hax_slot_register() on each.
+ */
+ max_mapping_size = UINT32_MAX & qemu_real_host_page_mask;
+ while (size > max_mapping_size) {
+ hax_slot_register(start_pa, max_mapping_size, (uintptr_t) host_ptr, flags);
+ start_pa += max_mapping_size;
+ size -= max_mapping_size;
+ host_ptr += max_mapping_size;
+ }
+ /* Now size <= max_mapping_size */
+ hax_slot_register(start_pa, (uint32_t)size, (uintptr_t) host_ptr, flags);
}
static void
return fd;
}
-int hax_populate_ram(uint64_t va, uint32_t size)
+int hax_populate_ram(uint64_t va, uint64_t size)
{
int ret;
- struct hax_alloc_ram_info info;
if (!hax_global.vm || !hax_global.vm->fd)
{
return -EINVAL;
}
- info.size = size;
- info.va = va;
- ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
+ if (hax_global.supports_64bit_ramblock)
+ {
+ struct hax_ramblock_info ramblock = {
+ .start_va = va,
+ .size = size,
+ .reserved = 0
+ };
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
+ }
+ else
+ {
+ struct hax_alloc_ram_info info = {
+ .size = (uint32_t)size,
+ .pad = 0,
+ .va = va
+ };
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
+ }
if (ret < 0)
{
- dprint("Failed to allocate %x memory\n", size);
+ dprint("Failed to register RAM block: ret=%d, va=0x%" PRIx64
+ ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
+ hax_global.supports_64bit_ramblock ? "new" : "legacy");
return ret;
}
return 0;
#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info)
#define HAX_VM_IOCTL_VCPU_DESTROY _IOW(0, 0x83, uint32_t)
#define HAX_VM_IOCTL_NOTIFY_QEMU_VERSION _IOW(0, 0x84, struct hax_qemu_version)
+#define HAX_VM_IOCTL_ADD_RAMBLOCK _IOW(0, 0x85, struct hax_ramblock_info)
#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0)
#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data)
uint32_t version;
struct hax_vm *vm;
uint64_t mem_quota;
+ bool supports_64bit_ramblock;
};
#define HAX_MAX_VCPU 0x10
uint32_t pad;
uint64_t va;
}__attribute__ ((__packed__));
+
+/*
+ * Argument for HAX_VM_IOCTL_ADD_RAMBLOCK (available since HAXM 7.0.0,
+ * advertised via HAX_CAP_64BIT_RAMBLOCK). Unlike hax_alloc_ram_info,
+ * whose size field is 32 bits, this uses a 64-bit size, so RAM blocks
+ * of 4GB or larger can be registered with the HAXM kernel module.
+ * Packed to match the kernel module's ABI exactly; 'reserved' is set
+ * to 0 by callers (see hax_populate_ram).
+ */
+struct hax_ramblock_info {
+    uint64_t start_va;
+    uint64_t size;
+    uint64_t reserved;
+} __attribute__ ((__packed__));
+
#define HAX_RAM_INFO_ROM 0x1
struct hax_set_ram_info
{
#define HAX_CAP_MEMQUOTA 0x2
#define HAX_CAP_UG 0x4
+#define HAX_CAP_64BIT_RAMBLOCK 0x8
struct hax_capabilityinfo
{
return fd;
}
-int hax_populate_ram(uint64_t va, uint32_t size)
+int hax_populate_ram(uint64_t va, uint64_t size)
{
int ret;
- struct hax_alloc_ram_info info;
HANDLE hDeviceVM;
DWORD dSize = 0;
return -EINVAL;
}
- info.size = size;
- info.va = va;
-
hDeviceVM = hax_global.vm->fd;
- ret = DeviceIoControl(hDeviceVM,
- HAX_VM_IOCTL_ALLOC_RAM,
- &info, sizeof(info),
- NULL, 0,
- &dSize,
- (LPOVERLAPPED) NULL);
+ if (hax_global.supports_64bit_ramblock)
+ {
+ struct hax_ramblock_info ramblock = {
+ .start_va = va,
+ .size = size,
+ .reserved = 0
+ };
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ADD_RAMBLOCK,
+ &ramblock, sizeof(ramblock), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ }
+ else
+ {
+ struct hax_alloc_ram_info info = {
+ .size = (uint32_t) size,
+ .pad = 0,
+ .va = va
+ };
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ALLOC_RAM,
+ &info, sizeof(info), NULL, 0, &dSize,
+ (LPOVERLAPPED) NULL);
+ }
if (!ret) {
- dprint("Failed to allocate %x memory\n", size);
+ dprint("Failed to register RAM block: va=0x%" PRIx64
+ ", size=0x%" PRIx64 ", method=%s\n", va, size,
+ hax_global.supports_64bit_ramblock ? "new" : "legacy");
return ret;
}
#define HAX_VM_IOCTL_ALLOC_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define HAX_VM_IOCTL_SET_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define HAX_VM_IOCTL_VCPU_DESTROY CTL_CODE(HAX_DEVICE_TYPE, 0x905, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_ADD_RAMBLOCK CTL_CODE(HAX_DEVICE_TYPE, 0x913, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define HAX_VCPU_IOCTL_RUN CTL_CODE(HAX_DEVICE_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define HAX_VCPU_IOCTL_SET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS)