 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		r = -ENOMEM;
-		dirty_bitmap = vmalloc(n);
-		if (!dirty_bitmap)
-			goto out;
+		dirty_bitmap = memslot->dirty_bitmap_head;
+		if (memslot->dirty_bitmap == dirty_bitmap)
+			dirty_bitmap += n / sizeof(long);
 		memset(dirty_bitmap, 0, n);
 
 		r = -ENOMEM;
 		slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
-		if (!slots) {
-			vfree(dirty_bitmap);
+		if (!slots)
 			goto out;
-		}
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
 		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
 		slots->generation++;
[...]
 		spin_unlock(&kvm->mmu_lock);
 
 		r = -EFAULT;
-		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
-			vfree(dirty_bitmap);
+		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
 			goto out;
-		}
-		vfree(dirty_bitmap);
 	} else {
 		r = -EFAULT;
 		if (clear_user(log->dirty_bitmap, n))
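The selection at the top of this hunk is the whole double-buffering trick: the memslot's allocation holds two bitmap-sized halves, dirty_bitmap_head marks the start of it, and whichever half is not currently published gets zeroed and swapped in while the previously active half is copied out to userspace. A rough userspace illustration of that flip (illustrative only: head, active and next are made-up names, and calloc stands in for the doubled vmalloc):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 64;                          /* bytes in one bitmap copy */
	unsigned long *head = calloc(2, n);     /* doubled buffer: the "head" */
	unsigned long *active = head;           /* the half currently published */
	int round;

	if (!head)
		return 1;

	for (round = 0; round < 4; round++) {
		/* Same arithmetic as the patch: take the half that is not
		 * active right now and clear it before publishing it. */
		unsigned long *next = head;

		if (active == next)
			next += n / sizeof(long);
		memset(next, 0, n);

		/* "active" still holds the old dirty bits; this is the copy
		 * that would go to userspace before it becomes the spare. */
		printf("round %d: copy out half %d, publish half %d\n",
		       round, active == head ? 0 : 1, next == head ? 0 : 1);
		active = next;
	}

	free(head);
	return 0;
}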
 static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
 	if (!memslot->dirty_bitmap)
 		return;
 
-	vfree(memslot->dirty_bitmap);
+	vfree(memslot->dirty_bitmap_head);
 	memslot->dirty_bitmap = NULL;
+	memslot->dirty_bitmap_head = NULL;
 }
+/*
+ * Allocation size is twice as large as the actual dirty bitmap size.
+ * This makes it possible to do double buffering: see x86's
+ * kvm_vm_ioctl_get_dirty_log().
+ */
 static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
 {
-	unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);
+	unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
 
 	memslot->dirty_bitmap = vmalloc(dirty_bytes);
 	if (!memslot->dirty_bitmap)
 		return -ENOMEM;
 
 	memset(memslot->dirty_bitmap, 0, dirty_bytes);
+	memslot->dirty_bitmap_head = memslot->dirty_bitmap;
 	return 0;
 }
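Putting the two allocation hunks together, a minimal userspace sketch of the intended lifecycle (the struct and function names here only mirror the patch, and malloc/free stand in for vmalloc/vfree): one allocation of twice the bitmap size, dirty_bitmap_head always pointing at its start, and teardown freeing the head because dirty_bitmap itself may point at the second half after a flip.

#include <stdlib.h>
#include <string.h>

struct memslot {
	unsigned long *dirty_bitmap;       /* currently active half */
	unsigned long *dirty_bitmap_head;  /* start of the doubled allocation */
	size_t dirty_bytes;                /* size of one half */
};

static int create_dirty_bitmap(struct memslot *slot, size_t dirty_bytes)
{
	/* One allocation, twice the bitmap size, zeroed up front. */
	unsigned long *buf = malloc(2 * dirty_bytes);

	if (!buf)
		return -1;
	memset(buf, 0, 2 * dirty_bytes);

	slot->dirty_bitmap = buf;       /* first half is active to begin with */
	slot->dirty_bitmap_head = buf;  /* remember where the allocation starts */
	slot->dirty_bytes = dirty_bytes;
	return 0;
}

static void destroy_dirty_bitmap(struct memslot *slot)
{
	if (!slot->dirty_bitmap)
		return;

	/* dirty_bitmap may point at either half; only the head is a valid
	 * argument for free(). */
	free(slot->dirty_bitmap_head);
	slot->dirty_bitmap = NULL;
	slot->dirty_bitmap_head = NULL;
}

A flip then simply alternates dirty_bitmap between dirty_bitmap_head and dirty_bitmap_head + dirty_bytes / sizeof(long), as in the get_dirty_log hunk above, so no per-ioctl vmalloc is ever needed.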