}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
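For orientation, not part of the patch itself: a minimal sketch of a trivial arch implementation once the @mem argument is gone. Everything the hooks used to read from the userspace region is already carried by the prepared memslot (base_gfn/npages encode guest_phys_addr/memory_size in pages; userspace_addr and flags are copied verbatim). The no-op bodies below are hypothetical, shown only to illustrate the post-patch signatures.

#include <linux/kvm_host.h>

/*
 * Sketch only, not from this series.  Field correspondence assumed from
 * how __kvm_set_memory_region builds the new slot:
 *   mem->guest_phys_addr  ->  new->base_gfn << PAGE_SHIFT
 *   mem->memory_size      ->  new->npages << PAGE_SHIFT
 *   mem->userspace_addr   ->  new->userspace_addr
 *   mem->flags            ->  new->flags
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   const struct kvm_memory_slot *old,
                                   struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        /* Validate the new slot here; a non-zero return aborts the update. */
        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        /* The update is now visible; rebuild arch-specific state as needed. */
}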
}
static int kvm_set_memslot(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
old.as_id = new->as_id;
}
- r = kvm_arch_prepare_memory_region(kvm, mem, &old, new, change);
+ r = kvm_arch_prepare_memory_region(kvm, &old, new, change);
if (r)
goto out_slots;
else if (change == KVM_MR_CREATE)
kvm->nr_memslot_pages += new->npages;
- kvm_arch_commit_memory_region(kvm, mem, &old, new, change);
+ kvm_arch_commit_memory_region(kvm, &old, new, change);
/* Free the old memslot's metadata. Note, this is the full copy!!! */
if (change == KVM_MR_DELETE)
new.id = id;
new.as_id = as_id;
- return kvm_set_memslot(kvm, mem, &new, KVM_MR_DELETE);
+ return kvm_set_memslot(kvm, &new, KVM_MR_DELETE);
}
new.as_id = as_id;
bitmap_set(new.dirty_bitmap, 0, new.npages);
}
- r = kvm_set_memslot(kvm, mem, &new, change);
+ r = kvm_set_memslot(kvm, &new, change);
if (r)
goto out_bitmap;