diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
struct kvm_arch {
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
-	unsigned int n_alloc_mmu_pages;
+	unsigned int n_max_mmu_pages;
	atomic_t invlpg_counter;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
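The rename is purely cosmetic: n_alloc_mmu_pages never counted live allocations. It caps how many shadow pages the VM may allocate, while n_free_mmu_pages tracks the headroom still left under that cap. Below is a minimal userspace model of that accounting; struct mmu_counters and used_pages() are hypothetical stand-ins for the kernel state, a sketch of the invariant rather than kernel code.

	#include <assert.h>

	/* Toy stand-ins for the two kvm_arch counters involved. */
	struct mmu_counters {
		unsigned int n_max_mmu_pages;	/* cap: pages the VM may allocate */
		unsigned int n_free_mmu_pages;	/* headroom left under the cap */
	};

	/* Pages currently in use: the cap minus the remaining headroom.
	 * This mirrors what the kernel computes as
	 * n_max_mmu_pages - kvm_mmu_available_pages(kvm). */
	static unsigned int used_pages(const struct mmu_counters *c)
	{
		return c->n_max_mmu_pages - c->n_free_mmu_pages;
	}

	int main(void)
	{
		struct mmu_counters c = { .n_max_mmu_pages = 64,
					  .n_free_mmu_pages = 24 };
		assert(used_pages(&c) == 40);	/* 40 shadow pages in use */
		return 0;
	}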
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	int used_pages;
	LIST_HEAD(invalid_list);

-	used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
+	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
	used_pages = max(0, used_pages);

	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before
	 * changing the value.
	 */
	if (used_pages > kvm_nr_mmu_pages) {
		while (used_pages > kvm_nr_mmu_pages &&
		       !list_empty(&kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
							       &invalid_list);
		}
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		kvm_nr_mmu_pages = used_pages;
		kvm->arch.n_free_mmu_pages = 0;
	} else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-					 - kvm->arch.n_alloc_mmu_pages;
+					 - kvm->arch.n_max_mmu_pages;

-	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
}
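To make the resize arithmetic above easy to check, here is a compilable toy version over the same two counters. The names struct mmu_counters and change_mmu_pages are hypothetical, locking and the shadow-page lists are omitted, and zapping is reduced to the assumption that it hits the new limit exactly; it is a sketch of the accounting, not the kernel routine.

	#include <assert.h>

	struct mmu_counters {
		unsigned int n_max_mmu_pages;	/* the cap being resized */
		unsigned int n_free_mmu_pages;	/* headroom under the cap */
	};

	static void change_mmu_pages(struct mmu_counters *c, unsigned int new_max)
	{
		int used = (int)(c->n_max_mmu_pages - c->n_free_mmu_pages);

		used = used > 0 ? used : 0;	/* max(0, used), as above */
		if ((unsigned int)used > new_max) {
			/* Kernel: zap shadow pages until used <= new_max;
			 * here we assume the zap lands on the target, so
			 * only the headroom collapses to zero. */
			c->n_free_mmu_pages = 0;
		} else {
			/* Raising (or harmlessly lowering) the cap moves
			 * the headroom by the same delta; nothing is freed. */
			c->n_free_mmu_pages += new_max - c->n_max_mmu_pages;
		}
		c->n_max_mmu_pages = new_max;
	}

	int main(void)
	{
		struct mmu_counters c = { .n_max_mmu_pages = 64,
					  .n_free_mmu_pages = 24 };	/* 40 used */

		change_mmu_pages(&c, 100);	/* grow: headroom 24 -> 60 */
		assert(c.n_max_mmu_pages == 100 && c.n_free_mmu_pages == 60);

		change_mmu_pages(&c, 32);	/* shrink below the 40 in use */
		assert(c.n_max_mmu_pages == 32 && c.n_free_mmu_pages == 0);
		return 0;
	}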
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
-		npages = kvm->arch.n_alloc_mmu_pages -
+		npages = kvm->arch.n_max_mmu_pages -
			 kvm_mmu_available_pages(kvm);
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
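The shrinker walk above derives each VM's used-page count from the same cap-minus-headroom identity and accumulates it into cache_count. A self-contained sketch of that aggregation follows; struct toy_vm and the cache_count() function are hypothetical names, and the real code walks vm_list under locks and also zaps pages when asked to scan.

	#include <stdio.h>

	/* Hypothetical stand-in for the per-VM counters the scan reads. */
	struct toy_vm {
		unsigned int n_max_mmu_pages;
		unsigned int n_free_mmu_pages;
	};

	/* Sum of npages = n_max_mmu_pages - kvm_mmu_available_pages(kvm)
	 * over all VMs: what the scan above reports as reclaimable. */
	static unsigned int cache_count(const struct toy_vm *vms, int nr)
	{
		unsigned int total = 0;
		int i;

		for (i = 0; i < nr; i++)
			total += vms[i].n_max_mmu_pages - vms[i].n_free_mmu_pages;
		return total;
	}

	int main(void)
	{
		struct toy_vm vms[] = { { 64, 24 }, { 128, 100 } };

		printf("%u reclaimable shadow pages\n", cache_count(vms, 2));
		return 0;
	}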
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
-	return kvm->arch.n_alloc_mmu_pages;
+	return kvm->arch.n_max_mmu_pages;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
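The getter above backs the KVM_GET_NR_MMU_PAGES VM ioctl, which hands the field back as the ioctl return value. A minimal userspace read-back might look like the sketch below, assuming <linux/kvm.h> is installed and with all error handling elided.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);	/* needs access to /dev/kvm */
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);	/* fresh VM file descriptor */
		int nr = ioctl(vm, KVM_GET_NR_MMU_PAGES, 0);

		printf("n_max_mmu_pages: %d\n", nr);	/* the renamed field, read back */
		return 0;
	}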