int used_pages;
LIST_HEAD(invalid_list);
- used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+ used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
used_pages = max(0, used_pages);
/*
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
- int free_pages;
LIST_HEAD(invalid_list);
- free_pages = vcpu->kvm->arch.n_free_mmu_pages;
- while (free_pages < KVM_REFILL_PAGES &&
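+ /* Zap pages from the tail of the active list until enough are available. */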
+ while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
struct kvm_mmu_page *sp;
sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
struct kvm_mmu_page, link);
- free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
- &invalid_list);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
++vcpu->kvm->stat.mmu_recycled;
}
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
npages = kvm->arch.n_alloc_mmu_pages -
- kvm->arch.n_free_mmu_pages;
+ kvm_mmu_available_pages(kvm);
cache_count += npages;
if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
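+/* Shadow pages still available for allocation (mirrors arch.n_free_mmu_pages). */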
+static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+{
+ return kvm->arch.n_free_mmu_pages;
+}
+
static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
- if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+ if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
__kvm_mmu_free_some_pages(vcpu);
}