 }
 
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn)
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level)
 {
 	struct kvm_rmap_head *rmap_head;
 	int i;
 	bool write_protected = false;
 
-	for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
+	for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
 		rmap_head = __gfn_to_rmap(gfn, i, slot);
 		write_protected |= __rmap_write_protect(kvm, rmap_head, true);
 	}
 
 	if (is_tdp_mmu_enabled(kvm))
 		write_protected |=
-			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn);
+			kvm_tdp_mmu_write_protect_gfn(kvm, slot, gfn, min_level);
 
 	return write_protected;
 }
 
 static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
 {
 	struct kvm_memory_slot *slot;
 
 	slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
 
-	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+	return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn, PG_LEVEL_4K);
 }
 
 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
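
For callers outside page tracking, the point of the new min_level argument is that write protection can now start above 4K. Below is a minimal sketch of a hypothetical caller, assuming this kernel era's rwlock mmu_lock (the function name and the 2M-only policy are illustrative, not part of this patch):

/*
 * Hypothetical example, not from this patch: write-protect only the
 * 2M-and-larger mappings of @gfn so that the next guest write splits
 * the huge mapping, while existing 4K SPTEs stay writable.
 */
static void wp_hugepages_only(struct kvm *kvm, struct kvm_memory_slot *slot,
			      u64 gfn)
{
	write_lock(&kvm->mmu_lock);
	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_2M))
		kvm_flush_remote_tlbs(kvm);
	write_unlock(&kvm->mmu_lock);
}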
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
-				    struct kvm_memory_slot *slot, u64 gfn);
+				    struct kvm_memory_slot *slot, u64 gfn,
+				    int min_level);
 void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 					u64 start_gfn, u64 pages);
 	kvm_mmu_gfn_disallow_lpage(slot, gfn);
 
 	if (mode == KVM_PAGE_TRACK_WRITE)
-		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+		if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))
 			kvm_flush_remote_tlbs(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_slot_page_track_add_page);
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
-			      gfn_t gfn)
+			      gfn_t gfn, int min_level)
 {
 	struct tdp_iter iter;
 	u64 new_spte;
 	bool spte_set = false;
 
+	BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
+
 	rcu_read_lock();
 
-	tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
+	for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
+				   min_level, gfn, gfn + 1) {
+		if (!is_shadow_present_pte(iter.old_spte) ||
+		    !is_last_spte(iter.old_spte, iter.level))
+			continue;
+
 		if (!is_writable_pte(iter.old_spte))
 			break;
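
Two notes on this hunk. First, tdp_root_for_each_leaf_pte() filtered out non-present and non-leaf SPTEs internally, whereas for_each_tdp_pte_min_level() visits every SPTE at or above min_level, so that filter is now open-coded at the top of the loop. Second, the tail of the loop is cut from this excerpt; a hedged reconstruction of it follows, with the caveat that the MMU-writable mask is named differently across kernel versions (SPTE_MMU_WRITEABLE in older trees, shadow_mmu_writable_mask in later ones):

		/*
		 * Sketch of the elided loop tail: clear both the
		 * hardware-writable bit and KVM's software MMU-writable
		 * bit so the next guest write faults and is intercepted.
		 */
		new_spte = iter.old_spte &
			~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

		tdp_mmu_set_spte(kvm, &iter, new_spte);
		spte_set = true;
	}

	rcu_read_unlock();

	return spte_set;
}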
  * Returns true if an SPTE was set and a TLB flush is needed.
  */
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn)
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level)
 {
 	struct kvm_mmu_page *root;
 	bool spte_set = false;
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	for_each_tdp_mmu_root(kvm, root, slot->as_id)
-		spte_set |= write_protect_gfn(kvm, root, gfn);
+		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
 
 	return spte_set;
 }
 			       bool flush);
 
 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
-				   struct kvm_memory_slot *slot, gfn_t gfn);
+				   struct kvm_memory_slot *slot, gfn_t gfn,
+				   int min_level);
 
 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
 			 int *root_level);
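
End to end, kvm_mmu_slot_gfn_write_protect() is now the single entry point that fans out to both the rmap walk and the TDP MMU at the requested granularity. A minimal usage sketch, assuming mmu_lock is already held for write (the wrapper name is illustrative; the flush helper's signature is the one shown in the mmu_internal.h hunk above):

/*
 * Illustrative wrapper, not from this patch: write-protect @gfn at
 * @min_level and above across all MMUs, flushing remote TLBs only
 * if some SPTE actually changed.
 */
static void wp_gfn_and_flush(struct kvm *kvm, struct kvm_memory_slot *slot,
			     u64 gfn, int min_level)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, min_level))
		kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
}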