1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include "mmu_internal.h"
11 #include <asm/cmpxchg.h>
12 #include <trace/events/kvm.h>
14 /* Initializes the TDP MMU for the VM, if enabled. */
15 void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
17 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
18 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
21 /* Arbitrarily returns true so that this may be used in if statements. */
22 static __always_inline bool kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
26 lockdep_assert_held_read(&kvm->mmu_lock);
28 lockdep_assert_held_write(&kvm->mmu_lock);
33 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
36 * Invalidate all roots, which, besides the obvious, schedules all roots
37 * for zapping and thus puts the TDP MMU's reference to each root, i.e.
38 * ultimately frees all roots.
40 kvm_tdp_mmu_invalidate_all_roots(kvm);
41 kvm_tdp_mmu_zap_invalidated_roots(kvm);
43 WARN_ON(atomic64_read(&kvm->arch.tdp_mmu_pages));
44 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
47 * Ensure that all the outstanding RCU callbacks to free shadow pages
48 * can run before the VM is torn down. Putting the last reference to
49 * zapped roots will create new callbacks.
50 */
51 rcu_barrier();
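/* Free a shadow page: its backing page-table page and its kvm_mmu_page header. */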
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
56 free_page((unsigned long)sp->spt);
57 kmem_cache_free(mmu_page_header_cache, sp);
61 * This is called through call_rcu in order to free TDP page table memory
62 * safely with respect to other kernel threads that may be operating on
64 * By only accessing TDP MMU page table memory in an RCU read critical
65 * section, and freeing it after a grace period, lockless access to that
66 * memory won't use it after it is freed.
68 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
70 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
79 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
81 if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
85 * The TDP MMU itself holds a reference to each root until the root is
86 * explicitly invalidated, i.e. the final reference should never be
87 * put for a valid root.
89 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);
91 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
92 list_del_rcu(&root->link);
93 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
94 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
98 * Returns the next root after @prev_root (or the first root if @prev_root is
99 * NULL). A reference to the returned root is acquired, and the reference to
100 * @prev_root is released (the caller obviously must hold a reference to
101 * @prev_root if it's non-NULL).
103 * If @only_valid is true, invalid roots are skipped.
105 * Returns NULL if the end of tdp_mmu_roots was reached.
107 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
108 struct kvm_mmu_page *prev_root,
109 bool shared, bool only_valid)
111 struct kvm_mmu_page *next_root;
116 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
118 typeof(*prev_root), link);
120 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
121 typeof(*next_root), link);
124 if ((!only_valid || !next_root->role.invalid) &&
125 kvm_tdp_mmu_get_root(next_root))
128 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
129 &next_root->link, typeof(*next_root), link);
135 kvm_tdp_mmu_put_root(kvm, prev_root, shared);
141 * Note: this iterator gets and puts references to the roots it iterates over.
142 * This makes it safe to release the MMU lock and yield within the loop, but
143 * if exiting the loop early, the caller must drop the reference to the most
144 * recent root. (Unless keeping a live reference is desirable.)
146 * If shared is set, this function is operating under the MMU lock in read
147 * mode. In the unlikely event that this thread must free a root, the lock
148 * will be temporarily dropped and reacquired in write mode.
150 #define __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, _only_valid)\
151 for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, _only_valid); \
153 _root = tdp_mmu_next_root(_kvm, _root, _shared, _only_valid)) \
154 if (kvm_lockdep_assert_mmu_lock_held(_kvm, _shared) && \
155 kvm_mmu_page_as_id(_root) != _as_id) { \
158 #define for_each_valid_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
159 __for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared, true)
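/*
 * Example usage (mirroring kvm_tdp_mmu_wrprot_slot() later in this file):
 * walk every valid root for a memslot's address space with mmu_lock held for
 * read, yielding as needed:
 *
 *	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
 *		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
 *					     slot->base_gfn + slot->npages, min_level);
 */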
161 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _shared) \
162 for (_root = tdp_mmu_next_root(_kvm, NULL, _shared, false); \
164 _root = tdp_mmu_next_root(_kvm, _root, _shared, false)) \
165 if (!kvm_lockdep_assert_mmu_lock_held(_kvm, _shared)) { \
169 * Iterate over all TDP MMU roots. Requires that mmu_lock be held for write,
170 * the implication being that any flow that holds mmu_lock for read is
171 * inherently yield-friendly and should use the yield-safe variant above.
172 * Holding mmu_lock for write obviates the need for RCU protection as the list
173 * is guaranteed to be stable.
175 #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
176 list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link) \
177 if (kvm_lockdep_assert_mmu_lock_held(_kvm, false) && \
178 kvm_mmu_page_as_id(_root) != _as_id) { \
181 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
183 struct kvm_mmu_page *sp;
185 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
186 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
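/*
 * Initialize a newly allocated shadow page's metadata; linking the page into
 * the paging structure is a separate step (see tdp_mmu_link_sp()).
 */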
191 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
192 gfn_t gfn, union kvm_mmu_page_role role)
194 INIT_LIST_HEAD(&sp->possible_nx_huge_page_link);
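/*
 * Stash a back-pointer in the backing struct page so that sptep_to_sp() can
 * recover the kvm_mmu_page from a raw SPTE pointer.
 */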
196 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
201 sp->tdp_mmu_page = true;
203 trace_kvm_mmu_get_page(sp, true);
206 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp,
207 struct tdp_iter *iter)
209 struct kvm_mmu_page *parent_sp;
210 union kvm_mmu_page_role role;
212 parent_sp = sptep_to_sp(rcu_dereference(iter->sptep));
214 role = parent_sp->role;
215 role.level--;
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);
220 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
222 union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
223 struct kvm *kvm = vcpu->kvm;
224 struct kvm_mmu_page *root;
226 lockdep_assert_held_write(&kvm->mmu_lock);
229 * Check for an existing root before allocating a new one. Note, the
230 * role check prevents consuming an invalid root.
232 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
233 if (root->role.word == role.word &&
234 kvm_tdp_mmu_get_root(root))
238 root = tdp_mmu_alloc_sp(vcpu);
239 tdp_mmu_init_sp(root, NULL, 0, role);
242 * TDP MMU roots are kept until they are explicitly invalidated, either
243 * by a memslot update or by the destruction of the VM. Initialize the
244 * refcount to two; one reference for the vCPU, and one reference for
245 * the TDP MMU itself, which is held until the root is invalidated and
246 * is ultimately put by kvm_tdp_mmu_zap_invalidated_roots().
248 refcount_set(&root->tdp_mmu_root_count, 2);
250 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
251 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
252 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
255 return __pa(root->spt);
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
259 u64 old_spte, u64 new_spte, int level,
260 bool shared);
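/*
 * Keep the VM's count of TDP MMU page-table pages and the kernel's secondary
 * page-table accounting in sync as page-table pages are linked and unlinked.
 */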
262 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
264 kvm_account_pgtable_pages((void *)sp->spt, +1);
265 atomic64_inc(&kvm->arch.tdp_mmu_pages);
268 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
270 kvm_account_pgtable_pages((void *)sp->spt, -1);
271 atomic64_dec(&kvm->arch.tdp_mmu_pages);
275 * tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
278 * @sp: the page to be removed
279 * @shared: This operation may not be running under the exclusive use of
280 * the MMU lock and the operation must synchronize with other
281 * threads that might be adding or removing pages.
283 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
286 tdp_unaccount_mmu_page(kvm, sp);
288 if (!sp->nx_huge_page_disallowed)
292 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
294 lockdep_assert_held_write(&kvm->mmu_lock);
296 sp->nx_huge_page_disallowed = false;
297 untrack_possible_nx_huge_page(kvm, sp);
300 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
304 * handle_removed_pt() - handle a page table removed from the TDP structure
307 * @pt: the page removed from the paging structure
308 * @shared: This operation may not be running under the exclusive use
309 * of the MMU lock and the operation must synchronize with other
310 * threads that might be modifying SPTEs.
312 * Given a page table that has been removed from the TDP paging structure,
313 * iterates through the page table to clear SPTEs and free child page tables.
315 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
316 * protection. Since this thread removed it from the paging structure,
317 * this thread will be responsible for ensuring the page is freed. Hence the
318 * early rcu_dereferences in the function.
320 static void handle_removed_pt(struct kvm *kvm, tdp_ptep_t pt, bool shared)
322 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
323 int level = sp->role.level;
324 gfn_t base_gfn = sp->gfn;
327 trace_kvm_mmu_prepare_zap_page(sp);
329 tdp_mmu_unlink_sp(kvm, sp, shared);
331 for (i = 0; i < SPTE_ENT_PER_PAGE; i++) {
332 tdp_ptep_t sptep = pt + i;
333 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
338 * Set the SPTE to a nonpresent value that other
339 * threads will not overwrite. If the SPTE was
340 * already marked as removed then another thread
341 * handling a page fault could overwrite it, so
342 * retry setting the SPTE until the value it replaces is something other
343 * than the removed SPTE value, i.e. until this thread owns the transition.
346 old_spte = kvm_tdp_mmu_write_spte_atomic(sptep, REMOVED_SPTE);
347 if (!is_removed_spte(old_spte))
353 * If the SPTE is not MMU-present, there is no backing
354 * page associated with the SPTE and so no side effects
355 * that need to be recorded, and exclusive ownership of
356 * mmu_lock ensures the SPTE can't be made present.
357 * Note, zapping MMIO SPTEs is also unnecessary as they
358 * are guarded by the memslots generation, not by being
359 * unreachable.
361 old_spte = kvm_tdp_mmu_read_spte(sptep);
362 if (!is_shadow_present_pte(old_spte))
366 * Use the common helper instead of a raw WRITE_ONCE as
367 * the SPTE needs to be updated atomically if it can be
368 * modified by a different vCPU outside of mmu_lock.
369 * Even though the parent SPTE is !PRESENT, the TLB
370 * hasn't yet been flushed, and both Intel and AMD
371 * document that A/D assists can use upper-level PxE
372 * entries that are cached in the TLB, i.e. the CPU can
373 * still access the page and mark it dirty.
375 * No retry is needed in the atomic update path as the
376 * sole concern is dropping a Dirty bit, i.e. no other
377 * task can zap/remove the SPTE as mmu_lock is held for
378 * write. Marking the SPTE as a removed SPTE is not
379 * strictly necessary for the same reason, but using
380 * the removed SPTE value keeps the shared/exclusive
381 * paths consistent and allows the handle_changed_spte()
382 * call below to hardcode the new value to REMOVED_SPTE.
384 * Note, even though dropping a Dirty bit is the only
385 * scenario where a non-atomic update could result in a
386 * functional bug, simply checking the Dirty bit isn't
387 * sufficient as a fast page fault could read the upper
388 * level SPTE before it is zapped, and then make this
389 * target SPTE writable, resume the guest, and set the
390 * Dirty bit between reading the SPTE above and writing
393 old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte,
394 REMOVED_SPTE, level);
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
397 old_spte, REMOVED_SPTE, level, shared);
400 call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
404 * handle_changed_spte - handle bookkeeping associated with an SPTE change
406 * @as_id: the address space of the paging structure the SPTE was a part of
407 * @gfn: the base GFN that was mapped by the SPTE
408 * @old_spte: The value of the SPTE before the change
409 * @new_spte: The value of the SPTE after the change
410 * @level: the level of the PT the SPTE is part of in the paging structure
411 * @shared: This operation may not be running under the exclusive use of
412 * the MMU lock and the operation must synchronize with other
413 * threads that might be modifying SPTEs.
415 * Handle bookkeeping that might result from the modification of a SPTE. Note,
416 * dirty logging updates are handled in common code, not here (see make_spte()
417 * and fast_pf_fix_direct_spte()).
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
420 u64 old_spte, u64 new_spte, int level,
423 bool was_present = is_shadow_present_pte(old_spte);
424 bool is_present = is_shadow_present_pte(new_spte);
425 bool was_leaf = was_present && is_last_spte(old_spte, level);
426 bool is_leaf = is_present && is_last_spte(new_spte, level);
427 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
429 WARN_ON_ONCE(level > PT64_ROOT_MAX_LEVEL);
430 WARN_ON_ONCE(level < PG_LEVEL_4K);
431 WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
434 * If this warning were to trigger it would indicate that there was a
435 * missing MMU notifier or a race with some notifier handler.
436 * A present, leaf SPTE should never be directly replaced with another
437 * present leaf SPTE pointing to a different PFN. A notifier handler
438 * should be zapping the SPTE before the main MM's page table is
439 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
440 * thread before replacement.
442 if (was_leaf && is_leaf && pfn_changed) {
443 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
444 "SPTE with another present leaf SPTE mapping a\n"
446 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
447 as_id, gfn, old_spte, new_spte, level);
450 * Crash the host to prevent error propagation and guest data
451 * corruption.
452 */
453 BUG();
456 if (old_spte == new_spte)
459 trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
462 check_spte_writable_invariants(new_spte);
465 * The only time an SPTE should be changed from a non-present to a
466 * non-present state is when an MMIO SPTE is installed/modified/
467 * removed. In that case, there is nothing to do here.
469 if (!was_present && !is_present) {
471 * If this change does not involve a MMIO SPTE or removed SPTE,
472 * it is unexpected. Log the change, though it should not
473 * impact the guest since both the former and current SPTEs
474 * are nonpresent.
476 if (WARN_ON_ONCE(!is_mmio_spte(old_spte) &&
477 !is_mmio_spte(new_spte) &&
478 !is_removed_spte(new_spte)))
479 pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
480 "should not be replaced with another,\n"
481 "different nonpresent SPTE, unless one or both\n"
482 "are MMIO SPTEs, or the new SPTE is\n"
483 "a temporary removed SPTE.\n"
484 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
485 as_id, gfn, old_spte, new_spte, level);
489 if (is_leaf != was_leaf)
490 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
492 if (was_leaf && is_dirty_spte(old_spte) &&
493 (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
494 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
497 * Recursively handle child PTs if the change removed a subtree from
498 * the paging structure. Note the WARN on the PFN changing without the
499 * SPTE being converted to a hugepage (leaf) or being zapped. Shadow
500 * pages are kernel allocations and should never be migrated.
502 if (was_present && !was_leaf &&
503 (is_leaf || !is_present || WARN_ON_ONCE(pfn_changed)))
504 handle_removed_pt(kvm, spte_to_child_pt(old_spte, level), shared);
506 if (was_leaf && is_accessed_spte(old_spte) &&
507 (!is_present || !is_accessed_spte(new_spte) || pfn_changed))
508 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
512 * tdp_mmu_set_spte_atomic - Set a TDP MMU SPTE atomically
513 * and handle the associated bookkeeping. Do not mark the page dirty
514 * in KVM's dirty bitmaps.
516 * If setting the SPTE fails because it has changed, iter->old_spte will be
517 * refreshed to the current value of the spte.
520 * @iter: a tdp_iter instance currently on the SPTE that should be set
521 * @new_spte: The value the SPTE should be set to
523 * * 0 - If the SPTE was set.
524 * * -EBUSY - If the SPTE cannot be set. In this case this function will have
525 * no side-effects other than setting iter->old_spte to the last
526 * known value of the spte.
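 *
 * Callers typically retry the walk on -EBUSY; an illustrative sketch
 * (mirroring users later in this file, e.g. wrprot_gfn_range()):
 *
 *	if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
 *		goto retry;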
528 static inline int tdp_mmu_set_spte_atomic(struct kvm *kvm,
529 struct tdp_iter *iter,
532 u64 *sptep = rcu_dereference(iter->sptep);
535 * The caller is responsible for ensuring the old SPTE is not a REMOVED
536 * SPTE. KVM should never attempt to zap or manipulate a REMOVED SPTE,
537 * and pre-checking before inserting a new SPTE is advantageous as it
538 * avoids unnecessary work.
540 WARN_ON_ONCE(iter->yielded || is_removed_spte(iter->old_spte));
542 lockdep_assert_held_read(&kvm->mmu_lock);
545 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
546 * does not hold the mmu_lock. On failure, i.e. if a different logical
547 * CPU modified the SPTE, try_cmpxchg64() updates iter->old_spte with
548 * the current value, so the caller operates on fresh data, e.g. if it
549 * retries tdp_mmu_set_spte_atomic()
551 if (!try_cmpxchg64(sptep, &iter->old_spte, new_spte))
554 handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
555 new_spte, iter->level, true);
560 static inline int tdp_mmu_zap_spte_atomic(struct kvm *kvm,
561 struct tdp_iter *iter)
566 * Freeze the SPTE by setting it to a special,
567 * non-present value. This will stop other threads from
568 * immediately installing a present entry in its place
569 * before the TLBs are flushed.
571 ret = tdp_mmu_set_spte_atomic(kvm, iter, REMOVED_SPTE);
575 kvm_flush_remote_tlbs_gfn(kvm, iter->gfn, iter->level);
578 * No other thread can overwrite the removed SPTE as they must either
579 * wait on the MMU lock or use tdp_mmu_set_spte_atomic() which will not
580 * overwrite the special removed SPTE value. No bookkeeping is needed
581 * here since the SPTE is going from non-present to non-present. Use
582 * the raw write helper to avoid an unnecessary check on volatile bits.
584 __kvm_tdp_mmu_write_spte(iter->sptep, 0);
591 * tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
593 * @as_id: Address space ID, i.e. regular vs. SMM
594 * @sptep: Pointer to the SPTE
595 * @old_spte: The current value of the SPTE
596 * @new_spte: The new value that will be set for the SPTE
597 * @gfn: The base GFN that was (or will be) mapped by the SPTE
598 * @level: The level _containing_ the SPTE (its parent PT's level)
600 * Returns the old SPTE value, which _may_ be different than @old_spte if the
601 * SPTE had volatile bits set.
603 static u64 tdp_mmu_set_spte(struct kvm *kvm, int as_id, tdp_ptep_t sptep,
604 u64 old_spte, u64 new_spte, gfn_t gfn, int level)
606 lockdep_assert_held_write(&kvm->mmu_lock);
609 * No thread should be using this function to set SPTEs to or from the
610 * temporary removed SPTE value.
611 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
612 * should be used. If operating under the MMU lock in write mode, the
613 * use of the removed SPTE should not be necessary.
615 WARN_ON_ONCE(is_removed_spte(old_spte) || is_removed_spte(new_spte));
617 old_spte = kvm_tdp_mmu_write_spte(sptep, old_spte, new_spte, level);
619 handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level, false);
623 static inline void tdp_mmu_iter_set_spte(struct kvm *kvm, struct tdp_iter *iter,
626 WARN_ON_ONCE(iter->yielded);
627 iter->old_spte = tdp_mmu_set_spte(kvm, iter->as_id, iter->sptep,
628 iter->old_spte, new_spte,
629 iter->gfn, iter->level);
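/*
 * Iterators over the SPTEs of a single root: every SPTE in a GFN range, only
 * the present leaf SPTEs in that range, or a walk of the range rooted at a
 * vCPU's current MMU root.
 */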
632 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
633 for_each_tdp_pte(_iter, _root, _start, _end)
635 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
636 tdp_root_for_each_pte(_iter, _root, _start, _end) \
637 if (!is_shadow_present_pte(_iter.old_spte) || \
638 !is_last_spte(_iter.old_spte, _iter.level)) \
642 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
643 for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
646 * Yield if the MMU lock is contended or this thread needs to return control
647 * to the scheduler.
649 * If this function should yield and flush is set, it will perform a remote
650 * TLB flush before yielding.
652 * If this function yields, iter->yielded is set and the caller must skip to
653 * the next iteration, where tdp_iter_next() will reset the tdp_iter's walk
654 * over the paging structures to allow the iterator to continue its traversal
655 * from the paging structure root.
657 * Returns true if this function yielded.
659 static inline bool __must_check tdp_mmu_iter_cond_resched(struct kvm *kvm,
660 struct tdp_iter *iter,
661 bool flush, bool shared)
663 WARN_ON_ONCE(iter->yielded);
665 /* Ensure forward progress has been made before yielding. */
666 if (iter->next_last_level_gfn == iter->yielded_gfn)
669 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
671 kvm_flush_remote_tlbs(kvm);
676 cond_resched_rwlock_read(&kvm->mmu_lock);
678 cond_resched_rwlock_write(&kvm->mmu_lock);
682 WARN_ON_ONCE(iter->gfn > iter->next_last_level_gfn);
684 iter->yielded = true;
687 return iter->yielded;
690 static inline gfn_t tdp_mmu_max_gfn_exclusive(void)
693 * Bound TDP MMU walks at host.MAXPHYADDR. KVM disallows memslots with
694 * a gpa range that would exceed the max gfn, and KVM does not create
695 * MMIO SPTEs for "impossible" gfns, instead sending such accesses down
696 * the slow emulation path every time.
698 return kvm_mmu_max_gfn() + 1;
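/*
 * Zap every present SPTE at exactly @zap_level in @root, yielding as needed;
 * zapping a non-leaf SPTE recursively frees the entire subtree below it.
 */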
701 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
702 bool shared, int zap_level)
704 struct tdp_iter iter;
706 gfn_t end = tdp_mmu_max_gfn_exclusive();
709 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) {
711 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
714 if (!is_shadow_present_pte(iter.old_spte))
717 if (iter.level > zap_level)
721 tdp_mmu_iter_set_spte(kvm, &iter, 0);
722 else if (tdp_mmu_set_spte_atomic(kvm, &iter, 0))
727 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
732 * The root must have an elevated refcount so that it's reachable via
733 * mmu_notifier callbacks, which allows this path to yield and drop
734 * mmu_lock. When handling an unmap/release mmu_notifier command, KVM
735 * must drop all references to relevant pages prior to completing the
736 * callback. Dropping mmu_lock with an unreachable root would result
737 * in zapping SPTEs after a relevant mmu_notifier callback completes
738 * and lead to use-after-free as zapping a SPTE triggers "writeback" of
739 * dirty/accessed bits to the SPTE's associated struct page.
741 WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));
743 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
748 * To avoid RCU stalls due to recursively removing huge swaths of SPs,
749 * split the zap into two passes. On the first pass, zap at the 1gb
750 * level, and then zap top-level SPs on the second pass. "1gb" is not
751 * arbitrary, as KVM must be able to zap a 1gb shadow page without
752 * inducing a stall to allow in-place replacement with a 1gb hugepage.
754 * Because zapping a SP recurses on its children, stepping down to
755 * PG_LEVEL_4K in the iterator itself is unnecessary.
757 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
758 __tdp_mmu_zap_root(kvm, root, shared, root->role.level);
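/*
 * Zap the parent SPTE pointing at @sp, thereby removing @sp and its entire
 * subtree from the paging structure. Returns true if the SPTE was zapped,
 * false if @sp has no parent SPTE or was already non-present.
 */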
763 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
768 * This helper intentionally doesn't allow zapping a root shadow page,
769 * which doesn't have a parent page table and thus no associated entry.
771 if (WARN_ON_ONCE(!sp->ptep))
774 old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
775 if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
778 tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte, 0,
779 sp->gfn, sp->role.level + 1);
785 * If can_yield is true, will release the MMU lock and reschedule if the
786 * scheduler needs the CPU or there is contention on the MMU lock. If this
787 * function cannot yield, it will not release the MMU lock or reschedule and
788 * the caller must ensure it does not supply too large a GFN range, or the
789 * operation can cause a soft lockup.
791 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
792 gfn_t start, gfn_t end, bool can_yield, bool flush)
794 struct tdp_iter iter;
796 end = min(end, tdp_mmu_max_gfn_exclusive());
798 lockdep_assert_held_write(&kvm->mmu_lock);
802 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) {
804 tdp_mmu_iter_cond_resched(kvm, &iter, flush, false)) {
809 if (!is_shadow_present_pte(iter.old_spte) ||
810 !is_last_spte(iter.old_spte, iter.level))
813 tdp_mmu_iter_set_spte(kvm, &iter, 0);
820 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
821 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
827 * Zap leaf SPTEs for the range of gfns, [start, end), for all roots. Returns
828 * true if a TLB flush is needed before releasing the MMU lock, i.e. if one or
829 * more SPTEs were zapped since the MMU lock was last acquired.
831 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush)
833 struct kvm_mmu_page *root;
835 for_each_tdp_mmu_root_yield_safe(kvm, root, false)
836 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush);
841 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
843 struct kvm_mmu_page *root;
846 * Zap all roots, including invalid roots, as all SPTEs must be dropped
847 * before returning to the caller. Zap directly even if the root is
848 * also being zapped by a worker. Walking zapped top-level SPTEs isn't
849 * all that expensive and mmu_lock is already held, which means the
850 * worker has yielded, i.e. flushing the work instead of zapping here
851 * isn't guaranteed to be any faster.
853 * A TLB flush is unnecessary, KVM zaps everything if and only if the VM
854 * is being destroyed or the userspace VMM has exited. In both cases,
855 * KVM_RUN is unreachable, i.e. no vCPUs will ever service the request.
857 for_each_tdp_mmu_root_yield_safe(kvm, root, false)
858 tdp_mmu_zap_root(kvm, root, false);
862 * Zap all invalidated roots to ensure all SPTEs are dropped before the "fast
863 * zap" completes.
865 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
867 struct kvm_mmu_page *root;
869 read_lock(&kvm->mmu_lock);
871 for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
872 if (!root->tdp_mmu_scheduled_root_to_zap)
875 root->tdp_mmu_scheduled_root_to_zap = false;
876 KVM_BUG_ON(!root->role.invalid, kvm);
879 * A TLB flush is not necessary as KVM performs a local TLB
880 * flush when allocating a new root (see kvm_mmu_load()), and
881 * when migrating a vCPU to a different pCPU. Note, the local
882 * TLB flush on reuse also invalidates paging-structure-cache
883 * entries, i.e. TLB entries for intermediate paging structures,
884 * that may be zapped, as such entries are associated with the
885 * ASID on both VMX and SVM.
887 tdp_mmu_zap_root(kvm, root, true);
890 * The reference needs to be put *after* zapping the root, as
891 * the root must be reachable by mmu_notifiers while it's being
892 * zapped.
894 kvm_tdp_mmu_put_root(kvm, root, true);
897 read_unlock(&kvm->mmu_lock);
901 * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that
902 * is about to be zapped, e.g. in response to a memslots update. The actual
903 * zapping is done separately so that it happens with mmu_lock held for read,
904 * whereas invalidating roots must be done with mmu_lock held for write (unless
905 * the VM is being destroyed).
907 * Note, kvm_tdp_mmu_zap_invalidated_roots() is gifted the TDP MMU's reference.
908 * See kvm_tdp_mmu_get_vcpu_root_hpa().
910 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
912 struct kvm_mmu_page *root;
915 * mmu_lock must be held for write to ensure that a root doesn't become
916 * invalid while there are active readers (invalidating a root while
917 * there are active readers may or may not be problematic in practice,
918 * but it's uncharted territory and not supported).
920 * Waive the assertion if there are no users of @kvm, i.e. the VM is
921 * being destroyed after all references have been put, or if no vCPUs
922 * have been created (which means there are no roots), i.e. the VM is
923 * being destroyed in an error path of KVM_CREATE_VM.
925 if (IS_ENABLED(CONFIG_PROVE_LOCKING) &&
926 refcount_read(&kvm->users_count) && kvm->created_vcpus)
927 lockdep_assert_held_write(&kvm->mmu_lock);
930 * As above, mmu_lock isn't held when destroying the VM! There can't
931 * be other references to @kvm, i.e. nothing else can invalidate roots
932 * or get/put references to roots.
934 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
936 * Note, invalid roots can outlive a memslot update! Invalid
937 * roots must be *zapped* before the memslot update completes,
938 * but a different task can acquire a reference and keep the
939 * root alive after it's been zapped.
941 if (!root->role.invalid) {
942 root->tdp_mmu_scheduled_root_to_zap = true;
943 root->role.invalid = true;
949 * Installs a last-level SPTE to handle a TDP page fault.
950 * (NPT/EPT violation/misconfiguration)
952 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
953 struct kvm_page_fault *fault,
954 struct tdp_iter *iter)
956 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep));
958 int ret = RET_PF_FIXED;
961 if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
964 if (unlikely(!fault->slot))
965 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
967 wrprot = make_spte(vcpu, sp, fault->slot, ACC_ALL, iter->gfn,
968 fault->pfn, iter->old_spte, fault->prefetch, true,
969 fault->map_writable, &new_spte);
971 if (new_spte == iter->old_spte)
972 ret = RET_PF_SPURIOUS;
973 else if (tdp_mmu_set_spte_atomic(vcpu->kvm, iter, new_spte))
975 else if (is_shadow_present_pte(iter->old_spte) &&
976 !is_last_spte(iter->old_spte, iter->level))
977 kvm_flush_remote_tlbs_gfn(vcpu->kvm, iter->gfn, iter->level);
980 * If the page fault was caused by a write but the page is write
981 * protected, emulation is needed. If the emulation was skipped,
982 * the vCPU would have the same fault again.
986 ret = RET_PF_EMULATE;
989 /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
990 if (unlikely(is_mmio_spte(new_spte))) {
991 vcpu->stat.pf_mmio_spte_created++;
992 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
994 ret = RET_PF_EMULATE;
996 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
997 rcu_dereference(iter->sptep));
1004 * tdp_mmu_link_sp - Replace the given spte with an spte pointing to the
1005 * provided page table.
1007 * @kvm: kvm instance
1008 * @iter: a tdp_iter instance currently on the SPTE that should be set
1009 * @sp: The new TDP page table to install.
1010 * @shared: This operation is running under the MMU lock in read mode.
1012 * Returns: 0 if the new page table was installed. Non-0 if the page table
1013 * could not be installed (e.g. the atomic compare-exchange failed).
1015 static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
1016 struct kvm_mmu_page *sp, bool shared)
1018 u64 spte = make_nonleaf_spte(sp->spt, !kvm_ad_enabled());
1022 ret = tdp_mmu_set_spte_atomic(kvm, iter, spte);
1026 tdp_mmu_iter_set_spte(kvm, iter, spte);
1029 tdp_account_mmu_page(kvm, sp);
1034 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1035 struct kvm_mmu_page *sp, bool shared);
1038 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
1039 * page tables and SPTEs to translate the faulting guest physical address.
1041 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
1043 struct kvm_mmu *mmu = vcpu->arch.mmu;
1044 struct kvm *kvm = vcpu->kvm;
1045 struct tdp_iter iter;
1046 struct kvm_mmu_page *sp;
1047 int ret = RET_PF_RETRY;
1049 kvm_mmu_hugepage_adjust(vcpu, fault);
1051 trace_kvm_mmu_spte_requested(fault);
1055 tdp_mmu_for_each_pte(iter, mmu, fault->gfn, fault->gfn + 1) {
1058 if (fault->nx_huge_page_workaround_enabled)
1059 disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
1062 * If SPTE has been frozen by another thread, just give up and
1063 * retry, avoiding unnecessary page table allocation and free.
1065 if (is_removed_spte(iter.old_spte))
1068 if (iter.level == fault->goal_level)
1069 goto map_target_level;
1071 /* Step down into the lower level page table if it exists. */
1072 if (is_shadow_present_pte(iter.old_spte) &&
1073 !is_large_pte(iter.old_spte))
1077 * The SPTE is either non-present or points to a huge page that
1078 * needs to be split.
1080 sp = tdp_mmu_alloc_sp(vcpu);
1081 tdp_mmu_init_child_sp(sp, &iter);
1083 sp->nx_huge_page_disallowed = fault->huge_page_disallowed;
1085 if (is_shadow_present_pte(iter.old_spte))
1086 r = tdp_mmu_split_huge_page(kvm, &iter, sp, true);
1088 r = tdp_mmu_link_sp(kvm, &iter, sp, true);
1091 * Force the guest to retry if installing an upper level SPTE
1092 * failed, e.g. because a different task modified the SPTE.
1095 tdp_mmu_free_sp(sp);
1099 if (fault->huge_page_disallowed &&
1100 fault->req_level >= iter.level) {
1101 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
1102 if (sp->nx_huge_page_disallowed)
1103 track_possible_nx_huge_page(kvm, sp);
1104 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
1109 * The walk aborted before reaching the target level, e.g. because the
1110 * iterator detected an upper level SPTE was frozen during traversal.
1112 WARN_ON_ONCE(iter.level == fault->goal_level);
1116 ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
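/*
 * Zap the leaf SPTEs in an mmu_notifier unmap range for every root, valid and
 * invalid, in the affected address space. Returns true if a TLB flush is
 * needed before the MMU lock is released.
 */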
1123 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1126 struct kvm_mmu_page *root;
1128 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false)
1129 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end,
1130 range->may_block, flush);
1135 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1136 struct kvm_gfn_range *range);
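/*
 * Invoke @handler on every present leaf SPTE covered by @range, in every root
 * for the range's address space. Returns true if any handler call returned
 * true.
 */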
1138 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1139 struct kvm_gfn_range *range,
1140 tdp_handler_t handler)
1142 struct kvm_mmu_page *root;
1143 struct tdp_iter iter;
1147 * Don't support rescheduling, none of the MMU notifiers that funnel
1148 * into this helper allow blocking; it'd be dead, wasteful code.
1150 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1153 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1154 ret |= handler(kvm, &iter, range);
1163 * Mark the SPTEs mapping GFNs in the range [start, end) unaccessed and return non-zero
1164 * if any of the GFNs in the range have been accessed.
1166 * No need to mark the corresponding PFN as accessed as this call is coming
1167 * from the clear_young() or clear_flush_young() notifier, which uses the
1168 * return value to determine if the page has been accessed.
1170 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1171 struct kvm_gfn_range *range)
1175 /* If we have a non-accessed entry we don't need to change the pte. */
1176 if (!is_accessed_spte(iter->old_spte))
1179 if (spte_ad_enabled(iter->old_spte)) {
1180 iter->old_spte = tdp_mmu_clear_spte_bits(iter->sptep,
1182 shadow_accessed_mask,
1184 new_spte = iter->old_spte & ~shadow_accessed_mask;
1187 * Capture the dirty status of the page, so that it doesn't get
1188 * lost when the SPTE is marked for access tracking.
1190 if (is_writable_pte(iter->old_spte))
1191 kvm_set_pfn_dirty(spte_to_pfn(iter->old_spte));
1193 new_spte = mark_spte_for_access_track(iter->old_spte);
1194 iter->old_spte = kvm_tdp_mmu_write_spte(iter->sptep,
1195 iter->old_spte, new_spte,
1199 trace_kvm_tdp_mmu_spte_changed(iter->as_id, iter->gfn, iter->level,
1200 iter->old_spte, new_spte);
1204 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1206 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1209 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1210 struct kvm_gfn_range *range)
1212 return is_accessed_spte(iter->old_spte);
1215 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1217 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1220 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1221 struct kvm_gfn_range *range)
1225 /* Huge pages aren't expected to be modified without first being zapped. */
1226 WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end);
1228 if (iter->level != PG_LEVEL_4K ||
1229 !is_shadow_present_pte(iter->old_spte))
1233 * Note, when changing a read-only SPTE, it's not strictly necessary to
1234 * zero the SPTE before setting the new PFN, but doing so preserves the
1235 * invariant that the PFN of a present leaf SPTE can never change.
1236 * See handle_changed_spte().
1238 tdp_mmu_iter_set_spte(kvm, iter, 0);
1240 if (!pte_write(range->arg.pte)) {
1241 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1242 pte_pfn(range->arg.pte));
1244 tdp_mmu_iter_set_spte(kvm, iter, new_spte);
1251 * Handle the changed_pte MMU notifier for the TDP MMU.
1252 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
1253 * notifier.
1254 * Returns non-zero if a flush is needed before releasing the MMU lock.
1256 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1259 * No need to handle the remote TLB flush under RCU protection, the
1260 * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a
1261 * shadow page. See the WARN on pfn_changed in handle_changed_spte().
1263 return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1267 * Remove write access from all SPTEs at or above min_level that map GFNs
1268 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1269 * be flushed.
1271 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1272 gfn_t start, gfn_t end, int min_level)
1274 struct tdp_iter iter;
1276 bool spte_set = false;
1280 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1282 for_each_tdp_pte_min_level(iter, root, min_level, start, end) {
1284 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1287 if (!is_shadow_present_pte(iter.old_spte) ||
1288 !is_last_spte(iter.old_spte, iter.level) ||
1289 !(iter.old_spte & PT_WRITABLE_MASK))
1292 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1294 if (tdp_mmu_set_spte_atomic(kvm, &iter, new_spte))
1305 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1306 * only affect leaf SPTEs down to min_level.
1307 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1309 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1310 const struct kvm_memory_slot *slot, int min_level)
1312 struct kvm_mmu_page *root;
1313 bool spte_set = false;
1315 lockdep_assert_held_read(&kvm->mmu_lock);
1317 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1318 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1319 slot->base_gfn + slot->npages, min_level);
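/*
 * Allocate a shadow page for eager huge page splitting. The allocation is done
 * directly rather than via the per-vCPU caches, as splitting may run outside
 * of vCPU context, e.g. when dirty logging is enabled on a memslot.
 */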
1324 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp)
1326 struct kvm_mmu_page *sp;
1330 sp = kmem_cache_alloc(mmu_page_header_cache, gfp);
1334 sp->spt = (void *)__get_free_page(gfp);
1336 kmem_cache_free(mmu_page_header_cache, sp);
1343 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
1344 struct tdp_iter *iter,
1347 struct kvm_mmu_page *sp;
1350 * Since we are allocating while under the MMU lock we have to be
1351 * careful about GFP flags. Use GFP_NOWAIT to avoid blocking on direct
1352 * reclaim and to avoid making any filesystem callbacks (which can end
1353 * up invoking KVM MMU notifiers, resulting in a deadlock).
1355 * If this allocation fails we drop the lock and retry with reclaim
1356 * allowed.
1358 sp = __tdp_mmu_alloc_sp_for_split(GFP_NOWAIT | __GFP_ACCOUNT);
1365 read_unlock(&kvm->mmu_lock);
1367 write_unlock(&kvm->mmu_lock);
1369 iter->yielded = true;
1370 sp = __tdp_mmu_alloc_sp_for_split(GFP_KERNEL_ACCOUNT);
1373 read_lock(&kvm->mmu_lock);
1375 write_lock(&kvm->mmu_lock);
1382 /* Note, the caller is responsible for initializing @sp. */
1383 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
1384 struct kvm_mmu_page *sp, bool shared)
1386 const u64 huge_spte = iter->old_spte;
1387 const int level = iter->level;
1391 * No need for atomics when writing to sp->spt since the page table has
1392 * not been linked in yet and thus is not reachable from any other CPU.
1394 for (i = 0; i < SPTE_ENT_PER_PAGE; i++)
1395 sp->spt[i] = make_huge_page_split_spte(kvm, huge_spte, sp->role, i);
1398 * Replace the huge spte with a pointer to the populated lower level
1399 * page table. Since we are making this change without a TLB flush vCPUs
1400 * will see a mix of the split mappings and the original huge mapping,
1401 * depending on what's currently in their TLB. This is fine from a
1402 * correctness standpoint since the translation will be the same either
1403 * way.
1405 ret = tdp_mmu_link_sp(kvm, iter, sp, shared);
1410 * tdp_mmu_link_sp() will handle subtracting the huge page we
1411 * are overwriting from the page stats. But we have to manually update
1412 * the page stats with the new present child pages.
1414 kvm_update_page_stats(kvm, level - 1, SPTE_ENT_PER_PAGE);
1417 trace_kvm_mmu_split_huge_page(iter->gfn, huge_spte, level, ret);
1421 static int tdp_mmu_split_huge_pages_root(struct kvm *kvm,
1422 struct kvm_mmu_page *root,
1423 gfn_t start, gfn_t end,
1424 int target_level, bool shared)
1426 struct kvm_mmu_page *sp = NULL;
1427 struct tdp_iter iter;
1433 * Traverse the page table splitting all huge pages above the target
1434 * level into one lower level. For example, if we encounter a 1GB page
1435 * we split it into 512 2MB pages.
1437 * Since the TDP iterator uses a pre-order traversal, we are guaranteed
1438 * to visit an SPTE before ever visiting its children, which means we
1439 * will correctly recursively split huge pages that are more than one
1440 * level above the target level (e.g. splitting a 1GB to 512 2MB pages,
1441 * and then splitting each of those to 512 4KB pages).
1443 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) {
1445 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, shared))
1448 if (!is_shadow_present_pte(iter.old_spte) || !is_large_pte(iter.old_spte))
1452 sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
1455 trace_kvm_mmu_split_huge_page(iter.gfn,
1465 tdp_mmu_init_child_sp(sp, &iter);
1467 if (tdp_mmu_split_huge_page(kvm, &iter, sp, shared))
1476 * It's possible to exit the loop having never used the last sp if, for
1477 * example, a vCPU doing HugePage NX splitting wins the race and
1478 * installs its own sp in place of the last sp we tried to split.
1481 tdp_mmu_free_sp(sp);
1488 * Try to split all huge pages mapped by the TDP MMU down to the target level.
1490 void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
1491 const struct kvm_memory_slot *slot,
1492 gfn_t start, gfn_t end,
1493 int target_level, bool shared)
1495 struct kvm_mmu_page *root;
1498 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
1500 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
1501 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared);
1503 kvm_tdp_mmu_put_root(kvm, root, shared);
1510 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1511 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1512 * If AD bits are not enabled, this will require clearing the writable bit on
1513 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1514 * be flushed.
1516 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1517 gfn_t start, gfn_t end)
1519 u64 dbit = kvm_ad_enabled() ? shadow_dirty_mask : PT_WRITABLE_MASK;
1520 struct tdp_iter iter;
1521 bool spte_set = false;
1525 tdp_root_for_each_leaf_pte(iter, root, start, end) {
1527 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1530 if (!is_shadow_present_pte(iter.old_spte))
1533 KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1534 spte_ad_need_write_protect(iter.old_spte));
1536 if (!(iter.old_spte & dbit))
1539 if (tdp_mmu_set_spte_atomic(kvm, &iter, iter.old_spte & ~dbit))
1550 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1551 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1552 * If AD bits are not enabled, this will require clearing the writable bit on
1553 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1554 * be flushed.
1556 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1557 const struct kvm_memory_slot *slot)
1559 struct kvm_mmu_page *root;
1560 bool spte_set = false;
1562 lockdep_assert_held_read(&kvm->mmu_lock);
1564 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1565 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1566 slot->base_gfn + slot->npages);
1572 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1573 * set in mask, starting at gfn. The given memslot is expected to contain all
1574 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1575 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1576 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1578 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1579 gfn_t gfn, unsigned long mask, bool wrprot)
1581 u64 dbit = (wrprot || !kvm_ad_enabled()) ? PT_WRITABLE_MASK :
1583 struct tdp_iter iter;
1585 lockdep_assert_held_write(&kvm->mmu_lock);
1589 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1590 gfn + BITS_PER_LONG) {
1594 KVM_MMU_WARN_ON(kvm_ad_enabled() &&
1595 spte_ad_need_write_protect(iter.old_spte));
1597 if (iter.level > PG_LEVEL_4K ||
1598 !(mask & (1UL << (iter.gfn - gfn))))
1601 mask &= ~(1UL << (iter.gfn - gfn));
1603 if (!(iter.old_spte & dbit))
1606 iter.old_spte = tdp_mmu_clear_spte_bits(iter.sptep,
1607 iter.old_spte, dbit,
1610 trace_kvm_tdp_mmu_spte_changed(iter.as_id, iter.gfn, iter.level,
1612 iter.old_spte & ~dbit);
1613 kvm_set_pfn_dirty(spte_to_pfn(iter.old_spte));
1620 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1621 * set in mask, starting at gfn. The given memslot is expected to contain all
1622 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1623 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1624 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1626 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1627 struct kvm_memory_slot *slot,
1628 gfn_t gfn, unsigned long mask,
1631 struct kvm_mmu_page *root;
1633 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1634 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1637 static void zap_collapsible_spte_range(struct kvm *kvm,
1638 struct kvm_mmu_page *root,
1639 const struct kvm_memory_slot *slot)
1641 gfn_t start = slot->base_gfn;
1642 gfn_t end = start + slot->npages;
1643 struct tdp_iter iter;
1644 int max_mapping_level;
1648 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) {
1650 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1653 if (iter.level > KVM_MAX_HUGEPAGE_LEVEL ||
1654 !is_shadow_present_pte(iter.old_spte))
1658 * Don't zap leaf SPTEs; if a leaf SPTE could be replaced with
1659 * a large page size, then its parent would have been zapped
1660 * instead of stepping down.
1662 if (is_last_spte(iter.old_spte, iter.level))
1666 * If iter.gfn resides outside of the slot, i.e. the page for
1667 * the current level overlaps but is not contained by the slot,
1668 * then the SPTE can't be made huge. More importantly, trying
1669 * to query that info from slot->arch.lpage_info will cause an
1670 * out-of-bounds access.
1672 if (iter.gfn < start || iter.gfn >= end)
1675 max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
1676 iter.gfn, PG_LEVEL_NUM);
1677 if (max_mapping_level < iter.level)
1680 /* Note, a successful atomic zap also does a remote TLB flush. */
1681 if (tdp_mmu_zap_spte_atomic(kvm, &iter))
1689 * Zap non-leaf SPTEs (and free their associated page tables) which could
1690 * be replaced by huge pages, for GFNs within the slot.
1692 void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1693 const struct kvm_memory_slot *slot)
1695 struct kvm_mmu_page *root;
1697 lockdep_assert_held_read(&kvm->mmu_lock);
1699 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1700 zap_collapsible_spte_range(kvm, root, slot);
1704 * Removes write access on the last level SPTE mapping this GFN and unsets the
1705 * MMU-writable bit to ensure future writes continue to be intercepted.
1706 * Returns true if an SPTE was set and a TLB flush is needed.
1708 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1709 gfn_t gfn, int min_level)
1711 struct tdp_iter iter;
1713 bool spte_set = false;
1715 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1719 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) {
1720 if (!is_shadow_present_pte(iter.old_spte) ||
1721 !is_last_spte(iter.old_spte, iter.level))
1724 new_spte = iter.old_spte &
1725 ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1727 if (new_spte == iter.old_spte)
1730 tdp_mmu_iter_set_spte(kvm, &iter, new_spte);
1740 * Removes write access on the last level SPTE mapping this GFN and unsets the
1741 * MMU-writable bit to ensure future writes continue to be intercepted.
1742 * Returns true if an SPTE was set and a TLB flush is needed.
1744 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1745 struct kvm_memory_slot *slot, gfn_t gfn,
1748 struct kvm_mmu_page *root;
1749 bool spte_set = false;
1751 lockdep_assert_held_write(&kvm->mmu_lock);
1752 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1753 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1759 * Return the level of the lowest level SPTE added to sptes.
1760 * That SPTE may be non-present.
1762 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1764 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1767 struct tdp_iter iter;
1768 struct kvm_mmu *mmu = vcpu->arch.mmu;
1769 gfn_t gfn = addr >> PAGE_SHIFT;
1772 *root_level = vcpu->arch.mmu->root_role.level;
1774 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1775 leaf = iter.level;
1776 sptes[leaf] = iter.old_spte;
1783 * Returns the last level spte pointer of the shadow page walk for the given
1784 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1785 * walk could be performed, returns NULL and *spte does not contain valid data.
1788 * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1789 * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1791 * WARNING: This function is only intended to be called during fast_page_fault.
1793 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1796 struct tdp_iter iter;
1797 struct kvm_mmu *mmu = vcpu->arch.mmu;
1798 gfn_t gfn = addr >> PAGE_SHIFT;
1799 tdp_ptep_t sptep = NULL;
1801 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1802 *spte = iter.old_spte;
1807 * Perform the rcu_dereference to get the raw spte pointer value since
1808 * we are passing it up to fast_page_fault, which is shared with the
1809 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1810 * annotation.
1812 * This is safe since fast_page_fault obeys the contracts of this
1813 * function as well as all TDP MMU contracts around modifying SPTEs
1814 * outside of mmu_lock.
1816 return rcu_dereference(sptep);