/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))
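
/*
 * Worked example (editorial illustration, not from the original source): with
 * 64-bit paging, bits_per_level is 9 (PT64_LEVEL_BITS), so for a level-2
 * (2MiB) entry:
 *
 *	__PT_LEVEL_SHIFT(2, 9) == 12 + (2 - 1) * 9 == 21
 *	__PT_INDEX(addr, 2, 9) == (addr >> 21) & 511
 *
 * i.e. bits 29:21 of the address select one of the __PT_ENT_PER_PAGE(9) == 512
 * entries in the level-2 table, matching the hardware page walk.
 */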

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
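
/*
 * Usage sketch (editorial, assuming the pattern used by mmu.c, where
 * mmu->pae_root is the array of PAE roots): an invalid root is stored and
 * tested as:
 *
 *	mmu->pae_root[i] = INVALID_PAE_ROOT;
 *	...
 *	if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
 *		continue;
 */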

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

typedef u64 __rcu *tdp_ptep_t;
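
/*
 * Note (editorial): tdp_ptep_t is an RCU-annotated pointer to a TDP MMU SPTE,
 * so lockless walkers read through it under RCU protection, along the lines
 * of:
 *
 *	rcu_read_lock();
 *	spte = READ_ONCE(*rcu_dereference(sptep));
 *	...
 *	rcu_read_unlock();
 */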

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits. Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;
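
	/*
	 * Decoding sketch (editorial): per the layout described above, the
	 * shadowed GFN and KVM-format access bits for the SPTE at 'index'
	 * are recovered as:
	 *
	 *	gfn_t gfn = sp->shadowed_translation[index] >> PAGE_SHIFT;
	 *	u32 access = sp->shadowed_translation[index] & ACC_ALL;
	 *
	 * where ACC_ALL is the mask of all KVM access bits (ACC_EXEC_MASK |
	 * ACC_WRITE_MASK | ACC_USER_MASK).
	 */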

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	union {
		DECLARE_BITMAP(unsync_child_bitmap, 512);
		struct {
			struct work_struct tdp_mmu_async_work;
			void *tdp_mmu_async_data;
		};
	};

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}
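
/*
 * Worked example (editorial): KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is 512, and in
 * two's complement -512 == ~511, so 'gfn & -512' clears the low 9 bits and
 * rounds the gfn down to the start of its 2MiB-aligned region.
 */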

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool can_unsync, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}
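
/*
 * Usage sketch (editorial): after zapping a 2MiB SPTE, the entire region it
 * mapped must be flushed:
 *
 *	kvm_flush_remote_tlbs_gfn(kvm, gfn, PG_LEVEL_2M);
 *
 * which rounds gfn down and flushes all 512 4KiB pages in the region.
 */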

unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}
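
/*
 * Note (editorial): nx_huge_pages is the kvm module parameter backing the
 * iTLB multihit mitigation (e.g. /sys/module/kvm/parameters/nx_huge_pages);
 * the per-VM disable_nx_huge_pages opt-out is set via
 * KVM_CAP_VM_DISABLE_NX_HUGE_PAGES.
 */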

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u32 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_faultin_pfn. */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	hva_t hva;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefetch, int *emulation_type)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	/*
	 * Async #PF "faults", a.k.a. prefetch faults, are not faults from the
	 * guest perspective and have already been counted at the time of the
	 * original fault.
	 */
	if (!prefetch)
		vcpu->stat.pf_taken++;

	if (IS_ENABLED(CONFIG_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	/*
	 * Similar to above, prefetch faults aren't truly spurious, and the
	 * async #PF path doesn't do emulation.  Do count faults that are fixed
	 * by the async #PF handler though, otherwise they'll never be counted.
	 */
	if (r == RET_PF_FIXED)
		vcpu->stat.pf_fixed++;
	else if (prefetch)
		;
	else if (r == RET_PF_EMULATE)
		vcpu->stat.pf_emulate++;
	else if (r == RET_PF_SPURIOUS)
		vcpu->stat.pf_spurious++;

	return r;
}
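
/*
 * Usage sketch (editorial, paraphrasing the caller in mmu.c): the main entry
 * point invokes this roughly as:
 *
 *	r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa, lower_32_bits(error_code),
 *				  false, &emulation_type);
 *	if (r == RET_PF_EMULATE)
 *		... fall through to instruction emulation ...
 */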

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      int max_level);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */