/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "hyperv.h"
#include "vmcs.h"
#include "../x86.h"

void vmread_error(unsigned long field, bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

#ifndef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
/*
 * The VMREAD error trampoline _always_ uses the stack to pass parameters, even
 * for 64-bit targets. Preserving all registers allows the VMREAD inline asm
 * blob to avoid clobbering GPRs, which in turn allows the compiler to better
 * optimize sequences of VMREADs.
 *
 * Declare the trampoline as an opaque label as it's not safe to call from C
 * code; there is no way to tell the compiler to pass params on the stack for
 * some of the registers.
 *
 * void vmread_error_trampoline(unsigned long field, bool fault);
 */
extern unsigned long vmread_error_trampoline;
#endif
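
/*
 * The vmcs_check*() helpers below statically reject accessors whose width
 * does not match the VMCS field encoding.  Per the SDM, bits 14:13 of a
 * field encoding give the field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit,
 * 3 = natural width) and bit 0 is the access type ('1' selects the high 32
 * bits of a 64-bit field), hence the 0x6000/0x6001 masks.  E.g. GUEST_RIP
 * (encoding 0x681e) has bits 14:13 = 3 and so must use the natural width
 * accessors.  The checks only fire when @field is a compile-time constant;
 * non-constant fields are accepted as-is.
 */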

static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
	asm_volatile_goto("1: vmread %[field], %[output]\n\t"
			  "jna %l[do_fail]\n\t"
			  _ASM_EXTABLE(1b, %l[do_exception])
			  : [output] "=r" (value)
			  : [field] "r" (field)
			  : "cc"
			  : do_fail, do_exception);

	return value;

do_fail:
	instrumentation_begin();
	WARN_ONCE(1, KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
	pr_warn_ratelimited(KBUILD_MODNAME ": vmread failed: field=%lx\n", field);
	instrumentation_end();
	return 0;

do_exception:
	kvm_spurious_fault();
	return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"
		     /*
		      * VMREAD failed. Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "xorl %k1, %k1\n\t"
		     "2:\n\t"
		     "push %1\n\t"
		     "push %2\n\t"
		     "call vmread_error_trampoline\n\t"
		     /*
		      * Unwind the stack. Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"
		     /* VMREAD faulted. As above, except push '1' for @fault. */
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
		     : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");

	return value;
#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_read64(field);
	return __vmcs_readl(field);
}
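
/*
 * Illustrative usage (a sketch; field names from asm/vmx.h): with a VMCS
 * current on this CPU, the accessor width must match the field encoding:
 *
 *	u16 es_sel       = vmcs_read16(GUEST_ES_SELECTOR);	16-bit,  0x0800
 *	u32 exit_reason  = vmcs_read32(VM_EXIT_REASON);		32-bit,  0x4402
 *	unsigned long ip = vmcs_readl(GUEST_RIP);		natural, 0x681e
 *
 * Getting the width wrong, e.g. vmcs_read16(GUEST_RIP), is a build error
 * courtesy of the vmcs_check*() helpers above.
 */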

#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)
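
/*
 * Note the vmx_asm*() blocks above end with "return" on success, i.e. they
 * must be the tail statement of the wrapper that expands them.  On VMfail,
 * the paste insn##_error() resolves to the matching handler declared at the
 * top of this file (e.g. vmwrite -> vmwrite_error()), while an instruction
 * that faults (e.g. no VMCS is current) lands in kvm_spurious_fault().
 */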

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (kvm_is_using_evmcs())
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}
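
/*
 * On 32-bit hosts a 64-bit field is written in two halves: encoding
 * "field + 1" sets the access-type bit (bit 0) and thus selects the high
 * 32 bits of the same field, matching the split read in vmcs_read64().
 */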

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (kvm_is_using_evmcs())
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (kvm_is_using_evmcs())
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}
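
/*
 * Illustrative use of the read-modify-write helpers (control field and bit
 * names from asm/vmx.h), e.g. toggling RDTSC exiting in the 32-bit primary
 * processor-based controls:
 *
 *	vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_RDTSC_EXITING);
 *	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_RDTSC_EXITING);
 */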

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (kvm_is_using_evmcs())
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}
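
/*
 * INVVPID and INVEPT both take a 128-bit memory operand descriptor (hence
 * "m"(operand) above).  For INVVPID the VPID occupies bits 15:0 and the
 * linear address bits 127:64, which is what the bitfield struct encodes;
 * for INVEPT the first quadword is the EPT pointer.  The gpa slot of the
 * INVEPT descriptor is unused by the extent types KVM issues; all callers
 * in this file pass 0.
 */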

static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}
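
/*
 * The vpid_sync_*() helpers fall back to the next-coarser INVVPID type when
 * the CPU lacks the finer one: individual-address -> single-context ->
 * all-context.  The vpid == 0 bail-outs matter because the single-context
 * and individual-address types VMfail on a zero VPID (VPID 0 is the tag the
 * CPU uses for the host itself).
 */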

static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */