/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"
#include "x86.h"

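/*
 * Slow-path handlers for VMX instruction failures, reached from the
 * error/fixup paths of the inline asm below.  Editorial note: the
 * regparm(0) on vmread_error_trampoline() forces its arguments onto
 * the stack, matching the pushes done by the asm fallback in
 * __vmcs_readl().
 */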
asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
                                                          bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

static __always_inline void vmcs_check16(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "16-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "16-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "16-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "32-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "32-bit accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "32-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "64-bit accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "64-bit accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "64-bit accessor invalid for 32-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
                         "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
                         "Natural width accessor invalid for 16-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
                         "Natural width accessor invalid for 64-bit field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
                         "Natural width accessor invalid for 64-bit high field");
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
                         "Natural width accessor invalid for 32-bit field");
}
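
/*
 * Editorial note on the masks above: in the VMCS field encoding, bits
 * 14:13 give the field width and bit 0 the access type (1 = high 32
 * bits of a 64-bit field), so "field & 0x6000" extracts the width and
 * "field & 0x6001" also catches high accesses.  Worked examples with
 * encodings from asm/vmx.h:
 *
 *      GUEST_ES_SELECTOR (0x0800) & 0x6000 == 0x0000 -> 16-bit field
 *      TSC_OFFSET        (0x2010) & 0x6001 == 0x2000 -> 64-bit field
 *      TSC_OFFSET_HIGH   (0x2011) & 0x6001 == 0x2001 -> 64-bit high field
 *      GUEST_ES_LIMIT    (0x4800) & 0x6000 == 0x4000 -> 32-bit field
 *      GUEST_RIP         (0x681e) & 0x6000 == 0x6000 -> natural width
 *
 * The __builtin_constant_p() guard makes these pure compile-time
 * checks; non-constant fields are not checked.
 */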

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
        unsigned long value;

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

        asm_volatile_goto("1: vmread %[field], %[output]\n\t"
                          "jna %l[do_fail]\n\t"

                          _ASM_EXTABLE(1b, %l[do_exception])

                          : [output] "=r" (value)
                          : [field] "r" (field)
                          : "cc"
                          : do_fail, do_exception);

        return value;

do_fail:
        WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
        pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
        return 0;

do_exception:
        kvm_spurious_fault();
        return 0;

#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */

        asm volatile("1: vmread %2, %1\n\t"
                     ".byte 0x3e\n\t" /* branch taken hint */
                     "ja 3f\n\t"

                     /*
                      * VMREAD failed.  Push '0' for @fault, push the failing
                      * @field, and bounce through the trampoline to preserve
                      * volatile registers.
                      */
                     "push $0\n\t"
                     "push %2\n\t"
                     "2:call vmread_error_trampoline\n\t"

                     /*
                      * Unwind the stack.  Note, the trampoline zeros out the
                      * memory for @fault so that the result is '0' on error.
                      */
                     "pop %2\n\t"
                     "pop %1\n\t"
                     "3:\n\t"

                     /* VMREAD faulted.  As above, except push '1' for @fault. */
                     ".pushsection .fixup, \"ax\"\n\t"
                     "4: push $1\n\t"
                     "push %2\n\t"
                     "jmp 2b\n\t"
                     ".popsection\n\t"
                     _ASM_EXTABLE(1b, 4b)
                     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
        return value;

#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}
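
/*
 * Editorial sketch (illustration, not kernel code): with asm goto
 * output support, __vmcs_readl() behaves like:
 *
 *      value = vmread(field);
 *      if (VMfailInvalid || VMfailValid)  // CF or ZF set, i.e. "jna"
 *              goto do_fail;              // warn once, return 0
 *      if (vmread raised an exception)    // e.g. VMX not enabled
 *              goto do_exception;         // kvm_spurious_fault()
 *      return value;
 *
 * The pre-asm-goto fallback open-codes the same flow, bouncing through
 * vmread_error_trampoline() so the slow path preserves volatile GPRs.
 */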

static __always_inline u16 vmcs_read16(unsigned long field)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read16(field);
        return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read32(field);
        return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
#ifdef CONFIG_X86_64
        return __vmcs_readl(field);
#else
        return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}
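
/*
 * Editorial note: on 32-bit hosts a 64-bit field is read with two
 * VMREADs; "field + 1" sets the access-type bit and selects the high
 * half, e.g. TSC_OFFSET (0x2010) followed by TSC_OFFSET_HIGH (0x2011).
 */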

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_read64(field);
        return __vmcs_readl(field);
}

#define vmx_asm1(insn, op1, error_args...)                              \
do {                                                                    \
        asm_volatile_goto("1: " __stringify(insn) " %0\n\t"             \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1 : "cc" : error, fault);               \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)                         \
do {                                                                    \
        asm_volatile_goto("1: "  __stringify(insn) " %1, %0\n\t"        \
                          ".byte 0x2e\n\t" /* branch not taken hint */  \
                          "jna %l[error]\n\t"                           \
                          _ASM_EXTABLE(1b, %l[fault])                   \
                          : : op1, op2 : "cc" : error, fault);          \
        return;                                                         \
error:                                                                  \
        instrumentation_begin();                                        \
        insn##_error(error_args);                                       \
        instrumentation_end();                                          \
        return;                                                         \
fault:                                                                  \
        kvm_spurious_fault();                                           \
} while (0)
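
/*
 * Editorial example of how the macros above expand:
 * vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr) becomes, roughly:
 *
 *      asm goto("1: vmclear %0 ... jna %l[error] ..."
 *               _ASM_EXTABLE(1b, %l[fault]) : : "m"(phys_addr) ...);
 *      return;
 * error:                                       // VMfail: flags set
 *      vmclear_error(vmcs, phys_addr);         // insn##_error(error_args)
 *      return;
 * fault:                                       // instruction faulted
 *      kvm_spurious_fault();
 */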

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
        vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
        vmcs_check16(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write16(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
        vmcs_check32(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, value);

        __vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
        vmcs_check64(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
#ifndef CONFIG_X86_64
        __vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
        vmcs_checkl(field);
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write64(field, value);

        __vmcs_writel(field, value);
}
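
/*
 * Editorial usage sketch: callers pick the accessor that matches the
 * field width; a mismatch on a constant field fails the build:
 *
 *      u32 reason = vmcs_read32(VM_EXIT_REASON);  // 0x4402, 32-bit
 *      vmcs_writel(GUEST_RIP, rip);               // natural width
 *      vmcs_read16(GUEST_RIP);                    // BUILD_BUG_ON_MSG fires
 */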

static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_clear_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) & ~mask);

        __vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
        BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
                         "vmcs_set_bits does not support 64-bit fields");
        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_write32(field, evmcs_read32(field) | mask);

        __vmcs_writel(field, __vmcs_readl(field) | mask);
}
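
/*
 * Editorial note: the two helpers above are read-modify-write wrappers
 * with a u32 mask, hence the build bug on 64-bit fields (on 32-bit
 * hosts the high half would need a second VMREAD/VMWRITE).  Example
 * with a real 32-bit control field (hypothetical call):
 *
 *      vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 */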

static inline void vmcs_clear(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
        u64 phys_addr = __pa(vmcs);

        if (static_branch_unlikely(&enable_evmcs))
                return evmcs_load(phys_addr);

        vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
        struct {
                u64 vpid : 16;
                u64 rsvd : 48;
                u64 gva;
        } operand = { vpid, 0, gva };

        vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
        struct {
                u64 eptp, gpa;
        } operand = {eptp, gpa};

        vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}
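
/*
 * Editorial note: the anonymous structs above lay out the 128-bit
 * in-memory descriptors the SDM defines for these instructions:
 * INVVPID takes {vpid[15:0], reserved[63:16], gva[127:64]}, INVEPT
 * takes {eptp[63:0], reserved[127:64]}; callers below pass 0 for the
 * reserved words (the "gpa" member is the reserved half of INVEPT's
 * descriptor and is always 0 here).
 */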

static inline void vpid_sync_vcpu_single(int vpid)
{
        if (vpid == 0)
                return;

        __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
        __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
        if (cpu_has_vmx_invvpid_single())
                vpid_sync_vcpu_single(vpid);
        else if (vpid != 0)
                vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
        if (vpid == 0)
                return;

        if (cpu_has_vmx_invvpid_individual_addr())
                __invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
        else
                vpid_sync_context(vpid);
}

static inline void ept_sync_global(void)
{
        __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
        if (cpu_has_vmx_invept_context())
                __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
        else
                ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */