/* arch/x86/kvm/svm/svm.c [platform/kernel/linux-starfive.git] */
1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
3 #include <linux/kvm_host.h>
4
5 #include "irq.h"
6 #include "mmu.h"
7 #include "kvm_cache_regs.h"
8 #include "x86.h"
9 #include "smm.h"
10 #include "cpuid.h"
11 #include "pmu.h"
12
13 #include <linux/module.h>
14 #include <linux/mod_devicetable.h>
15 #include <linux/kernel.h>
16 #include <linux/vmalloc.h>
17 #include <linux/highmem.h>
18 #include <linux/amd-iommu.h>
19 #include <linux/sched.h>
20 #include <linux/trace_events.h>
21 #include <linux/slab.h>
22 #include <linux/hashtable.h>
23 #include <linux/objtool.h>
24 #include <linux/psp-sev.h>
25 #include <linux/file.h>
26 #include <linux/pagemap.h>
27 #include <linux/swap.h>
28 #include <linux/rwsem.h>
29 #include <linux/cc_platform.h>
30 #include <linux/smp.h>
31
32 #include <asm/apic.h>
33 #include <asm/perf_event.h>
34 #include <asm/tlbflush.h>
35 #include <asm/desc.h>
36 #include <asm/debugreg.h>
37 #include <asm/kvm_para.h>
38 #include <asm/irq_remapping.h>
39 #include <asm/spec-ctrl.h>
40 #include <asm/cpu_device_id.h>
41 #include <asm/traps.h>
42 #include <asm/fpu/api.h>
43
44 #include <asm/virtext.h>
45
46 #include <trace/events/ipi.h>
47
48 #include "trace.h"
49
50 #include "svm.h"
51 #include "svm_ops.h"
52
53 #include "kvm_onhyperv.h"
54 #include "svm_onhyperv.h"
55
56 MODULE_AUTHOR("Qumranet");
57 MODULE_LICENSE("GPL");
58
59 #ifdef MODULE
60 static const struct x86_cpu_id svm_cpu_id[] = {
61         X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
62         {}
63 };
64 MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
65 #endif
66
67 #define SEG_TYPE_LDT 2
68 #define SEG_TYPE_BUSY_TSS16 3
69
70 static bool erratum_383_found __read_mostly;
71
72 u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
73
74 /*
75  * Set osvw_len to a higher value when updated Revision Guides
76  * are published and we know what the new status bits are.
77  */
78 static uint64_t osvw_len = 4, osvw_status;
79
80 static DEFINE_PER_CPU(u64, current_tsc_ratio);
81
82 #define X2APIC_MSR(x)   (APIC_BASE_MSR + (x >> 4))
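/*
 * Worked example (added for illustration, not part of the original source):
 * the x2APIC MSR index is the xAPIC MMIO offset divided by 16, added to
 * APIC_BASE_MSR (0x800).  For the TPR, whose xAPIC offset APIC_TASKPRI is
 * 0x80, this gives X2APIC_MSR(APIC_TASKPRI) = 0x800 + (0x80 >> 4) = 0x808,
 * which is the architectural x2APIC TPR MSR.
 */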
83
84 static const struct svm_direct_access_msrs {
85         u32 index;   /* Index of the MSR */
86         bool always; /* True if intercept is initially cleared */
87 } direct_access_msrs[MAX_DIRECT_ACCESS_MSRS] = {
88         { .index = MSR_STAR,                            .always = true  },
89         { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
90         { .index = MSR_IA32_SYSENTER_EIP,               .always = false },
91         { .index = MSR_IA32_SYSENTER_ESP,               .always = false },
92 #ifdef CONFIG_X86_64
93         { .index = MSR_GS_BASE,                         .always = true  },
94         { .index = MSR_FS_BASE,                         .always = true  },
95         { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
96         { .index = MSR_LSTAR,                           .always = true  },
97         { .index = MSR_CSTAR,                           .always = true  },
98         { .index = MSR_SYSCALL_MASK,                    .always = true  },
99 #endif
100         { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
101         { .index = MSR_IA32_PRED_CMD,                   .always = false },
102         { .index = MSR_IA32_FLUSH_CMD,                  .always = false },
103         { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
104         { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
105         { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
106         { .index = MSR_IA32_LASTINTTOIP,                .always = false },
107         { .index = MSR_EFER,                            .always = false },
108         { .index = MSR_IA32_CR_PAT,                     .always = false },
109         { .index = MSR_AMD64_SEV_ES_GHCB,               .always = true  },
110         { .index = MSR_TSC_AUX,                         .always = false },
111         { .index = X2APIC_MSR(APIC_ID),                 .always = false },
112         { .index = X2APIC_MSR(APIC_LVR),                .always = false },
113         { .index = X2APIC_MSR(APIC_TASKPRI),            .always = false },
114         { .index = X2APIC_MSR(APIC_ARBPRI),             .always = false },
115         { .index = X2APIC_MSR(APIC_PROCPRI),            .always = false },
116         { .index = X2APIC_MSR(APIC_EOI),                .always = false },
117         { .index = X2APIC_MSR(APIC_RRR),                .always = false },
118         { .index = X2APIC_MSR(APIC_LDR),                .always = false },
119         { .index = X2APIC_MSR(APIC_DFR),                .always = false },
120         { .index = X2APIC_MSR(APIC_SPIV),               .always = false },
121         { .index = X2APIC_MSR(APIC_ISR),                .always = false },
122         { .index = X2APIC_MSR(APIC_TMR),                .always = false },
123         { .index = X2APIC_MSR(APIC_IRR),                .always = false },
124         { .index = X2APIC_MSR(APIC_ESR),                .always = false },
125         { .index = X2APIC_MSR(APIC_ICR),                .always = false },
126         { .index = X2APIC_MSR(APIC_ICR2),               .always = false },
127
128         /*
129          * Note:
130          * AMD does not virtualize APIC TSC-deadline timer mode, but it is
131          * emulated by KVM. When setting APIC LVTT (0x832) register bit 18,
132          * the AVIC hardware would generate a #GP fault. Therefore, always
133          * intercept MSR 0x832 and do not set up a direct_access_msrs entry.
134          */
135         { .index = X2APIC_MSR(APIC_LVTTHMR),            .always = false },
136         { .index = X2APIC_MSR(APIC_LVTPC),              .always = false },
137         { .index = X2APIC_MSR(APIC_LVT0),               .always = false },
138         { .index = X2APIC_MSR(APIC_LVT1),               .always = false },
139         { .index = X2APIC_MSR(APIC_LVTERR),             .always = false },
140         { .index = X2APIC_MSR(APIC_TMICT),              .always = false },
141         { .index = X2APIC_MSR(APIC_TMCCT),              .always = false },
142         { .index = X2APIC_MSR(APIC_TDCR),               .always = false },
143         { .index = MSR_INVALID,                         .always = false },
144 };
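/*
 * Summary added for clarity: entries flagged .always = true get both their
 * read and write intercepts cleared for every vCPU in svm_vcpu_init_msrpm(),
 * while init_msrpm_offsets() records the MSRPM word offset of every listed
 * MSR in msrpm_offsets[] so that nested MSR-bitmap merging only has to visit
 * the offsets that can actually differ.
 */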
145
146 /*
147  * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
148  * pause_filter_count: On processors that support Pause filtering (indicated
149  *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16-bit pause filter
150  *      count value. On VMRUN this value is loaded into an internal counter.
151  *      Each time a pause instruction is executed, this counter is decremented
152  *      until it reaches zero, at which time a #VMEXIT is generated if the
153  *      pause intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4
154  *      Pause Intercept Filtering for more details.
155  *      This also indicates whether the PLE logic is enabled.
156  *
157  * pause_filter_thresh: In addition, some processor families support advanced
158  *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
159  *      upper bound on the amount of time a guest is allowed to execute in a
160  *      pause loop. In this mode, a 16-bit pause filter threshold field is
161  *      added to the VMCB. The threshold value is a cycle count that is used
162  *      to reset the pause counter. As with simple pause filtering, VMRUN
163  *      loads the pause count value from the VMCB into an internal counter.
164  *      Then, on each pause instruction, the hardware checks the number of
165  *      cycles elapsed since the most recent pause instruction against the
166  *      pause filter threshold. If the elapsed cycle count is greater than
167  *      the pause filter threshold, the internal pause count is reloaded from
168  *      the VMCB and execution continues. If the elapsed cycle count is less
169  *      than the pause filter threshold, the internal pause count is
170  *      decremented. If the count value is less than zero and the PAUSE
171  *      intercept is enabled, a #VMEXIT is triggered. If advanced pause
172  *      filtering is supported and the pause filter threshold field is set to
173  *      zero, the filter operates in the simpler, count-only mode.
174  */
175
176 static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
177 module_param(pause_filter_thresh, ushort, 0444);
178
179 static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
180 module_param(pause_filter_count, ushort, 0444);
181
182 /* Default doubles per-vcpu window every exit. */
183 static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
184 module_param(pause_filter_count_grow, ushort, 0444);
185
186 /* Default resets per-vcpu window every exit to pause_filter_count. */
187 static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
188 module_param(pause_filter_count_shrink, ushort, 0444);
189
190 /* Default is to compute the maximum so we can never overflow. */
191 static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
192 module_param(pause_filter_count_max, ushort, 0444);
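/*
 * Conceptual sketch of the filtering described in the comment above.  This is
 * illustrative only; the real logic lives in hardware and is driven by the
 * VMCB fields, and the helper below is hypothetical and never compiled.
 */
#if 0
static bool pause_would_vmexit(u16 *counter, u16 reload_count, u16 thresh,
			       u64 cycles_since_last_pause)
{
	/* Advanced filtering: a "slow" PAUSE loop refills the window. */
	if (thresh && cycles_since_last_pause > thresh) {
		*counter = reload_count;
		return false;
	}

	/* Tight PAUSE loop: burn down the window. */
	if (*counter)
		(*counter)--;

	/* Window exhausted: #VMEXIT, assuming the PAUSE intercept is set. */
	return *counter == 0;
}
#endif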
193
194 /*
195  * Use nested page tables by default.  Note, NPT may get forced off by
196  * svm_hardware_setup() if it's unsupported by hardware or the host kernel.
197  */
198 bool npt_enabled = true;
199 module_param_named(npt, npt_enabled, bool, 0444);
200
201 /* allow nested virtualization in KVM/SVM */
202 static int nested = true;
203 module_param(nested, int, S_IRUGO);
204
205 /* enable/disable Next RIP Save */
206 static int nrips = true;
207 module_param(nrips, int, 0444);
208
209 /* enable/disable Virtual VMLOAD VMSAVE */
210 static int vls = true;
211 module_param(vls, int, 0444);
212
213 /* enable/disable Virtual GIF */
214 int vgif = true;
215 module_param(vgif, int, 0444);
216
217 /* enable/disable LBR virtualization */
218 static int lbrv = true;
219 module_param(lbrv, int, 0444);
220
221 static int tsc_scaling = true;
222 module_param(tsc_scaling, int, 0444);
223
224 /*
225  * enable / disable AVIC.  Because the defaults differ for APICv
226  * support between VMX and SVM we cannot use module_param_named.
227  */
228 static bool avic;
229 module_param(avic, bool, 0444);
230
231 bool __read_mostly dump_invalid_vmcb;
232 module_param(dump_invalid_vmcb, bool, 0644);
233
234
235 bool intercept_smi = true;
236 module_param(intercept_smi, bool, 0444);
237
238 bool vnmi = true;
239 module_param(vnmi, bool, 0444);
240
241 static bool svm_gp_erratum_intercept = true;
242
243 static u8 rsm_ins_bytes[] = "\x0f\xaa";
244
245 static unsigned long iopm_base;
246
247 DEFINE_PER_CPU(struct svm_cpu_data, svm_data);
248
249 /*
250  * Only MSR_TSC_AUX is switched via the user return hook.  EFER is switched via
251  * the VMCB, and the SYSCALL/SYSENTER MSRs are handled by VMLOAD/VMSAVE.
252  *
253  * RDTSCP and RDPID are not used in the kernel, specifically to allow KVM to
254  * defer the restoration of TSC_AUX until the CPU returns to userspace.
255  */
256 static int tsc_aux_uret_slot __read_mostly = -1;
257
258 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
259
260 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
261 #define MSRS_RANGE_SIZE 2048
262 #define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
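/*
 * Arithmetic note (added for clarity): each 2048-byte range of the MSR
 * permission map holds 2048 * 8 = 16384 intercept bits, and every MSR needs
 * two of them (one read bit, one write bit), hence 8192 MSRs per range.
 */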
263
264 u32 svm_msrpm_offset(u32 msr)
265 {
266         u32 offset;
267         int i;
268
269         for (i = 0; i < NUM_MSR_MAPS; i++) {
270                 if (msr < msrpm_ranges[i] ||
271                     msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
272                         continue;
273
274                 offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
275                 offset += (i * MSRS_RANGE_SIZE);       /* add range offset */
276
277                 /* Now we have the u8 offset - but need the u32 offset */
278                 return offset / 4;
279         }
280
281         /* MSR not in any range */
282         return MSR_INVALID;
283 }
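/*
 * Worked example (added for illustration): MSR_STAR is 0xc0000081 and falls
 * in the second range.  Its byte offset is (0xc0000081 - 0xc0000000) / 4 = 32,
 * plus one 2048-byte range offset gives byte 2080, so svm_msrpm_offset()
 * returns u32 word 520 of the permission map.
 */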
284
285 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu);
286
287 static int get_npt_level(void)
288 {
289 #ifdef CONFIG_X86_64
290         return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
291 #else
292         return PT32E_ROOT_LEVEL;
293 #endif
294 }
295
296 int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
297 {
298         struct vcpu_svm *svm = to_svm(vcpu);
299         u64 old_efer = vcpu->arch.efer;
300         vcpu->arch.efer = efer;
301
302         if (!npt_enabled) {
303                 /* Shadow paging assumes NX to be available.  */
304                 efer |= EFER_NX;
305
306                 if (!(efer & EFER_LMA))
307                         efer &= ~EFER_LME;
308         }
309
310         if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
311                 if (!(efer & EFER_SVME)) {
312                         svm_leave_nested(vcpu);
313                         svm_set_gif(svm, true);
314                         /* #GP intercept is still needed for vmware backdoor */
315                         if (!enable_vmware_backdoor)
316                                 clr_exception_intercept(svm, GP_VECTOR);
317
318                         /*
319                          * Free the nested guest state, unless we are in SMM.
320                          * In this case we will return to the nested guest
321                          * as soon as we leave SMM.
322                          */
323                         if (!is_smm(vcpu))
324                                 svm_free_nested(svm);
325
326                 } else {
327                         int ret = svm_allocate_nested(svm);
328
329                         if (ret) {
330                                 vcpu->arch.efer = old_efer;
331                                 return ret;
332                         }
333
334                         /*
335                          * Never intercept #GP for SEV guests, KVM can't
336                          * decrypt guest memory to work around the erratum.
337                          */
338                         if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
339                                 set_exception_intercept(svm, GP_VECTOR);
340                 }
341         }
342
343         svm->vmcb->save.efer = efer | EFER_SVME;
344         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
345         return 0;
346 }
347
348 static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
349 {
350         struct vcpu_svm *svm = to_svm(vcpu);
351         u32 ret = 0;
352
353         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
354                 ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
355         return ret;
356 }
357
358 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
359 {
360         struct vcpu_svm *svm = to_svm(vcpu);
361
362         if (mask == 0)
363                 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
364         else
365                 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
366
367 }
368
369 static int __svm_skip_emulated_instruction(struct kvm_vcpu *vcpu,
370                                            bool commit_side_effects)
371 {
372         struct vcpu_svm *svm = to_svm(vcpu);
373         unsigned long old_rflags;
374
375         /*
376          * SEV-ES does not expose the next RIP. The RIP update is controlled by
377          * the type of exit and the #VC handler in the guest.
378          */
379         if (sev_es_guest(vcpu->kvm))
380                 goto done;
381
382         if (nrips && svm->vmcb->control.next_rip != 0) {
383                 WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
384                 svm->next_rip = svm->vmcb->control.next_rip;
385         }
386
387         if (!svm->next_rip) {
388                 if (unlikely(!commit_side_effects))
389                         old_rflags = svm->vmcb->save.rflags;
390
391                 if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
392                         return 0;
393
394                 if (unlikely(!commit_side_effects))
395                         svm->vmcb->save.rflags = old_rflags;
396         } else {
397                 kvm_rip_write(vcpu, svm->next_rip);
398         }
399
400 done:
401         if (likely(commit_side_effects))
402                 svm_set_interrupt_shadow(vcpu, 0);
403
404         return 1;
405 }
406
407 static int svm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
408 {
409         return __svm_skip_emulated_instruction(vcpu, true);
410 }
411
412 static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
413 {
414         unsigned long rip, old_rip = kvm_rip_read(vcpu);
415         struct vcpu_svm *svm = to_svm(vcpu);
416
417         /*
418          * Due to architectural shortcomings, the CPU doesn't always provide
419          * NextRIP, e.g. if KVM intercepted an exception that occurred while
420          * the CPU was vectoring an INTO/INT3 in the guest.  Temporarily skip
421          * the instruction even if NextRIP is supported to acquire the next
422          * RIP so that it can be shoved into the NextRIP field, otherwise
423          * hardware will fail to advance guest RIP during event injection.
424          * Drop the exception/interrupt if emulation fails and effectively
425          * retry the instruction, it's the least awful option.  If NRIPS is
426          * in use, the skip must not commit any side effects such as clearing
427          * the interrupt shadow or RFLAGS.RF.
428          */
429         if (!__svm_skip_emulated_instruction(vcpu, !nrips))
430                 return -EIO;
431
432         rip = kvm_rip_read(vcpu);
433
434         /*
435          * Save the injection information, even when using next_rip, as the
436          * VMCB's next_rip will be lost (cleared on VM-Exit) if the injection
437          * doesn't complete due to a VM-Exit occurring while the CPU is
438          * vectoring the event.   Decoding the instruction isn't guaranteed to
439          * work as there may be no backing instruction, e.g. if the event is
440          * being injected by L1 for L2, or if the guest is patching INT3 into
441          * a different instruction.
442          */
443         svm->soft_int_injected = true;
444         svm->soft_int_csbase = svm->vmcb->save.cs.base;
445         svm->soft_int_old_rip = old_rip;
446         svm->soft_int_next_rip = rip;
447
448         if (nrips)
449                 kvm_rip_write(vcpu, old_rip);
450
451         if (static_cpu_has(X86_FEATURE_NRIPS))
452                 svm->vmcb->control.next_rip = rip;
453
454         return 0;
455 }
456
457 static void svm_inject_exception(struct kvm_vcpu *vcpu)
458 {
459         struct kvm_queued_exception *ex = &vcpu->arch.exception;
460         struct vcpu_svm *svm = to_svm(vcpu);
461
462         kvm_deliver_exception_payload(vcpu, ex);
463
464         if (kvm_exception_is_soft(ex->vector) &&
465             svm_update_soft_interrupt_rip(vcpu))
466                 return;
467
468         svm->vmcb->control.event_inj = ex->vector
469                 | SVM_EVTINJ_VALID
470                 | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
471                 | SVM_EVTINJ_TYPE_EXEPT;
472         svm->vmcb->control.event_inj_err = ex->error_code;
473 }
474
475 static void svm_init_erratum_383(void)
476 {
477         u32 low, high;
478         int err;
479         u64 val;
480
481         if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
482                 return;
483
484         /* Use _safe variants to not break nested virtualization */
485         val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
486         if (err)
487                 return;
488
489         val |= (1ULL << 47);
490
491         low  = lower_32_bits(val);
492         high = upper_32_bits(val);
493
494         native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
495
496         erratum_383_found = true;
497 }
498
499 static void svm_init_osvw(struct kvm_vcpu *vcpu)
500 {
501         /*
502          * Guests should see errata 400 and 415 as fixed (assuming that
503          * HLT and IO instructions are intercepted).
504          */
505         vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
506         vcpu->arch.osvw.status = osvw_status & ~(6ULL);
507
508         /*
509          * By increasing VCPU's osvw.length to 3 we are telling the guest that
510          * all osvw.status bits inside that length, including bit 0 (which is
511          * reserved for erratum 298), are valid. However, if host processor's
512          * osvw_len is 0 then osvw_status[0] carries no information. We need to
513          * be conservative here and therefore we tell the guest that erratum 298
514          * is present (because we really don't know).
515          */
516         if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
517                 vcpu->arch.osvw.status |= 1;
518 }
519
520 static bool kvm_is_svm_supported(void)
521 {
522         int cpu = raw_smp_processor_id();
523         const char *msg;
524         u64 vm_cr;
525
526         if (!cpu_has_svm(&msg)) {
527                 pr_err("SVM not supported by CPU %d, %s\n", cpu, msg);
528                 return false;
529         }
530
531         if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
532                 pr_info("KVM is unsupported when running as an SEV guest\n");
533                 return false;
534         }
535
536         rdmsrl(MSR_VM_CR, vm_cr);
537         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE)) {
538                 pr_err("SVM disabled (by BIOS) in MSR_VM_CR on CPU %d\n", cpu);
539                 return false;
540         }
541
542         return true;
543 }
544
545 static int svm_check_processor_compat(void)
546 {
547         if (!kvm_is_svm_supported())
548                 return -EIO;
549
550         return 0;
551 }
552
553 void __svm_write_tsc_multiplier(u64 multiplier)
554 {
555         preempt_disable();
556
557         if (multiplier == __this_cpu_read(current_tsc_ratio))
558                 goto out;
559
560         wrmsrl(MSR_AMD64_TSC_RATIO, multiplier);
561         __this_cpu_write(current_tsc_ratio, multiplier);
562 out:
563         preempt_enable();
564 }
565
566 static void svm_hardware_disable(void)
567 {
568         /* Make sure we clean up behind us */
569         if (tsc_scaling)
570                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
571
572         cpu_svm_disable();
573
574         amd_pmu_disable_virt();
575 }
576
577 static int svm_hardware_enable(void)
578 {
579
580         struct svm_cpu_data *sd;
581         uint64_t efer;
582         int me = raw_smp_processor_id();
583
584         rdmsrl(MSR_EFER, efer);
585         if (efer & EFER_SVME)
586                 return -EBUSY;
587
588         sd = per_cpu_ptr(&svm_data, me);
589         sd->asid_generation = 1;
590         sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
591         sd->next_asid = sd->max_asid + 1;
592         sd->min_asid = max_sev_asid + 1;
593
594         wrmsrl(MSR_EFER, efer | EFER_SVME);
595
596         wrmsrl(MSR_VM_HSAVE_PA, sd->save_area_pa);
597
598         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
599                 /*
600                  * Set the default value, even if we don't use TSC scaling,
601                  * to avoid leaving a stale value in the MSR.
602                  */
603                 __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
604         }
605
606
607         /*
608          * Get OSVW bits.
609          *
610          * Note that it is possible to have a system with mixed processor
611          * revisions and therefore different OSVW bits. If bits are not the same
612          * on different processors then choose the worst case (i.e. if erratum
613          * is present on one processor and not on another then assume that the
614          * erratum is present everywhere).
615          */
616         if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
617                 uint64_t len, status = 0;
618                 int err;
619
620                 len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
621                 if (!err)
622                         status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
623                                                       &err);
624
625                 if (err)
626                         osvw_status = osvw_len = 0;
627                 else {
628                         if (len < osvw_len)
629                                 osvw_len = len;
630                         osvw_status |= status;
631                         osvw_status &= (1ULL << osvw_len) - 1;
632                 }
633         } else
634                 osvw_status = osvw_len = 0;
635
636         svm_init_erratum_383();
637
638         amd_pmu_enable_virt();
639
640         return 0;
641 }
642
643 static void svm_cpu_uninit(int cpu)
644 {
645         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
646
647         if (!sd->save_area)
648                 return;
649
650         kfree(sd->sev_vmcbs);
651         __free_page(sd->save_area);
652         sd->save_area_pa = 0;
653         sd->save_area = NULL;
654 }
655
656 static int svm_cpu_init(int cpu)
657 {
658         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
659         int ret = -ENOMEM;
660
661         memset(sd, 0, sizeof(struct svm_cpu_data));
662         sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
663         if (!sd->save_area)
664                 return ret;
665
666         ret = sev_cpu_init(sd);
667         if (ret)
668                 goto free_save_area;
669
670         sd->save_area_pa = __sme_page_pa(sd->save_area);
671         return 0;
672
673 free_save_area:
674         __free_page(sd->save_area);
675         sd->save_area = NULL;
676         return ret;
677
678 }
679
680 static int direct_access_msr_slot(u32 msr)
681 {
682         u32 i;
683
684         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
685                 if (direct_access_msrs[i].index == msr)
686                         return i;
687
688         return -ENOENT;
689 }
690
691 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read,
692                                      int write)
693 {
694         struct vcpu_svm *svm = to_svm(vcpu);
695         int slot = direct_access_msr_slot(msr);
696
697         if (slot == -ENOENT)
698                 return;
699
700         /* Set the shadow bitmaps to the desired intercept states */
701         if (read)
702                 set_bit(slot, svm->shadow_msr_intercept.read);
703         else
704                 clear_bit(slot, svm->shadow_msr_intercept.read);
705
706         if (write)
707                 set_bit(slot, svm->shadow_msr_intercept.write);
708         else
709                 clear_bit(slot, svm->shadow_msr_intercept.write);
710 }
711
712 static bool valid_msr_intercept(u32 index)
713 {
714         return direct_access_msr_slot(index) != -ENOENT;
715 }
716
717 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
718 {
719         u8 bit_write;
720         unsigned long tmp;
721         u32 offset;
722         u32 *msrpm;
723
724         /*
725          * For non-nested case:
726          * If the L01 MSR bitmap does not intercept the MSR, then we need to
727          * save it.
728          *
729          * For nested case:
730          * If the L02 MSR bitmap does not intercept the MSR, then we need to
731          * save it.
732          */
733         msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
734                                       to_svm(vcpu)->msrpm;
735
736         offset    = svm_msrpm_offset(msr);
737         bit_write = 2 * (msr & 0x0f) + 1;
738         tmp       = msrpm[offset];
739
740         BUG_ON(offset == MSR_INVALID);
741
742         return test_bit(bit_write, &tmp);
743 }
744
745 static void set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm,
746                                         u32 msr, int read, int write)
747 {
748         struct vcpu_svm *svm = to_svm(vcpu);
749         u8 bit_read, bit_write;
750         unsigned long tmp;
751         u32 offset;
752
753         /*
754          * If this warning triggers, extend the direct_access_msrs list at
755          * the beginning of the file.
756          */
757         WARN_ON(!valid_msr_intercept(msr));
758
759         /* Force interception of MSRs that are not allowed by the MSR filter */
760         if (read && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_READ))
761                 read = 0;
762
763         if (write && !kvm_msr_allowed(vcpu, msr, KVM_MSR_FILTER_WRITE))
764                 write = 0;
765
766         offset    = svm_msrpm_offset(msr);
767         bit_read  = 2 * (msr & 0x0f);
768         bit_write = 2 * (msr & 0x0f) + 1;
769         tmp       = msrpm[offset];
770
771         BUG_ON(offset == MSR_INVALID);
772
773         read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
774         write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
775
776         msrpm[offset] = tmp;
777
778         svm_hv_vmcb_dirty_nested_enlightenments(vcpu);
779         svm->nested.force_msr_bitmap_recalc = true;
780 }
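/*
 * Worked example (added for illustration), continuing the MSR_STAR case
 * above: within u32 word 520, MSR_STAR's slot is MSR_STAR & 0x0f = 1, so
 * bit_read is 2 and bit_write is 3.  Clearing a bit gives the guest direct
 * access to the MSR; setting it forces a #VMEXIT on the corresponding access.
 */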
781
782 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
783                           int read, int write)
784 {
785         set_shadow_msr_intercept(vcpu, msr, read, write);
786         set_msr_interception_bitmap(vcpu, msrpm, msr, read, write);
787 }
788
789 u32 *svm_vcpu_alloc_msrpm(void)
790 {
791         unsigned int order = get_order(MSRPM_SIZE);
792         struct page *pages = alloc_pages(GFP_KERNEL_ACCOUNT, order);
793         u32 *msrpm;
794
795         if (!pages)
796                 return NULL;
797
798         msrpm = page_address(pages);
799         memset(msrpm, 0xff, PAGE_SIZE * (1 << order));
800
801         return msrpm;
802 }
803
804 void svm_vcpu_init_msrpm(struct kvm_vcpu *vcpu, u32 *msrpm)
805 {
806         int i;
807
808         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
809                 if (!direct_access_msrs[i].always)
810                         continue;
811                 set_msr_interception(vcpu, msrpm, direct_access_msrs[i].index, 1, 1);
812         }
813 }
814
815 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept)
816 {
817         int i;
818
819         if (intercept == svm->x2avic_msrs_intercepted)
820                 return;
821
822         if (!x2avic_enabled ||
823             !apic_x2apic_mode(svm->vcpu.arch.apic))
824                 return;
825
826         for (i = 0; i < MAX_DIRECT_ACCESS_MSRS; i++) {
827                 int index = direct_access_msrs[i].index;
828
829                 if ((index < APIC_BASE_MSR) ||
830                     (index > APIC_BASE_MSR + 0xff))
831                         continue;
832                 set_msr_interception(&svm->vcpu, svm->msrpm, index,
833                                      !intercept, !intercept);
834         }
835
836         svm->x2avic_msrs_intercepted = intercept;
837 }
838
839 void svm_vcpu_free_msrpm(u32 *msrpm)
840 {
841         __free_pages(virt_to_page(msrpm), get_order(MSRPM_SIZE));
842 }
843
844 static void svm_msr_filter_changed(struct kvm_vcpu *vcpu)
845 {
846         struct vcpu_svm *svm = to_svm(vcpu);
847         u32 i;
848
849         /*
850          * Set intercept permissions for all direct access MSRs again. They
851          * will automatically get filtered through the MSR filter, so we are
852          * back in sync after this.
853          */
854         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
855                 u32 msr = direct_access_msrs[i].index;
856                 u32 read = test_bit(i, svm->shadow_msr_intercept.read);
857                 u32 write = test_bit(i, svm->shadow_msr_intercept.write);
858
859                 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write);
860         }
861 }
862
863 static void add_msr_offset(u32 offset)
864 {
865         int i;
866
867         for (i = 0; i < MSRPM_OFFSETS; ++i) {
868
869                 /* Offset already in list? */
870                 if (msrpm_offsets[i] == offset)
871                         return;
872
873                 /* Slot used by another offset? */
874                 if (msrpm_offsets[i] != MSR_INVALID)
875                         continue;
876
877                 /* Add offset to list */
878                 msrpm_offsets[i] = offset;
879
880                 return;
881         }
882
883         /*
884          * If this BUG triggers, the msrpm_offsets table has overflowed. Just
885          * increase MSRPM_OFFSETS in this case.
886          */
887         BUG();
888 }
889
890 static void init_msrpm_offsets(void)
891 {
892         int i;
893
894         memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
895
896         for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
897                 u32 offset;
898
899                 offset = svm_msrpm_offset(direct_access_msrs[i].index);
900                 BUG_ON(offset == MSR_INVALID);
901
902                 add_msr_offset(offset);
903         }
904 }
905
906 void svm_copy_lbrs(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
907 {
908         to_vmcb->save.dbgctl            = from_vmcb->save.dbgctl;
909         to_vmcb->save.br_from           = from_vmcb->save.br_from;
910         to_vmcb->save.br_to             = from_vmcb->save.br_to;
911         to_vmcb->save.last_excp_from    = from_vmcb->save.last_excp_from;
912         to_vmcb->save.last_excp_to      = from_vmcb->save.last_excp_to;
913
914         vmcb_mark_dirty(to_vmcb, VMCB_LBR);
915 }
916
917 static void svm_enable_lbrv(struct kvm_vcpu *vcpu)
918 {
919         struct vcpu_svm *svm = to_svm(vcpu);
920
921         svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
922         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
923         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
924         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
925         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
926
927         /* Move the LBR msrs to the vmcb02 so that the guest can see them. */
928         if (is_guest_mode(vcpu))
929                 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr);
930 }
931
932 static void svm_disable_lbrv(struct kvm_vcpu *vcpu)
933 {
934         struct vcpu_svm *svm = to_svm(vcpu);
935
936         svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
937         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
938         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
939         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
940         set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
941
942         /*
943          * Move the LBR msrs back to the vmcb01 to avoid copying them
944          * on nested guest entries.
945          */
946         if (is_guest_mode(vcpu))
947                 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb);
948 }
949
950 static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index)
951 {
952         /*
953          * If the LBR virtualization is disabled, the LBR msrs are always
954          * kept in the vmcb01 to avoid copying them on nested guest entries.
955          *
956          * When running a nested guest, the MSRs are moved between the vmcb01
957          * and the vmcb02 as LBR virtualization is enabled or disabled.
958          */
959         struct vmcb *vmcb =
960                 (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ?
961                         svm->vmcb : svm->vmcb01.ptr;
962
963         switch (index) {
964         case MSR_IA32_DEBUGCTLMSR:
965                 return vmcb->save.dbgctl;
966         case MSR_IA32_LASTBRANCHFROMIP:
967                 return vmcb->save.br_from;
968         case MSR_IA32_LASTBRANCHTOIP:
969                 return vmcb->save.br_to;
970         case MSR_IA32_LASTINTFROMIP:
971                 return vmcb->save.last_excp_from;
972         case MSR_IA32_LASTINTTOIP:
973                 return vmcb->save.last_excp_to;
974         default:
975                 KVM_BUG(false, svm->vcpu.kvm,
976                         "%s: Unknown MSR 0x%x", __func__, index);
977                 return 0;
978         }
979 }
980
981 void svm_update_lbrv(struct kvm_vcpu *vcpu)
982 {
983         struct vcpu_svm *svm = to_svm(vcpu);
984
985         bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) &
986                                            DEBUGCTLMSR_LBR;
987
988         bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext &
989                                       LBR_CTL_ENABLE_MASK);
990
991         if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled))
992                 if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))
993                         enable_lbrv = true;
994
995         if (enable_lbrv == current_enable_lbrv)
996                 return;
997
998         if (enable_lbrv)
999                 svm_enable_lbrv(vcpu);
1000         else
1001                 svm_disable_lbrv(vcpu);
1002 }
1003
1004 void disable_nmi_singlestep(struct vcpu_svm *svm)
1005 {
1006         svm->nmi_singlestep = false;
1007
1008         if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
1009                 /* Clear our flags if they were not set by the guest */
1010                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1011                         svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
1012                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1013                         svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
1014         }
1015 }
1016
1017 static void grow_ple_window(struct kvm_vcpu *vcpu)
1018 {
1019         struct vcpu_svm *svm = to_svm(vcpu);
1020         struct vmcb_control_area *control = &svm->vmcb->control;
1021         int old = control->pause_filter_count;
1022
1023         if (kvm_pause_in_guest(vcpu->kvm))
1024                 return;
1025
1026         control->pause_filter_count = __grow_ple_window(old,
1027                                                         pause_filter_count,
1028                                                         pause_filter_count_grow,
1029                                                         pause_filter_count_max);
1030
1031         if (control->pause_filter_count != old) {
1032                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1033                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1034                                             control->pause_filter_count, old);
1035         }
1036 }
1037
1038 static void shrink_ple_window(struct kvm_vcpu *vcpu)
1039 {
1040         struct vcpu_svm *svm = to_svm(vcpu);
1041         struct vmcb_control_area *control = &svm->vmcb->control;
1042         int old = control->pause_filter_count;
1043
1044         if (kvm_pause_in_guest(vcpu->kvm))
1045                 return;
1046
1047         control->pause_filter_count =
1048                                 __shrink_ple_window(old,
1049                                                     pause_filter_count,
1050                                                     pause_filter_count_shrink,
1051                                                     pause_filter_count);
1052         if (control->pause_filter_count != old) {
1053                 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1054                 trace_kvm_ple_window_update(vcpu->vcpu_id,
1055                                             control->pause_filter_count, old);
1056         }
1057 }
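/*
 * Illustrative note (assuming the module-parameter defaults documented
 * above): each PAUSE-triggered exit grows the per-vCPU filter count by
 * doubling it, e.g. a count of 3000 would go 3000 -> 6000 -> 12000, capped
 * at pause_filter_count_max, while shrinking resets the count straight back
 * to pause_filter_count.
 */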
1058
1059 static void svm_hardware_unsetup(void)
1060 {
1061         int cpu;
1062
1063         sev_hardware_unsetup();
1064
1065         for_each_possible_cpu(cpu)
1066                 svm_cpu_uninit(cpu);
1067
1068         __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT),
1069                      get_order(IOPM_SIZE));
1070         iopm_base = 0;
1071 }
1072
1073 static void init_seg(struct vmcb_seg *seg)
1074 {
1075         seg->selector = 0;
1076         seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
1077                       SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
1078         seg->limit = 0xffff;
1079         seg->base = 0;
1080 }
1081
1082 static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
1083 {
1084         seg->selector = 0;
1085         seg->attrib = SVM_SELECTOR_P_MASK | type;
1086         seg->limit = 0xffff;
1087         seg->base = 0;
1088 }
1089
1090 static u64 svm_get_l2_tsc_offset(struct kvm_vcpu *vcpu)
1091 {
1092         struct vcpu_svm *svm = to_svm(vcpu);
1093
1094         return svm->nested.ctl.tsc_offset;
1095 }
1096
1097 static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
1098 {
1099         struct vcpu_svm *svm = to_svm(vcpu);
1100
1101         return svm->tsc_ratio_msr;
1102 }
1103
1104 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1105 {
1106         struct vcpu_svm *svm = to_svm(vcpu);
1107
1108         svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
1109         svm->vmcb->control.tsc_offset = offset;
1110         vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
1111 }
1112
1113 static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier)
1114 {
1115         __svm_write_tsc_multiplier(multiplier);
1116 }
1117
1118
1119 /* Evaluate instruction intercepts that depend on guest CPUID features. */
1120 static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu,
1121                                               struct vcpu_svm *svm)
1122 {
1123         /*
1124          * Intercept INVPCID if shadow paging is enabled to sync/free shadow
1125          * roots, or if INVPCID is disabled in the guest to inject #UD.
1126          */
1127         if (kvm_cpu_cap_has(X86_FEATURE_INVPCID)) {
1128                 if (!npt_enabled ||
1129                     !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID))
1130                         svm_set_intercept(svm, INTERCEPT_INVPCID);
1131                 else
1132                         svm_clr_intercept(svm, INTERCEPT_INVPCID);
1133         }
1134
1135         if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP)) {
1136                 if (guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
1137                         svm_clr_intercept(svm, INTERCEPT_RDTSCP);
1138                 else
1139                         svm_set_intercept(svm, INTERCEPT_RDTSCP);
1140         }
1141 }
1142
1143 static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
1144 {
1145         struct vcpu_svm *svm = to_svm(vcpu);
1146
1147         if (guest_cpuid_is_intel(vcpu)) {
1148                 /*
1149                  * We must intercept SYSENTER_EIP and SYSENTER_ESP
1150                  * accesses because the processor only stores 32 bits.
1151                  * For the same reason we cannot use virtual VMLOAD/VMSAVE.
1152                  */
1153                 svm_set_intercept(svm, INTERCEPT_VMLOAD);
1154                 svm_set_intercept(svm, INTERCEPT_VMSAVE);
1155                 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1156
1157                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0);
1158                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0);
1159
1160                 svm->v_vmload_vmsave_enabled = false;
1161         } else {
1162                 /*
1163                  * If hardware supports Virtual VMLOAD VMSAVE then enable it
1164                  * in the VMCB and clear the intercepts to avoid #VMEXITs.
1165                  */
1166                 if (vls) {
1167                         svm_clr_intercept(svm, INTERCEPT_VMLOAD);
1168                         svm_clr_intercept(svm, INTERCEPT_VMSAVE);
1169                         svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1170                 }
1171                 /* No need to intercept these MSRs */
1172                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
1173                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
1174         }
1175 }
1176
1177 static void init_vmcb(struct kvm_vcpu *vcpu)
1178 {
1179         struct vcpu_svm *svm = to_svm(vcpu);
1180         struct vmcb *vmcb = svm->vmcb01.ptr;
1181         struct vmcb_control_area *control = &vmcb->control;
1182         struct vmcb_save_area *save = &vmcb->save;
1183
1184         svm_set_intercept(svm, INTERCEPT_CR0_READ);
1185         svm_set_intercept(svm, INTERCEPT_CR3_READ);
1186         svm_set_intercept(svm, INTERCEPT_CR4_READ);
1187         svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1188         svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
1189         svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
1190         if (!kvm_vcpu_apicv_active(vcpu))
1191                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
1192
1193         set_dr_intercepts(svm);
1194
1195         set_exception_intercept(svm, PF_VECTOR);
1196         set_exception_intercept(svm, UD_VECTOR);
1197         set_exception_intercept(svm, MC_VECTOR);
1198         set_exception_intercept(svm, AC_VECTOR);
1199         set_exception_intercept(svm, DB_VECTOR);
1200         /*
1201          * Guest access to VMware backdoor ports could legitimately
1202          * trigger #GP because of TSS I/O permission bitmap.
1203          * We intercept those #GP and allow access to them anyway
1204          * as VMware does.  Don't intercept #GP for SEV guests as KVM can't
1205          * decrypt guest memory to decode the faulting instruction.
1206          */
1207         if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
1208                 set_exception_intercept(svm, GP_VECTOR);
1209
1210         svm_set_intercept(svm, INTERCEPT_INTR);
1211         svm_set_intercept(svm, INTERCEPT_NMI);
1212
1213         if (intercept_smi)
1214                 svm_set_intercept(svm, INTERCEPT_SMI);
1215
1216         svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1217         svm_set_intercept(svm, INTERCEPT_RDPMC);
1218         svm_set_intercept(svm, INTERCEPT_CPUID);
1219         svm_set_intercept(svm, INTERCEPT_INVD);
1220         svm_set_intercept(svm, INTERCEPT_INVLPG);
1221         svm_set_intercept(svm, INTERCEPT_INVLPGA);
1222         svm_set_intercept(svm, INTERCEPT_IOIO_PROT);
1223         svm_set_intercept(svm, INTERCEPT_MSR_PROT);
1224         svm_set_intercept(svm, INTERCEPT_TASK_SWITCH);
1225         svm_set_intercept(svm, INTERCEPT_SHUTDOWN);
1226         svm_set_intercept(svm, INTERCEPT_VMRUN);
1227         svm_set_intercept(svm, INTERCEPT_VMMCALL);
1228         svm_set_intercept(svm, INTERCEPT_VMLOAD);
1229         svm_set_intercept(svm, INTERCEPT_VMSAVE);
1230         svm_set_intercept(svm, INTERCEPT_STGI);
1231         svm_set_intercept(svm, INTERCEPT_CLGI);
1232         svm_set_intercept(svm, INTERCEPT_SKINIT);
1233         svm_set_intercept(svm, INTERCEPT_WBINVD);
1234         svm_set_intercept(svm, INTERCEPT_XSETBV);
1235         svm_set_intercept(svm, INTERCEPT_RDPRU);
1236         svm_set_intercept(svm, INTERCEPT_RSM);
1237
1238         if (!kvm_mwait_in_guest(vcpu->kvm)) {
1239                 svm_set_intercept(svm, INTERCEPT_MONITOR);
1240                 svm_set_intercept(svm, INTERCEPT_MWAIT);
1241         }
1242
1243         if (!kvm_hlt_in_guest(vcpu->kvm))
1244                 svm_set_intercept(svm, INTERCEPT_HLT);
1245
1246         control->iopm_base_pa = __sme_set(iopm_base);
1247         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1248         control->int_ctl = V_INTR_MASKING_MASK;
1249
1250         init_seg(&save->es);
1251         init_seg(&save->ss);
1252         init_seg(&save->ds);
1253         init_seg(&save->fs);
1254         init_seg(&save->gs);
1255
1256         save->cs.selector = 0xf000;
1257         save->cs.base = 0xffff0000;
1258         /* Executable/Readable Code Segment */
1259         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1260                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1261         save->cs.limit = 0xffff;
1262
1263         save->gdtr.base = 0;
1264         save->gdtr.limit = 0xffff;
1265         save->idtr.base = 0;
1266         save->idtr.limit = 0xffff;
1267
1268         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1269         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1270
1271         if (npt_enabled) {
1272                 /* Set up the VMCB for Nested Paging */
1273                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1274                 svm_clr_intercept(svm, INTERCEPT_INVLPG);
1275                 clr_exception_intercept(svm, PF_VECTOR);
1276                 svm_clr_intercept(svm, INTERCEPT_CR3_READ);
1277                 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE);
1278                 save->g_pat = vcpu->arch.pat;
1279                 save->cr3 = 0;
1280         }
1281         svm->current_vmcb->asid_generation = 0;
1282         svm->asid = 0;
1283
1284         svm->nested.vmcb12_gpa = INVALID_GPA;
1285         svm->nested.last_vmcb12_gpa = INVALID_GPA;
1286
1287         if (!kvm_pause_in_guest(vcpu->kvm)) {
1288                 control->pause_filter_count = pause_filter_count;
1289                 if (pause_filter_thresh)
1290                         control->pause_filter_thresh = pause_filter_thresh;
1291                 svm_set_intercept(svm, INTERCEPT_PAUSE);
1292         } else {
1293                 svm_clr_intercept(svm, INTERCEPT_PAUSE);
1294         }
1295
1296         svm_recalc_instruction_intercepts(vcpu, svm);
1297
1298         /*
1299          * If the host supports V_SPEC_CTRL then disable the interception
1300          * of MSR_IA32_SPEC_CTRL.
1301          */
1302         if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
1303                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
1304
1305         if (kvm_vcpu_apicv_active(vcpu))
1306                 avic_init_vmcb(svm, vmcb);
1307
1308         if (vnmi)
1309                 svm->vmcb->control.int_ctl |= V_NMI_ENABLE_MASK;
1310
1311         if (vgif) {
1312                 svm_clr_intercept(svm, INTERCEPT_STGI);
1313                 svm_clr_intercept(svm, INTERCEPT_CLGI);
1314                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1315         }
1316
1317         if (sev_guest(vcpu->kvm))
1318                 sev_init_vmcb(svm);
1319
1320         svm_hv_init_vmcb(vmcb);
1321         init_vmcb_after_set_cpuid(vcpu);
1322
1323         vmcb_mark_all_dirty(vmcb);
1324
1325         enable_gif(svm);
1326 }
1327
1328 static void __svm_vcpu_reset(struct kvm_vcpu *vcpu)
1329 {
1330         struct vcpu_svm *svm = to_svm(vcpu);
1331
1332         svm_vcpu_init_msrpm(vcpu, svm->msrpm);
1333
1334         svm_init_osvw(vcpu);
1335         vcpu->arch.microcode_version = 0x01000065;
1336         svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio;
1337
1338         svm->nmi_masked = false;
1339         svm->awaiting_iret_completion = false;
1340
1341         if (sev_es_guest(vcpu->kvm))
1342                 sev_es_vcpu_reset(svm);
1343 }
1344
1345 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
1346 {
1347         struct vcpu_svm *svm = to_svm(vcpu);
1348
1349         svm->spec_ctrl = 0;
1350         svm->virt_spec_ctrl = 0;
1351
1352         init_vmcb(vcpu);
1353
1354         if (!init_event)
1355                 __svm_vcpu_reset(vcpu);
1356 }
1357
1358 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb)
1359 {
1360         svm->current_vmcb = target_vmcb;
1361         svm->vmcb = target_vmcb->ptr;
1362 }
1363
1364 static int svm_vcpu_create(struct kvm_vcpu *vcpu)
1365 {
1366         struct vcpu_svm *svm;
1367         struct page *vmcb01_page;
1368         struct page *vmsa_page = NULL;
1369         int err;
1370
1371         BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0);
1372         svm = to_svm(vcpu);
1373
1374         err = -ENOMEM;
1375         vmcb01_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1376         if (!vmcb01_page)
1377                 goto out;
1378
1379         if (sev_es_guest(vcpu->kvm)) {
1380                 /*
1381                  * SEV-ES guests require a separate VMSA page used to contain
1382                  * the encrypted register state of the guest.
1383                  */
1384                 vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1385                 if (!vmsa_page)
1386                         goto error_free_vmcb_page;
1387
1388                 /*
1389                  * SEV-ES guests maintain an encrypted version of their FPU
1390                  * state which is restored and saved on VMRUN and VMEXIT.
1391                  * Mark vcpu->arch.guest_fpu->fpstate as scratch so it won't
1392                  * do xsave/xrstor on it.
1393                  */
1394                 fpstate_set_confidential(&vcpu->arch.guest_fpu);
1395         }
1396
1397         err = avic_init_vcpu(svm);
1398         if (err)
1399                 goto error_free_vmsa_page;
1400
1401         svm->msrpm = svm_vcpu_alloc_msrpm();
1402         if (!svm->msrpm) {
1403                 err = -ENOMEM;
1404                 goto error_free_vmsa_page;
1405         }
1406
1407         svm->x2avic_msrs_intercepted = true;
1408
1409         svm->vmcb01.ptr = page_address(vmcb01_page);
1410         svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT);
1411         svm_switch_vmcb(svm, &svm->vmcb01);
1412
1413         if (vmsa_page)
1414                 svm->sev_es.vmsa = page_address(vmsa_page);
1415
1416         svm->guest_state_loaded = false;
1417
1418         return 0;
1419
1420 error_free_vmsa_page:
1421         if (vmsa_page)
1422                 __free_page(vmsa_page);
1423 error_free_vmcb_page:
1424         __free_page(vmcb01_page);
1425 out:
1426         return err;
1427 }
1428
1429 static void svm_clear_current_vmcb(struct vmcb *vmcb)
1430 {
1431         int i;
1432
1433         for_each_online_cpu(i)
1434                 cmpxchg(per_cpu_ptr(&svm_data.current_vmcb, i), vmcb, NULL);
1435 }
1436
1437 static void svm_vcpu_free(struct kvm_vcpu *vcpu)
1438 {
1439         struct vcpu_svm *svm = to_svm(vcpu);
1440
1441         /*
1442          * The vmcb page can be recycled, causing a false negative in
1443          * svm_vcpu_load(). So, ensure that no logical CPU has this
1444          * vmcb page recorded as its current vmcb.
1445          */
1446         svm_clear_current_vmcb(svm->vmcb);
1447
1448         svm_leave_nested(vcpu);
1449         svm_free_nested(svm);
1450
1451         sev_free_vcpu(vcpu);
1452
1453         __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT));
1454         __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
1455 }
1456
1457 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
1458 {
1459         struct vcpu_svm *svm = to_svm(vcpu);
1460         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
1461
1462         if (sev_es_guest(vcpu->kvm))
1463                 sev_es_unmap_ghcb(svm);
1464
1465         if (svm->guest_state_loaded)
1466                 return;
1467
1468         /*
1469          * Save additional host state that will be restored on VMEXIT (sev-es)
1470          * or subsequent vmload of host save area.
1471          */
1472         vmsave(sd->save_area_pa);
1473         if (sev_es_guest(vcpu->kvm)) {
1474                 struct sev_es_save_area *hostsa;
1475                 hostsa = (struct sev_es_save_area *)(page_address(sd->save_area) + 0x400);
1476
1477                 sev_es_prepare_switch_to_guest(hostsa);
1478         }
1479
1480         if (tsc_scaling)
1481                 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1482
1483         if (likely(tsc_aux_uret_slot >= 0))
1484                 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
1485
1486         svm->guest_state_loaded = true;
1487 }
1488
1489 static void svm_prepare_host_switch(struct kvm_vcpu *vcpu)
1490 {
1491         to_svm(vcpu)->guest_state_loaded = false;
1492 }
1493
1494 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1495 {
1496         struct vcpu_svm *svm = to_svm(vcpu);
1497         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
1498
1499         if (sd->current_vmcb != svm->vmcb) {
1500                 sd->current_vmcb = svm->vmcb;
1501                 indirect_branch_prediction_barrier();
1502         }
1503         if (kvm_vcpu_apicv_active(vcpu))
1504                 avic_vcpu_load(vcpu, cpu);
1505 }
1506
1507 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
1508 {
1509         if (kvm_vcpu_apicv_active(vcpu))
1510                 avic_vcpu_put(vcpu);
1511
1512         svm_prepare_host_switch(vcpu);
1513
1514         ++vcpu->stat.host_state_reload;
1515 }
1516
1517 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
1518 {
1519         struct vcpu_svm *svm = to_svm(vcpu);
1520         unsigned long rflags = svm->vmcb->save.rflags;
1521
1522         if (svm->nmi_singlestep) {
1523                 /* Hide our flags if they were not set by the guest */
1524                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
1525                         rflags &= ~X86_EFLAGS_TF;
1526                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
1527                         rflags &= ~X86_EFLAGS_RF;
1528         }
1529         return rflags;
1530 }
1531
1532 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1533 {
1534         if (to_svm(vcpu)->nmi_singlestep)
1535                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
1536
1537         /*
1538          * Any change of EFLAGS.VM is accompanied by a reload of SS
1539          * (caused by either a task switch or an inter-privilege IRET),
1540          * so we do not need to update the CPL here.
1541          */
1542         to_svm(vcpu)->vmcb->save.rflags = rflags;
1543 }
1544
1545 static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
1546 {
1547         struct vmcb *vmcb = to_svm(vcpu)->vmcb;
1548
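             /*
              * For SEV-ES guests the saved RFLAGS is not accessible to KVM,
              * so rely on the guest interrupt mask the CPU reports in
              * int_state instead.
              */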
1549         return sev_es_guest(vcpu->kvm)
1550                 ? vmcb->control.int_state & SVM_GUEST_INTERRUPT_MASK
1551                 : kvm_get_rflags(vcpu) & X86_EFLAGS_IF;
1552 }
1553
1554 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1555 {
1556         kvm_register_mark_available(vcpu, reg);
1557
1558         switch (reg) {
1559         case VCPU_EXREG_PDPTR:
1560                 /*
1561                  * When !npt_enabled, mmu->pdptrs[] is already available since
1562                  * it is always updated per SDM when moving to CRs.
1563                  */
1564                 if (npt_enabled)
1565                         load_pdptrs(vcpu, kvm_read_cr3(vcpu));
1566                 break;
1567         default:
1568                 KVM_BUG_ON(1, vcpu->kvm);
1569         }
1570 }
1571
1572 static void svm_set_vintr(struct vcpu_svm *svm)
1573 {
1574         struct vmcb_control_area *control;
1575
1576         /*
1577          * The following fields are ignored when AVIC is enabled
1578          */
1579         WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu));
1580
1581         svm_set_intercept(svm, INTERCEPT_VINTR);
1582
1583         /*
1584          * Recalculating intercepts may have cleared the VINTR intercept.  If
1585          * V_INTR_MASKING is enabled in vmcb12, then the effective RFLAGS.IF
1586          * for L1 physical interrupts is L1's RFLAGS.IF at the time of VMRUN.
1587          * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as
1588          * interrupts will never be unblocked while L2 is running.
1589          */
1590         if (!svm_is_intercept(svm, INTERCEPT_VINTR))
1591                 return;
1592
1593         /*
1594          * This is just a dummy VINTR to actually cause a vmexit to happen.
1595          * Actual injection of virtual interrupts happens through EVENTINJ.
1596          */
1597         control = &svm->vmcb->control;
1598         control->int_vector = 0x0;
1599         control->int_ctl &= ~V_INTR_PRIO_MASK;
1600         control->int_ctl |= V_IRQ_MASK |
1601                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1602         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1603 }
1604
1605 static void svm_clear_vintr(struct vcpu_svm *svm)
1606 {
1607         svm_clr_intercept(svm, INTERCEPT_VINTR);
1608
1609         /* Drop int_ctl fields related to VINTR injection.  */
1610         svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1611         if (is_guest_mode(&svm->vcpu)) {
1612                 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK;
1613
1614                 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) !=
1615                         (svm->nested.ctl.int_ctl & V_TPR_MASK));
1616
1617                 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl &
1618                         V_IRQ_INJECTION_BITS_MASK;
1619
1620                 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector;
1621         }
1622
1623         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
1624 }
1625
1626 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
1627 {
1628         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1629         struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save;
1630
1631         switch (seg) {
1632         case VCPU_SREG_CS: return &save->cs;
1633         case VCPU_SREG_DS: return &save->ds;
1634         case VCPU_SREG_ES: return &save->es;
1635         case VCPU_SREG_FS: return &save01->fs;
1636         case VCPU_SREG_GS: return &save01->gs;
1637         case VCPU_SREG_SS: return &save->ss;
1638         case VCPU_SREG_TR: return &save01->tr;
1639         case VCPU_SREG_LDTR: return &save01->ldtr;
1640         }
1641         BUG();
1642         return NULL;
1643 }
1644
1645 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1646 {
1647         struct vmcb_seg *s = svm_seg(vcpu, seg);
1648
1649         return s->base;
1650 }
1651
1652 static void svm_get_segment(struct kvm_vcpu *vcpu,
1653                             struct kvm_segment *var, int seg)
1654 {
1655         struct vmcb_seg *s = svm_seg(vcpu, seg);
1656
1657         var->base = s->base;
1658         var->limit = s->limit;
1659         var->selector = s->selector;
1660         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
1661         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
1662         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
1663         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
1664         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
1665         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
1666         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
1667
1668         /*
1669          * AMD CPUs circa 2014 track the G bit for all segments except CS.
1670          * However, the SVM spec states that the G bit is not observed by the
1671          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
1672          * So let's synthesize a legal G bit for all segments, this helps
1673          * running KVM nested. It also helps cross-vendor migration, because
1674          * Intel's vmentry has a check on the 'G' bit.
1675          */
1676         var->g = s->limit > 0xfffff;
1677
1678         /*
1679          * AMD's VMCB does not have an explicit unusable field, so emulate it
1680          * for cross-vendor migration purposes by treating "not present" as unusable.
1681          */
1682         var->unusable = !var->present;
1683
1684         switch (seg) {
1685         case VCPU_SREG_TR:
1686                 /*
1687                  * Work around a bug where the busy flag in the tr selector
1688                  * isn't exposed
1689                  */
1690                 var->type |= 0x2;
1691                 break;
1692         case VCPU_SREG_DS:
1693         case VCPU_SREG_ES:
1694         case VCPU_SREG_FS:
1695         case VCPU_SREG_GS:
1696                 /*
1697                  * The accessed bit must always be set in the segment
1698                  * descriptor cache: even if it is cleared in the
1699                  * descriptor itself, the cached bit remains 1. Since
1700                  * Intel checks this on VM-entry, set it here to support
1701                  * cross-vendor migration.
1702                  */
1703                 if (!var->unusable)
1704                         var->type |= 0x1;
1705                 break;
1706         case VCPU_SREG_SS:
1707                 /*
1708                  * On AMD CPUs sometimes the DB bit in the segment
1709                  * descriptor is left as 1, although the whole segment has
1710                  * been made unusable. Clear it here to pass an Intel VMX
1711                  * entry check when cross vendor migrating.
1712                  */
1713                 if (var->unusable)
1714                         var->db = 0;
1715                 /* This is symmetric with svm_set_segment() */
1716                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1717                 break;
1718         }
1719 }
1720
1721 static int svm_get_cpl(struct kvm_vcpu *vcpu)
1722 {
1723         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
1724
1725         return save->cpl;
1726 }
1727
1728 static void svm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1729 {
1730         struct kvm_segment cs;
1731
1732         svm_get_segment(vcpu, &cs, VCPU_SREG_CS);
1733         *db = cs.db;
1734         *l = cs.l;
1735 }
1736
1737 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1738 {
1739         struct vcpu_svm *svm = to_svm(vcpu);
1740
1741         dt->size = svm->vmcb->save.idtr.limit;
1742         dt->address = svm->vmcb->save.idtr.base;
1743 }
1744
1745 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1746 {
1747         struct vcpu_svm *svm = to_svm(vcpu);
1748
1749         svm->vmcb->save.idtr.limit = dt->size;
1750         svm->vmcb->save.idtr.base = dt->address;
1751         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1752 }
1753
1754 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1755 {
1756         struct vcpu_svm *svm = to_svm(vcpu);
1757
1758         dt->size = svm->vmcb->save.gdtr.limit;
1759         dt->address = svm->vmcb->save.gdtr.base;
1760 }
1761
1762 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
1763 {
1764         struct vcpu_svm *svm = to_svm(vcpu);
1765
1766         svm->vmcb->save.gdtr.limit = dt->size;
1767         svm->vmcb->save.gdtr.base = dt->address;
1768         vmcb_mark_dirty(svm->vmcb, VMCB_DT);
1769 }
1770
1771 static void sev_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1772 {
1773         struct vcpu_svm *svm = to_svm(vcpu);
1774
1775         /*
1776          * For guests that don't set guest_state_protected, the cr3 update is
1777          * handled via kvm_mmu_load() while entering the guest. For guests
1778          * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
1779          * VMCB save area now, since the save area will become the initial
1780          * contents of the VMSA, and future VMCB save area updates won't be
1781          * seen.
1782          */
1783         if (sev_es_guest(vcpu->kvm)) {
1784                 svm->vmcb->save.cr3 = cr3;
1785                 vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1786         }
1787 }
1788
1789 void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1790 {
1791         struct vcpu_svm *svm = to_svm(vcpu);
1792         u64 hcr0 = cr0;
1793         bool old_paging = is_paging(vcpu);
1794
1795 #ifdef CONFIG_X86_64
1796         if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
1797                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
1798                         vcpu->arch.efer |= EFER_LMA;
1799                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
1800                 }
1801
1802                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
1803                         vcpu->arch.efer &= ~EFER_LMA;
1804                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
1805                 }
1806         }
1807 #endif
1808         vcpu->arch.cr0 = cr0;
1809
1810         if (!npt_enabled) {
1811                 hcr0 |= X86_CR0_PG | X86_CR0_WP;
1812                 if (old_paging != is_paging(vcpu))
1813                         svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
1814         }
1815
1816         /*
1817          * Re-enable caching here because the QEMU BIOS
1818          * does not do it - otherwise there is some delay
1819          * at reboot.
1820          */
1821         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1822                 hcr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1823
1824         svm->vmcb->save.cr0 = hcr0;
1825         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
1826
1827         /*
1828          * SEV-ES guests must always keep the CR intercepts cleared. CR
1829          * tracking is done using the CR write traps.
1830          */
1831         if (sev_es_guest(vcpu->kvm))
1832                 return;
1833
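             /*
              * If the CR0 value seen by hardware differs from the guest's view
              * (the CD/NW quirk or the !NPT PG/WP forcing kicked in), intercept
              * CR0 reads and writes so the guest still observes and updates its
              * own value.
              */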
1834         if (hcr0 == cr0) {
1835                 /* Selective CR0 write remains on.  */
1836                 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
1837                 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
1838         } else {
1839                 svm_set_intercept(svm, INTERCEPT_CR0_READ);
1840                 svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
1841         }
1842 }
1843
1844 static bool svm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1845 {
1846         return true;
1847 }
1848
1849 void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1850 {
1851         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
1852         unsigned long old_cr4 = vcpu->arch.cr4;
1853
1854         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
1855                 svm_flush_tlb_current(vcpu);
1856
1857         vcpu->arch.cr4 = cr4;
1858         if (!npt_enabled) {
1859                 cr4 |= X86_CR4_PAE;
1860
1861                 if (!is_paging(vcpu))
1862                         cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
1863         }
1864         cr4 |= host_cr4_mce;
1865         to_svm(vcpu)->vmcb->save.cr4 = cr4;
1866         vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
1867
1868         if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
1869                 kvm_update_cpuid_runtime(vcpu);
1870 }
1871
1872 static void svm_set_segment(struct kvm_vcpu *vcpu,
1873                             struct kvm_segment *var, int seg)
1874 {
1875         struct vcpu_svm *svm = to_svm(vcpu);
1876         struct vmcb_seg *s = svm_seg(vcpu, seg);
1877
1878         s->base = var->base;
1879         s->limit = var->limit;
1880         s->selector = var->selector;
1881         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
1882         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
1883         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
1884         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
1885         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
1886         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
1887         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
1888         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
1889
1890         /*
1891          * This is always accurate, except if SYSRET returned to a segment
1892          * with SS.DPL != 3.  Intel does not have this quirk, and always
1893          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
1894          * would entail passing the CPL to userspace and back.
1895          */
1896         if (seg == VCPU_SREG_SS)
1897                 /* This is symmetric with svm_get_segment() */
1898                 svm->vmcb->save.cpl = (var->dpl & 3);
1899
1900         vmcb_mark_dirty(svm->vmcb, VMCB_SEG);
1901 }
1902
1903 static void svm_update_exception_bitmap(struct kvm_vcpu *vcpu)
1904 {
1905         struct vcpu_svm *svm = to_svm(vcpu);
1906
1907         clr_exception_intercept(svm, BP_VECTOR);
1908
1909         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
1910                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
1911                         set_exception_intercept(svm, BP_VECTOR);
1912         }
1913 }
1914
1915 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
1916 {
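             /*
              * When this CPU runs out of ASIDs, start a new generation: wrap
              * back to min_asid and flush all ASIDs on the next VMRUN so that
              * stale translations tagged with recycled ASIDs cannot be reused.
              */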
1917         if (sd->next_asid > sd->max_asid) {
1918                 ++sd->asid_generation;
1919                 sd->next_asid = sd->min_asid;
1920                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
1921                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1922         }
1923
1924         svm->current_vmcb->asid_generation = sd->asid_generation;
1925         svm->asid = sd->next_asid++;
1926 }
1927
1928 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
1929 {
1930         struct vmcb *vmcb = svm->vmcb;
1931
1932         if (svm->vcpu.arch.guest_state_protected)
1933                 return;
1934
1935         if (unlikely(value != vmcb->save.dr6)) {
1936                 vmcb->save.dr6 = value;
1937                 vmcb_mark_dirty(vmcb, VMCB_DR);
1938         }
1939 }
1940
1941 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
1942 {
1943         struct vcpu_svm *svm = to_svm(vcpu);
1944
1945         if (vcpu->arch.guest_state_protected)
1946                 return;
1947
1948         get_debugreg(vcpu->arch.db[0], 0);
1949         get_debugreg(vcpu->arch.db[1], 1);
1950         get_debugreg(vcpu->arch.db[2], 2);
1951         get_debugreg(vcpu->arch.db[3], 3);
1952         /*
1953          * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here,
1954          * because db_interception might need it.  We can do it before vmentry.
1955          */
1956         vcpu->arch.dr6 = svm->vmcb->save.dr6;
1957         vcpu->arch.dr7 = svm->vmcb->save.dr7;
1958         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
1959         set_dr_intercepts(svm);
1960 }
1961
1962 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
1963 {
1964         struct vcpu_svm *svm = to_svm(vcpu);
1965
1966         if (vcpu->arch.guest_state_protected)
1967                 return;
1968
1969         svm->vmcb->save.dr7 = value;
1970         vmcb_mark_dirty(svm->vmcb, VMCB_DR);
1971 }
1972
1973 static int pf_interception(struct kvm_vcpu *vcpu)
1974 {
1975         struct vcpu_svm *svm = to_svm(vcpu);
1976
1977         u64 fault_address = svm->vmcb->control.exit_info_2;
1978         u64 error_code = svm->vmcb->control.exit_info_1;
1979
1980         return kvm_handle_page_fault(vcpu, error_code, fault_address,
1981                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1982                         svm->vmcb->control.insn_bytes : NULL,
1983                         svm->vmcb->control.insn_len);
1984 }
1985
1986 static int npf_interception(struct kvm_vcpu *vcpu)
1987 {
1988         struct vcpu_svm *svm = to_svm(vcpu);
1989
1990         u64 fault_address = svm->vmcb->control.exit_info_2;
1991         u64 error_code = svm->vmcb->control.exit_info_1;
1992
1993         trace_kvm_page_fault(vcpu, fault_address, error_code);
1994         return kvm_mmu_page_fault(vcpu, fault_address, error_code,
1995                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
1996                         svm->vmcb->control.insn_bytes : NULL,
1997                         svm->vmcb->control.insn_len);
1998 }
1999
2000 static int db_interception(struct kvm_vcpu *vcpu)
2001 {
2002         struct kvm_run *kvm_run = vcpu->run;
2003         struct vcpu_svm *svm = to_svm(vcpu);
2004
2005         if (!(vcpu->guest_debug &
2006               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2007                 !svm->nmi_singlestep) {
2008                 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW;
2009                 kvm_queue_exception_p(vcpu, DB_VECTOR, payload);
2010                 return 1;
2011         }
2012
2013         if (svm->nmi_singlestep) {
2014                 disable_nmi_singlestep(svm);
2015                 /* Make sure we check for pending NMIs upon entry */
2016                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2017         }
2018
2019         if (vcpu->guest_debug &
2020             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2021                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2022                 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6;
2023                 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7;
2024                 kvm_run->debug.arch.pc =
2025                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2026                 kvm_run->debug.arch.exception = DB_VECTOR;
2027                 return 0;
2028         }
2029
2030         return 1;
2031 }
2032
2033 static int bp_interception(struct kvm_vcpu *vcpu)
2034 {
2035         struct vcpu_svm *svm = to_svm(vcpu);
2036         struct kvm_run *kvm_run = vcpu->run;
2037
2038         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2039         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2040         kvm_run->debug.arch.exception = BP_VECTOR;
2041         return 0;
2042 }
2043
2044 static int ud_interception(struct kvm_vcpu *vcpu)
2045 {
2046         return handle_ud(vcpu);
2047 }
2048
2049 static int ac_interception(struct kvm_vcpu *vcpu)
2050 {
2051         kvm_queue_exception_e(vcpu, AC_VECTOR, 0);
2052         return 1;
2053 }
2054
2055 static bool is_erratum_383(void)
2056 {
2057         int err, i;
2058         u64 value;
2059
2060         if (!erratum_383_found)
2061                 return false;
2062
2063         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2064         if (err)
2065                 return false;
2066
2067         /* Bit 62 may or may not be set for this mce */
2068         value &= ~(1ULL << 62);
2069
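             /* Anything other than the erratum 383 signature in MC0_STATUS is a real #MC. */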
2070         if (value != 0xb600000000010015ULL)
2071                 return false;
2072
2073         /* Clear MCi_STATUS registers */
2074         for (i = 0; i < 6; ++i)
2075                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2076
2077         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2078         if (!err) {
2079                 u32 low, high;
2080
2081                 value &= ~(1ULL << 2);
2082                 low    = lower_32_bits(value);
2083                 high   = upper_32_bits(value);
2084
2085                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2086         }
2087
2088         /* Flush tlb to evict multi-match entries */
2089         __flush_tlb_all();
2090
2091         return true;
2092 }
2093
2094 static void svm_handle_mce(struct kvm_vcpu *vcpu)
2095 {
2096         if (is_erratum_383()) {
2097                 /*
2098                  * Erratum 383 triggered. Guest state is corrupt so kill the
2099                  * guest.
2100                  */
2101                 pr_err("Guest triggered AMD Erratum 383\n");
2102
2103                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2104
2105                 return;
2106         }
2107
2108         /*
2109          * On an #MC intercept the MCE handler is not called automatically in
2110          * the host. So do it by hand here.
2111          */
2112         kvm_machine_check();
2113 }
2114
2115 static int mc_interception(struct kvm_vcpu *vcpu)
2116 {
2117         return 1;
2118 }
2119
2120 static int shutdown_interception(struct kvm_vcpu *vcpu)
2121 {
2122         struct kvm_run *kvm_run = vcpu->run;
2123         struct vcpu_svm *svm = to_svm(vcpu);
2124
2125         /*
2126          * The VM save area has already been encrypted so it
2127          * cannot be reinitialized - just terminate.
2128          */
2129         if (sev_es_guest(vcpu->kvm))
2130                 return -EINVAL;
2131
2132         /*
2133          * VMCB is undefined after a SHUTDOWN intercept.  INIT the vCPU to put
2134          * the VMCB in a known good state.  Unfortunately, KVM doesn't have
2135          * KVM_MP_STATE_SHUTDOWN and can't add it without potentially breaking
2136          * userspace.  From a platform view, INIT is acceptable behavior as
2137          * there exist bare metal platforms that automatically INIT the CPU
2138          * in response to shutdown.
2139          */
2140         clear_page(svm->vmcb);
2141         kvm_vcpu_reset(vcpu, true);
2142
2143         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2144         return 0;
2145 }
2146
2147 static int io_interception(struct kvm_vcpu *vcpu)
2148 {
2149         struct vcpu_svm *svm = to_svm(vcpu);
2150         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2151         int size, in, string;
2152         unsigned port;
2153
2154         ++vcpu->stat.io_exits;
2155         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2156         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2157         port = io_info >> 16;
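             /* The operand-size bits are one-hot, so the shift yields a width of 1, 2 or 4 bytes. */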
2158         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2159
2160         if (string) {
2161                 if (sev_es_guest(vcpu->kvm))
2162                         return sev_es_string_io(svm, size, port, in);
2163                 else
2164                         return kvm_emulate_instruction(vcpu, 0);
2165         }
2166
2167         svm->next_rip = svm->vmcb->control.exit_info_2;
2168
2169         return kvm_fast_pio(vcpu, size, port, in);
2170 }
2171
2172 static int nmi_interception(struct kvm_vcpu *vcpu)
2173 {
2174         return 1;
2175 }
2176
2177 static int smi_interception(struct kvm_vcpu *vcpu)
2178 {
2179         return 1;
2180 }
2181
2182 static int intr_interception(struct kvm_vcpu *vcpu)
2183 {
2184         ++vcpu->stat.irq_exits;
2185         return 1;
2186 }
2187
2188 static int vmload_vmsave_interception(struct kvm_vcpu *vcpu, bool vmload)
2189 {
2190         struct vcpu_svm *svm = to_svm(vcpu);
2191         struct vmcb *vmcb12;
2192         struct kvm_host_map map;
2193         int ret;
2194
2195         if (nested_svm_check_permissions(vcpu))
2196                 return 1;
2197
2198         ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
2199         if (ret) {
2200                 if (ret == -EINVAL)
2201                         kvm_inject_gp(vcpu, 0);
2202                 return 1;
2203         }
2204
2205         vmcb12 = map.hva;
2206
2207         ret = kvm_skip_emulated_instruction(vcpu);
2208
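             /*
              * VMLOAD copies the additional state (FS/GS/TR/LDTR along with
              * KernelGsBase, STAR/LSTAR/CSTAR/SFMASK and the SYSENTER MSRs)
              * from vmcb12 into the current VMCB; VMSAVE copies it back out.
              */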
2209         if (vmload) {
2210                 svm_copy_vmloadsave_state(svm->vmcb, vmcb12);
2211                 svm->sysenter_eip_hi = 0;
2212                 svm->sysenter_esp_hi = 0;
2213         } else {
2214                 svm_copy_vmloadsave_state(vmcb12, svm->vmcb);
2215         }
2216
2217         kvm_vcpu_unmap(vcpu, &map, true);
2218
2219         return ret;
2220 }
2221
2222 static int vmload_interception(struct kvm_vcpu *vcpu)
2223 {
2224         return vmload_vmsave_interception(vcpu, true);
2225 }
2226
2227 static int vmsave_interception(struct kvm_vcpu *vcpu)
2228 {
2229         return vmload_vmsave_interception(vcpu, false);
2230 }
2231
2232 static int vmrun_interception(struct kvm_vcpu *vcpu)
2233 {
2234         if (nested_svm_check_permissions(vcpu))
2235                 return 1;
2236
2237         return nested_svm_vmrun(vcpu);
2238 }
2239
2240 enum {
2241         NONE_SVM_INSTR,
2242         SVM_INSTR_VMRUN,
2243         SVM_INSTR_VMLOAD,
2244         SVM_INSTR_VMSAVE,
2245 };
2246
2247 /* Return NONE_SVM_INSTR if not SVM instrs, otherwise return decode result */
2248 static int svm_instr_opcode(struct kvm_vcpu *vcpu)
2249 {
2250         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
2251
2252         if (ctxt->b != 0x1 || ctxt->opcode_len != 2)
2253                 return NONE_SVM_INSTR;
2254
2255         switch (ctxt->modrm) {
2256         case 0xd8: /* VMRUN */
2257                 return SVM_INSTR_VMRUN;
2258         case 0xda: /* VMLOAD */
2259                 return SVM_INSTR_VMLOAD;
2260         case 0xdb: /* VMSAVE */
2261                 return SVM_INSTR_VMSAVE;
2262         default:
2263                 break;
2264         }
2265
2266         return NONE_SVM_INSTR;
2267 }
2268
2269 static int emulate_svm_instr(struct kvm_vcpu *vcpu, int opcode)
2270 {
2271         const int guest_mode_exit_codes[] = {
2272                 [SVM_INSTR_VMRUN] = SVM_EXIT_VMRUN,
2273                 [SVM_INSTR_VMLOAD] = SVM_EXIT_VMLOAD,
2274                 [SVM_INSTR_VMSAVE] = SVM_EXIT_VMSAVE,
2275         };
2276         int (*const svm_instr_handlers[])(struct kvm_vcpu *vcpu) = {
2277                 [SVM_INSTR_VMRUN] = vmrun_interception,
2278                 [SVM_INSTR_VMLOAD] = vmload_interception,
2279                 [SVM_INSTR_VMSAVE] = vmsave_interception,
2280         };
2281         struct vcpu_svm *svm = to_svm(vcpu);
2282         int ret;
2283
2284         if (is_guest_mode(vcpu)) {
2285                 /* Returns '1' or -errno on failure, '0' on success. */
2286                 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]);
2287                 if (ret)
2288                         return ret;
2289                 return 1;
2290         }
2291         return svm_instr_handlers[opcode](vcpu);
2292 }
2293
2294 /*
2295  * #GP handling code. Note that #GP can be triggered in the following two
2296  * cases:
2297  *   1) SVM VM-related instructions (VMRUN/VMSAVE/VMLOAD) that trigger #GP on
2298  *      some AMD CPUs when the EAX operand of these instructions points into
2299  *      a reserved memory region (e.g. SMM memory on the host).
2300  *   2) VMware backdoor
2301  */
2302 static int gp_interception(struct kvm_vcpu *vcpu)
2303 {
2304         struct vcpu_svm *svm = to_svm(vcpu);
2305         u32 error_code = svm->vmcb->control.exit_info_1;
2306         int opcode;
2307
2308         /* Both #GP cases have zero error_code */
2309         if (error_code)
2310                 goto reinject;
2311
2312         /* Decode the instruction for usage later */
2313         if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
2314                 goto reinject;
2315
2316         opcode = svm_instr_opcode(vcpu);
2317
2318         if (opcode == NONE_SVM_INSTR) {
2319                 if (!enable_vmware_backdoor)
2320                         goto reinject;
2321
2322                 /*
2323                  * VMware backdoor emulation on #GP interception only handles
2324                  * IN{S}, OUT{S}, and RDPMC.
2325                  */
2326                 if (!is_guest_mode(vcpu))
2327                         return kvm_emulate_instruction(vcpu,
2328                                 EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
2329         } else {
2330                 /* All SVM instructions expect page aligned RAX */
2331                 if (svm->vmcb->save.rax & ~PAGE_MASK)
2332                         goto reinject;
2333
2334                 return emulate_svm_instr(vcpu, opcode);
2335         }
2336
2337 reinject:
2338         kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2339         return 1;
2340 }
2341
2342 void svm_set_gif(struct vcpu_svm *svm, bool value)
2343 {
2344         if (value) {
2345                 /*
2346                  * If VGIF is enabled, the STGI intercept is only added to
2347                  * detect the opening of the SMI/NMI window; remove it now.
2348                  * Likewise, clear the VINTR intercept; we will set it
2349                  * again while processing KVM_REQ_EVENT if needed.
2350                  */
2351                 if (vgif)
2352                         svm_clr_intercept(svm, INTERCEPT_STGI);
2353                 if (svm_is_intercept(svm, INTERCEPT_VINTR))
2354                         svm_clear_vintr(svm);
2355
2356                 enable_gif(svm);
2357                 if (svm->vcpu.arch.smi_pending ||
2358                     svm->vcpu.arch.nmi_pending ||
2359                     kvm_cpu_has_injectable_intr(&svm->vcpu) ||
2360                     kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
2361                         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
2362         } else {
2363                 disable_gif(svm);
2364
2365                 /*
2366                  * After a CLGI no interrupts should come.  But if vGIF is
2367                  * in use, we still rely on the VINTR intercept (rather than
2368                  * STGI) to detect an open interrupt window.
2369                  */
2370                 if (!vgif)
2371                         svm_clear_vintr(svm);
2372         }
2373 }
2374
2375 static int stgi_interception(struct kvm_vcpu *vcpu)
2376 {
2377         int ret;
2378
2379         if (nested_svm_check_permissions(vcpu))
2380                 return 1;
2381
2382         ret = kvm_skip_emulated_instruction(vcpu);
2383         svm_set_gif(to_svm(vcpu), true);
2384         return ret;
2385 }
2386
2387 static int clgi_interception(struct kvm_vcpu *vcpu)
2388 {
2389         int ret;
2390
2391         if (nested_svm_check_permissions(vcpu))
2392                 return 1;
2393
2394         ret = kvm_skip_emulated_instruction(vcpu);
2395         svm_set_gif(to_svm(vcpu), false);
2396         return ret;
2397 }
2398
2399 static int invlpga_interception(struct kvm_vcpu *vcpu)
2400 {
2401         gva_t gva = kvm_rax_read(vcpu);
2402         u32 asid = kvm_rcx_read(vcpu);
2403
2404         /* FIXME: Handle an address size prefix. */
2405         if (!is_long_mode(vcpu))
2406                 gva = (u32)gva;
2407
2408         trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva);
2409
2410         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
2411         kvm_mmu_invlpg(vcpu, gva);
2412
2413         return kvm_skip_emulated_instruction(vcpu);
2414 }
2415
2416 static int skinit_interception(struct kvm_vcpu *vcpu)
2417 {
2418         trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu));
2419
2420         kvm_queue_exception(vcpu, UD_VECTOR);
2421         return 1;
2422 }
2423
2424 static int task_switch_interception(struct kvm_vcpu *vcpu)
2425 {
2426         struct vcpu_svm *svm = to_svm(vcpu);
2427         u16 tss_selector;
2428         int reason;
2429         int int_type = svm->vmcb->control.exit_int_info &
2430                 SVM_EXITINTINFO_TYPE_MASK;
2431         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
2432         uint32_t type =
2433                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
2434         uint32_t idt_v =
2435                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
2436         bool has_error_code = false;
2437         u32 error_code = 0;
2438
2439         tss_selector = (u16)svm->vmcb->control.exit_info_1;
2440
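             /*
              * Work out why the task switch happened: exit_info_2 flags IRET
              * and far JMP, a valid exit_int_info means it came through a task
              * gate, and anything else is treated as a CALL.
              */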
2441         if (svm->vmcb->control.exit_info_2 &
2442             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
2443                 reason = TASK_SWITCH_IRET;
2444         else if (svm->vmcb->control.exit_info_2 &
2445                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
2446                 reason = TASK_SWITCH_JMP;
2447         else if (idt_v)
2448                 reason = TASK_SWITCH_GATE;
2449         else
2450                 reason = TASK_SWITCH_CALL;
2451
2452         if (reason == TASK_SWITCH_GATE) {
2453                 switch (type) {
2454                 case SVM_EXITINTINFO_TYPE_NMI:
2455                         vcpu->arch.nmi_injected = false;
2456                         break;
2457                 case SVM_EXITINTINFO_TYPE_EXEPT:
2458                         if (svm->vmcb->control.exit_info_2 &
2459                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
2460                                 has_error_code = true;
2461                                 error_code =
2462                                         (u32)svm->vmcb->control.exit_info_2;
2463                         }
2464                         kvm_clear_exception_queue(vcpu);
2465                         break;
2466                 case SVM_EXITINTINFO_TYPE_INTR:
2467                 case SVM_EXITINTINFO_TYPE_SOFT:
2468                         kvm_clear_interrupt_queue(vcpu);
2469                         break;
2470                 default:
2471                         break;
2472                 }
2473         }
2474
2475         if (reason != TASK_SWITCH_GATE ||
2476             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
2477             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
2478              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
2479                 if (!svm_skip_emulated_instruction(vcpu))
2480                         return 0;
2481         }
2482
2483         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
2484                 int_vec = -1;
2485
2486         return kvm_task_switch(vcpu, tss_selector, int_vec, reason,
2487                                has_error_code, error_code);
2488 }
2489
2490 static void svm_clr_iret_intercept(struct vcpu_svm *svm)
2491 {
2492         if (!sev_es_guest(svm->vcpu.kvm))
2493                 svm_clr_intercept(svm, INTERCEPT_IRET);
2494 }
2495
2496 static void svm_set_iret_intercept(struct vcpu_svm *svm)
2497 {
2498         if (!sev_es_guest(svm->vcpu.kvm))
2499                 svm_set_intercept(svm, INTERCEPT_IRET);
2500 }
2501
2502 static int iret_interception(struct kvm_vcpu *vcpu)
2503 {
2504         struct vcpu_svm *svm = to_svm(vcpu);
2505
2506         ++vcpu->stat.nmi_window_exits;
2507         svm->awaiting_iret_completion = true;
2508
2509         svm_clr_iret_intercept(svm);
2510         if (!sev_es_guest(vcpu->kvm))
2511                 svm->nmi_iret_rip = kvm_rip_read(vcpu);
2512
2513         kvm_make_request(KVM_REQ_EVENT, vcpu);
2514         return 1;
2515 }
2516
2517 static int invlpg_interception(struct kvm_vcpu *vcpu)
2518 {
2519         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2520                 return kvm_emulate_instruction(vcpu, 0);
2521
2522         kvm_mmu_invlpg(vcpu, to_svm(vcpu)->vmcb->control.exit_info_1);
2523         return kvm_skip_emulated_instruction(vcpu);
2524 }
2525
2526 static int emulate_on_interception(struct kvm_vcpu *vcpu)
2527 {
2528         return kvm_emulate_instruction(vcpu, 0);
2529 }
2530
2531 static int rsm_interception(struct kvm_vcpu *vcpu)
2532 {
2533         return kvm_emulate_instruction_from_buffer(vcpu, rsm_ins_bytes, 2);
2534 }
2535
2536 static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
2537                                             unsigned long val)
2538 {
2539         struct vcpu_svm *svm = to_svm(vcpu);
2540         unsigned long cr0 = vcpu->arch.cr0;
2541         bool ret = false;
2542
2543         if (!is_guest_mode(vcpu) ||
2544             (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
2545                 return false;
2546
2547         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
2548         val &= ~SVM_CR0_SELECTIVE_MASK;
2549
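             /* The selective CR0 intercept fires only when bits other than TS and MP change. */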
2550         if (cr0 ^ val) {
2551                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
2552                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
2553         }
2554
2555         return ret;
2556 }
2557
2558 #define CR_VALID (1ULL << 63)
2559
2560 static int cr_interception(struct kvm_vcpu *vcpu)
2561 {
2562         struct vcpu_svm *svm = to_svm(vcpu);
2563         int reg, cr;
2564         unsigned long val;
2565         int err;
2566
2567         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
2568                 return emulate_on_interception(vcpu);
2569
2570         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
2571                 return emulate_on_interception(vcpu);
2572
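             /*
              * With decode assists, exit_info_1 identifies the GPR operand.
              * CR write exit codes are offset by 16 from the corresponding
              * reads, hence the "cr >= 16" test below.
              */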
2573         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2574         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
2575                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
2576         else
2577                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
2578
2579         err = 0;
2580         if (cr >= 16) { /* mov to cr */
2581                 cr -= 16;
2582                 val = kvm_register_read(vcpu, reg);
2583                 trace_kvm_cr_write(cr, val);
2584                 switch (cr) {
2585                 case 0:
2586                         if (!check_selective_cr0_intercepted(vcpu, val))
2587                                 err = kvm_set_cr0(vcpu, val);
2588                         else
2589                                 return 1;
2590
2591                         break;
2592                 case 3:
2593                         err = kvm_set_cr3(vcpu, val);
2594                         break;
2595                 case 4:
2596                         err = kvm_set_cr4(vcpu, val);
2597                         break;
2598                 case 8:
2599                         err = kvm_set_cr8(vcpu, val);
2600                         break;
2601                 default:
2602                         WARN(1, "unhandled write to CR%d", cr);
2603                         kvm_queue_exception(vcpu, UD_VECTOR);
2604                         return 1;
2605                 }
2606         } else { /* mov from cr */
2607                 switch (cr) {
2608                 case 0:
2609                         val = kvm_read_cr0(vcpu);
2610                         break;
2611                 case 2:
2612                         val = vcpu->arch.cr2;
2613                         break;
2614                 case 3:
2615                         val = kvm_read_cr3(vcpu);
2616                         break;
2617                 case 4:
2618                         val = kvm_read_cr4(vcpu);
2619                         break;
2620                 case 8:
2621                         val = kvm_get_cr8(vcpu);
2622                         break;
2623                 default:
2624                         WARN(1, "unhandled read from CR%d", cr);
2625                         kvm_queue_exception(vcpu, UD_VECTOR);
2626                         return 1;
2627                 }
2628                 kvm_register_write(vcpu, reg, val);
2629                 trace_kvm_cr_read(cr, val);
2630         }
2631         return kvm_complete_insn_gp(vcpu, err);
2632 }
2633
2634 static int cr_trap(struct kvm_vcpu *vcpu)
2635 {
2636         struct vcpu_svm *svm = to_svm(vcpu);
2637         unsigned long old_value, new_value;
2638         unsigned int cr;
2639         int ret = 0;
2640
2641         new_value = (unsigned long)svm->vmcb->control.exit_info_1;
2642
2643         cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP;
2644         switch (cr) {
2645         case 0:
2646                 old_value = kvm_read_cr0(vcpu);
2647                 svm_set_cr0(vcpu, new_value);
2648
2649                 kvm_post_set_cr0(vcpu, old_value, new_value);
2650                 break;
2651         case 4:
2652                 old_value = kvm_read_cr4(vcpu);
2653                 svm_set_cr4(vcpu, new_value);
2654
2655                 kvm_post_set_cr4(vcpu, old_value, new_value);
2656                 break;
2657         case 8:
2658                 ret = kvm_set_cr8(vcpu, new_value);
2659                 break;
2660         default:
2661                 WARN(1, "unhandled CR%d write trap", cr);
2662                 kvm_queue_exception(vcpu, UD_VECTOR);
2663                 return 1;
2664         }
2665
2666         return kvm_complete_insn_gp(vcpu, ret);
2667 }
2668
2669 static int dr_interception(struct kvm_vcpu *vcpu)
2670 {
2671         struct vcpu_svm *svm = to_svm(vcpu);
2672         int reg, dr;
2673         unsigned long val;
2674         int err = 0;
2675
2676         if (vcpu->guest_debug == 0) {
2677                 /*
2678                  * No more DR vmexits; force a reload of the debug registers
2679                  * and reenter on this instruction.  The next vmexit will
2680                  * retrieve the full state of the debug registers.
2681                  */
2682                 clr_dr_intercepts(svm);
2683                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
2684                 return 1;
2685         }
2686
2687         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
2688                 return emulate_on_interception(vcpu);
2689
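             /* As with CRs, DR write exit codes are offset by 16 from the reads. */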
2690         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
2691         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
2692         if (dr >= 16) { /* mov to DRn  */
2693                 dr -= 16;
2694                 val = kvm_register_read(vcpu, reg);
2695                 err = kvm_set_dr(vcpu, dr, val);
2696         } else {
2697                 kvm_get_dr(vcpu, dr, &val);
2698                 kvm_register_write(vcpu, reg, val);
2699         }
2700
2701         return kvm_complete_insn_gp(vcpu, err);
2702 }
2703
2704 static int cr8_write_interception(struct kvm_vcpu *vcpu)
2705 {
2706         int r;
2707
2708         u8 cr8_prev = kvm_get_cr8(vcpu);
2709         /* instruction emulation calls kvm_set_cr8() */
2710         r = cr_interception(vcpu);
2711         if (lapic_in_kernel(vcpu))
2712                 return r;
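             /*
              * With a userspace local APIC, exit with KVM_EXIT_SET_TPR only if
              * the TPR was lowered, since lowering it may unmask a pending
              * interrupt that userspace has to deliver.
              */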
2713         if (cr8_prev <= kvm_get_cr8(vcpu))
2714                 return r;
2715         vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
2716         return 0;
2717 }
2718
2719 static int efer_trap(struct kvm_vcpu *vcpu)
2720 {
2721         struct msr_data msr_info;
2722         int ret;
2723
2724         /*
2725          * Clear the EFER_SVME bit from EFER. The SVM code always sets this
2726          * bit in svm_set_efer(), but __kvm_valid_efer() checks it against
2727          * whether the guest has X86_FEATURE_SVM - this avoids a failure if
2728          * the guest doesn't have X86_FEATURE_SVM.
2729          */
2730         msr_info.host_initiated = false;
2731         msr_info.index = MSR_EFER;
2732         msr_info.data = to_svm(vcpu)->vmcb->control.exit_info_1 & ~EFER_SVME;
2733         ret = kvm_set_msr_common(vcpu, &msr_info);
2734
2735         return kvm_complete_insn_gp(vcpu, ret);
2736 }
2737
2738 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
2739 {
2740         msr->data = 0;
2741
2742         switch (msr->index) {
2743         case MSR_AMD64_DE_CFG:
2744                 if (cpu_feature_enabled(X86_FEATURE_LFENCE_RDTSC))
2745                         msr->data |= MSR_AMD64_DE_CFG_LFENCE_SERIALIZE;
2746                 break;
2747         default:
2748                 return KVM_MSR_RET_INVALID;
2749         }
2750
2751         return 0;
2752 }
2753
2754 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2755 {
2756         struct vcpu_svm *svm = to_svm(vcpu);
2757
2758         switch (msr_info->index) {
2759         case MSR_AMD64_TSC_RATIO:
2760                 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled)
2761                         return 1;
2762                 msr_info->data = svm->tsc_ratio_msr;
2763                 break;
2764         case MSR_STAR:
2765                 msr_info->data = svm->vmcb01.ptr->save.star;
2766                 break;
2767 #ifdef CONFIG_X86_64
2768         case MSR_LSTAR:
2769                 msr_info->data = svm->vmcb01.ptr->save.lstar;
2770                 break;
2771         case MSR_CSTAR:
2772                 msr_info->data = svm->vmcb01.ptr->save.cstar;
2773                 break;
2774         case MSR_KERNEL_GS_BASE:
2775                 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base;
2776                 break;
2777         case MSR_SYSCALL_MASK:
2778                 msr_info->data = svm->vmcb01.ptr->save.sfmask;
2779                 break;
2780 #endif
2781         case MSR_IA32_SYSENTER_CS:
2782                 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs;
2783                 break;
2784         case MSR_IA32_SYSENTER_EIP:
2785                 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip;
2786                 if (guest_cpuid_is_intel(vcpu))
2787                         msr_info->data |= (u64)svm->sysenter_eip_hi << 32;
2788                 break;
2789         case MSR_IA32_SYSENTER_ESP:
2790                 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp;
2791                 if (guest_cpuid_is_intel(vcpu))
2792                         msr_info->data |= (u64)svm->sysenter_esp_hi << 32;
2793                 break;
2794         case MSR_TSC_AUX:
2795                 msr_info->data = svm->tsc_aux;
2796                 break;
2797         case MSR_IA32_DEBUGCTLMSR:
2798         case MSR_IA32_LASTBRANCHFROMIP:
2799         case MSR_IA32_LASTBRANCHTOIP:
2800         case MSR_IA32_LASTINTFROMIP:
2801         case MSR_IA32_LASTINTTOIP:
2802                 msr_info->data = svm_get_lbr_msr(svm, msr_info->index);
2803                 break;
2804         case MSR_VM_HSAVE_PA:
2805                 msr_info->data = svm->nested.hsave_msr;
2806                 break;
2807         case MSR_VM_CR:
2808                 msr_info->data = svm->nested.vm_cr_msr;
2809                 break;
2810         case MSR_IA32_SPEC_CTRL:
2811                 if (!msr_info->host_initiated &&
2812                     !guest_has_spec_ctrl_msr(vcpu))
2813                         return 1;
2814
2815                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2816                         msr_info->data = svm->vmcb->save.spec_ctrl;
2817                 else
2818                         msr_info->data = svm->spec_ctrl;
2819                 break;
2820         case MSR_AMD64_VIRT_SPEC_CTRL:
2821                 if (!msr_info->host_initiated &&
2822                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2823                         return 1;
2824
2825                 msr_info->data = svm->virt_spec_ctrl;
2826                 break;
2827         case MSR_F15H_IC_CFG: {
2828
2829                 int family, model;
2830
2831                 family = guest_cpuid_family(vcpu);
2832                 model  = guest_cpuid_model(vcpu);
2833
2834                 if (family < 0 || model < 0)
2835                         return kvm_get_msr_common(vcpu, msr_info);
2836
2837                 msr_info->data = 0;
2838
2839                 if (family == 0x15 &&
2840                     (model >= 0x2 && model < 0x20))
2841                         msr_info->data = 0x1E;
2842                 }
2843                 break;
2844         case MSR_AMD64_DE_CFG:
2845                 msr_info->data = svm->msr_decfg;
2846                 break;
2847         default:
2848                 return kvm_get_msr_common(vcpu, msr_info);
2849         }
2850         return 0;
2851 }
2852
2853 static int svm_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
2854 {
2855         struct vcpu_svm *svm = to_svm(vcpu);
2856         if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb))
2857                 return kvm_complete_insn_gp(vcpu, err);
2858
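             /*
              * A failed MSR access by an SEV-ES guest can't be completed by
              * injecting #GP directly; report it through the GHCB instead by
              * setting sw_exit_info_1 and encoding a #GP event in
              * sw_exit_info_2.
              */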
2859         ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1);
2860         ghcb_set_sw_exit_info_2(svm->sev_es.ghcb,
2861                                 X86_TRAP_GP |
2862                                 SVM_EVTINJ_TYPE_EXEPT |
2863                                 SVM_EVTINJ_VALID);
2864         return 1;
2865 }
2866
2867 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
2868 {
2869         struct vcpu_svm *svm = to_svm(vcpu);
2870         int svm_dis, chg_mask;
2871
2872         if (data & ~SVM_VM_CR_VALID_MASK)
2873                 return 1;
2874
2875         chg_mask = SVM_VM_CR_VALID_MASK;
2876
2877         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
2878                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
2879
2880         svm->nested.vm_cr_msr &= ~chg_mask;
2881         svm->nested.vm_cr_msr |= (data & chg_mask);
2882
2883         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
2884
2885         /* check for svm_disable while efer.svme is set */
2886         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
2887                 return 1;
2888
2889         return 0;
2890 }
2891
2892 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2893 {
2894         struct vcpu_svm *svm = to_svm(vcpu);
2895         int ret = 0;
2896
2897         u32 ecx = msr->index;
2898         u64 data = msr->data;
2899         switch (ecx) {
2900         case MSR_AMD64_TSC_RATIO:
2901
2902                 if (!svm->tsc_scaling_enabled) {
2903
2904                         if (!msr->host_initiated)
2905                                 return 1;
2906                         /*
2907                          * In case TSC scaling is not enabled, always
2908                          * leave this MSR at the default value.
2909                          *
2910                          * Due to a bug in QEMU 6.2.0, it tries to set
2911                          * this MSR to 0 if TSC scaling is not enabled.
2912                          * Ignore that value as well.
2913                          */
2914                         if (data != 0 && data != svm->tsc_ratio_msr)
2915                                 return 1;
2916                         break;
2917                 }
2918
2919                 if (data & SVM_TSC_RATIO_RSVD)
2920                         return 1;
2921
2922                 svm->tsc_ratio_msr = data;
2923
2924                 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
2925                         nested_svm_update_tsc_ratio_msr(vcpu);
2926
2927                 break;
2928         case MSR_IA32_CR_PAT:
2929                 ret = kvm_set_msr_common(vcpu, msr);
2930                 if (ret)
2931                         break;
2932
2933                 svm->vmcb01.ptr->save.g_pat = data;
2934                 if (is_guest_mode(vcpu))
2935                         nested_vmcb02_compute_g_pat(svm);
2936                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
2937                 break;
2938         case MSR_IA32_SPEC_CTRL:
2939                 if (!msr->host_initiated &&
2940                     !guest_has_spec_ctrl_msr(vcpu))
2941                         return 1;
2942
2943                 if (kvm_spec_ctrl_test_value(data))
2944                         return 1;
2945
2946                 if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
2947                         svm->vmcb->save.spec_ctrl = data;
2948                 else
2949                         svm->spec_ctrl = data;
2950                 if (!data)
2951                         break;
2952
2953                 /*
2954                  * For non-nested:
2955                  * When it's written (to non-zero) for the first time, pass
2956                  * it through.
2957                  *
2958                  * For nested:
2959                  * The handling of the MSR bitmap for L2 guests is done in
2960                  * nested_svm_vmrun_msrpm.
2961                  * We update the L1 MSR bit as well since it will end up
2962                  * touching the MSR anyway now.
2963                  */
2964                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
2965                 break;
2966         case MSR_AMD64_VIRT_SPEC_CTRL:
2967                 if (!msr->host_initiated &&
2968                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
2969                         return 1;
2970
2971                 if (data & ~SPEC_CTRL_SSBD)
2972                         return 1;
2973
2974                 svm->virt_spec_ctrl = data;
2975                 break;
2976         case MSR_STAR:
2977                 svm->vmcb01.ptr->save.star = data;
2978                 break;
2979 #ifdef CONFIG_X86_64
2980         case MSR_LSTAR:
2981                 svm->vmcb01.ptr->save.lstar = data;
2982                 break;
2983         case MSR_CSTAR:
2984                 svm->vmcb01.ptr->save.cstar = data;
2985                 break;
2986         case MSR_KERNEL_GS_BASE:
2987                 svm->vmcb01.ptr->save.kernel_gs_base = data;
2988                 break;
2989         case MSR_SYSCALL_MASK:
2990                 svm->vmcb01.ptr->save.sfmask = data;
2991                 break;
2992 #endif
2993         case MSR_IA32_SYSENTER_CS:
2994                 svm->vmcb01.ptr->save.sysenter_cs = data;
2995                 break;
2996         case MSR_IA32_SYSENTER_EIP:
2997                 svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
2998                 /*
2999                  * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
3000                  * when we spoof an Intel vendor ID (for cross-vendor migration).
3001                  * In this case we use this intercept to track the high
3002                  * 32-bit part of these MSRs to support Intel's
3003                  * implementation of SYSENTER/SYSEXIT.
3004                  */
3005                 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3006                 break;
3007         case MSR_IA32_SYSENTER_ESP:
3008                 svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
3009                 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
3010                 break;
3011         case MSR_TSC_AUX:
3012                 /*
3013                  * TSC_AUX is usually changed only during boot and never read
3014                  * directly.  Intercept TSC_AUX instead of exposing it to the
3015                  * guest via direct_access_msrs, and switch it via user return.
3016                  */
3017                 preempt_disable();
3018                 ret = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
3019                 preempt_enable();
3020                 if (ret)
3021                         break;
3022
3023                 svm->tsc_aux = data;
3024                 break;
3025         case MSR_IA32_DEBUGCTLMSR:
3026                 if (!lbrv) {
3027                         kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3028                         break;
3029                 }
3030                 if (data & DEBUGCTL_RESERVED_BITS)
3031                         return 1;
3032
3033                 if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK)
3034                         svm->vmcb->save.dbgctl = data;
3035                 else
3036                         svm->vmcb01.ptr->save.dbgctl = data;
3037
3038                 svm_update_lbrv(vcpu);
3039
3040                 break;
3041         case MSR_VM_HSAVE_PA:
3042                 /*
3043                  * Old kernels did not validate the value written to
3044                  * MSR_VM_HSAVE_PA.  Allow KVM_SET_MSR to set an invalid
3045                  * value to allow live migrating buggy or malicious guests
3046                  * originating from those kernels.
3047                  */
3048                 if (!msr->host_initiated && !page_address_valid(vcpu, data))
3049                         return 1;
3050
3051                 svm->nested.hsave_msr = data & PAGE_MASK;
3052                 break;
3053         case MSR_VM_CR:
3054                 return svm_set_vm_cr(vcpu, data);
3055         case MSR_VM_IGNNE:
3056                 kvm_pr_unimpl_wrmsr(vcpu, ecx, data);
3057                 break;
3058         case MSR_AMD64_DE_CFG: {
3059                 struct kvm_msr_entry msr_entry;
3060
3061                 msr_entry.index = msr->index;
3062                 if (svm_get_msr_feature(&msr_entry))
3063                         return 1;
3064
3065                 /* Check the supported bits */
3066                 if (data & ~msr_entry.data)
3067                         return 1;
3068
3069                 /* Don't allow the guest to change a bit, #GP */
3070                 if (!msr->host_initiated && (data ^ msr_entry.data))
3071                         return 1;
3072
3073                 svm->msr_decfg = data;
3074                 break;
3075         }
3076         default:
3077                 return kvm_set_msr_common(vcpu, msr);
3078         }
3079         return ret;
3080 }
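
/*
 * Illustrative sketch, not part of the kernel source: how an MSR index maps
 * to its two intercept bits in the 8 KiB MSR permission map manipulated by
 * set_msr_interception() above.  Each of the three architectural MSR ranges
 * owns 2 KiB of the map, and every MSR gets two consecutive bits (the even
 * bit gates reads, the odd bit gates writes).  The example_* name and the
 * "-1 on miss" convention are hypothetical.
 */
static inline long example_msrpm_read_bit(u32 msr)
{
        static const u32 range_base[] = { 0x00000000, 0xc0000000, 0xc0010000 };
        int i;

        for (i = 0; i < ARRAY_SIZE(range_base); i++) {
                /* 8192 MSRs per range; unsigned wrap rejects msr < base. */
                if (msr - range_base[i] < 0x2000)
                        return i * 2048 * 8 + (msr - range_base[i]) * 2;
        }

        return -1;      /* MSR is not covered by the permission map */
}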
3081
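/* For SVM_EXIT_MSR, EXITINFO1 is 1 for an intercepted WRMSR and 0 for RDMSR. */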
3082 static int msr_interception(struct kvm_vcpu *vcpu)
3083 {
3084         if (to_svm(vcpu)->vmcb->control.exit_info_1)
3085                 return kvm_emulate_wrmsr(vcpu);
3086         else
3087                 return kvm_emulate_rdmsr(vcpu);
3088 }
3089
3090 static int interrupt_window_interception(struct kvm_vcpu *vcpu)
3091 {
3092         kvm_make_request(KVM_REQ_EVENT, vcpu);
3093         svm_clear_vintr(to_svm(vcpu));
3094
3095         /*
3096          * If not running nested, for AVIC, the only reason to end up here is ExtINTs.
3097          * In this case AVIC was temporarily disabled for
3098          * requesting the IRQ window and we have to re-enable it.
3099          *
3100          * If running nested, still remove the VM wide AVIC inhibit to
3101          * support the case in which the interrupt window was requested when the
3102          * vCPU was not running nested.
3103          *
3104          * All vCPUs that are still running nested will keep their AVIC
3105          * inhibited due to the per-vCPU AVIC inhibition.
3106          */
3107         kvm_clear_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3108
3109         ++vcpu->stat.irq_window_exits;
3110         return 1;
3111 }
3112
3113 static int pause_interception(struct kvm_vcpu *vcpu)
3114 {
3115         bool in_kernel;
3116         /*
3117          * CPL is not made available for an SEV-ES guest, therefore
3118          * vcpu->arch.preempted_in_kernel can never be true.  Just
3119          * set in_kernel to false as well.
3120          */
3121         in_kernel = !sev_es_guest(vcpu->kvm) && svm_get_cpl(vcpu) == 0;
3122
3123         grow_ple_window(vcpu);
3124
3125         kvm_vcpu_on_spin(vcpu, in_kernel);
3126         return kvm_skip_emulated_instruction(vcpu);
3127 }
3128
3129 static int invpcid_interception(struct kvm_vcpu *vcpu)
3130 {
3131         struct vcpu_svm *svm = to_svm(vcpu);
3132         unsigned long type;
3133         gva_t gva;
3134
3135         if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
3136                 kvm_queue_exception(vcpu, UD_VECTOR);
3137                 return 1;
3138         }
3139
3140         /*
3141          * For an INVPCID intercept:
3142          * EXITINFO1 provides the linear address of the memory operand.
3143          * EXITINFO2 provides the contents of the register operand.
3144          */
3145         type = svm->vmcb->control.exit_info_2;
3146         gva = svm->vmcb->control.exit_info_1;
3147
3148         return kvm_handle_invpcid(vcpu, type, gva);
3149 }
3150
3151 static int (*const svm_exit_handlers[])(struct kvm_vcpu *vcpu) = {
3152         [SVM_EXIT_READ_CR0]                     = cr_interception,
3153         [SVM_EXIT_READ_CR3]                     = cr_interception,
3154         [SVM_EXIT_READ_CR4]                     = cr_interception,
3155         [SVM_EXIT_READ_CR8]                     = cr_interception,
3156         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
3157         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
3158         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
3159         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
3160         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
3161         [SVM_EXIT_READ_DR0]                     = dr_interception,
3162         [SVM_EXIT_READ_DR1]                     = dr_interception,
3163         [SVM_EXIT_READ_DR2]                     = dr_interception,
3164         [SVM_EXIT_READ_DR3]                     = dr_interception,
3165         [SVM_EXIT_READ_DR4]                     = dr_interception,
3166         [SVM_EXIT_READ_DR5]                     = dr_interception,
3167         [SVM_EXIT_READ_DR6]                     = dr_interception,
3168         [SVM_EXIT_READ_DR7]                     = dr_interception,
3169         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
3170         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
3171         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
3172         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
3173         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
3174         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
3175         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
3176         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
3177         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
3178         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
3179         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
3180         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
3181         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
3182         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
3183         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
3184         [SVM_EXIT_INTR]                         = intr_interception,
3185         [SVM_EXIT_NMI]                          = nmi_interception,
3186         [SVM_EXIT_SMI]                          = smi_interception,
3187         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
3188         [SVM_EXIT_RDPMC]                        = kvm_emulate_rdpmc,
3189         [SVM_EXIT_CPUID]                        = kvm_emulate_cpuid,
3190         [SVM_EXIT_IRET]                         = iret_interception,
3191         [SVM_EXIT_INVD]                         = kvm_emulate_invd,
3192         [SVM_EXIT_PAUSE]                        = pause_interception,
3193         [SVM_EXIT_HLT]                          = kvm_emulate_halt,
3194         [SVM_EXIT_INVLPG]                       = invlpg_interception,
3195         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
3196         [SVM_EXIT_IOIO]                         = io_interception,
3197         [SVM_EXIT_MSR]                          = msr_interception,
3198         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
3199         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
3200         [SVM_EXIT_VMRUN]                        = vmrun_interception,
3201         [SVM_EXIT_VMMCALL]                      = kvm_emulate_hypercall,
3202         [SVM_EXIT_VMLOAD]                       = vmload_interception,
3203         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
3204         [SVM_EXIT_STGI]                         = stgi_interception,
3205         [SVM_EXIT_CLGI]                         = clgi_interception,
3206         [SVM_EXIT_SKINIT]                       = skinit_interception,
3207         [SVM_EXIT_RDTSCP]                       = kvm_handle_invalid_op,
3208         [SVM_EXIT_WBINVD]                       = kvm_emulate_wbinvd,
3209         [SVM_EXIT_MONITOR]                      = kvm_emulate_monitor,
3210         [SVM_EXIT_MWAIT]                        = kvm_emulate_mwait,
3211         [SVM_EXIT_XSETBV]                       = kvm_emulate_xsetbv,
3212         [SVM_EXIT_RDPRU]                        = kvm_handle_invalid_op,
3213         [SVM_EXIT_EFER_WRITE_TRAP]              = efer_trap,
3214         [SVM_EXIT_CR0_WRITE_TRAP]               = cr_trap,
3215         [SVM_EXIT_CR4_WRITE_TRAP]               = cr_trap,
3216         [SVM_EXIT_CR8_WRITE_TRAP]               = cr_trap,
3217         [SVM_EXIT_INVPCID]                      = invpcid_interception,
3218         [SVM_EXIT_NPF]                          = npf_interception,
3219         [SVM_EXIT_RSM]                          = rsm_interception,
3220         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
3221         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
3222         [SVM_EXIT_VMGEXIT]                      = sev_handle_vmgexit,
3223 };
3224
3225 static void dump_vmcb(struct kvm_vcpu *vcpu)
3226 {
3227         struct vcpu_svm *svm = to_svm(vcpu);
3228         struct vmcb_control_area *control = &svm->vmcb->control;
3229         struct vmcb_save_area *save = &svm->vmcb->save;
3230         struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save;
3231
3232         if (!dump_invalid_vmcb) {
3233                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
3234                 return;
3235         }
3236
3237         pr_err("VMCB %p, last attempted VMRUN on CPU %d\n",
3238                svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu);
3239         pr_err("VMCB Control Area:\n");
3240         pr_err("%-20s%04x\n", "cr_read:", control->intercepts[INTERCEPT_CR] & 0xffff);
3241         pr_err("%-20s%04x\n", "cr_write:", control->intercepts[INTERCEPT_CR] >> 16);
3242         pr_err("%-20s%04x\n", "dr_read:", control->intercepts[INTERCEPT_DR] & 0xffff);
3243         pr_err("%-20s%04x\n", "dr_write:", control->intercepts[INTERCEPT_DR] >> 16);
3244         pr_err("%-20s%08x\n", "exceptions:", control->intercepts[INTERCEPT_EXCEPTION]);
3245         pr_err("%-20s%08x %08x\n", "intercepts:",
3246                control->intercepts[INTERCEPT_WORD3],
3247                control->intercepts[INTERCEPT_WORD4]);
3248         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
3249         pr_err("%-20s%d\n", "pause filter threshold:",
3250                control->pause_filter_thresh);
3251         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
3252         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
3253         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
3254         pr_err("%-20s%d\n", "asid:", control->asid);
3255         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
3256         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
3257         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
3258         pr_err("%-20s%08x\n", "int_state:", control->int_state);
3259         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
3260         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
3261         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
3262         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
3263         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
3264         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
3265         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
3266         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
3267         pr_err("%-20s%016llx\n", "ghcb:", control->ghcb_gpa);
3268         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
3269         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
3270         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
3271         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
3272         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
3273         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
3274         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
3275         pr_err("%-20s%016llx\n", "vmsa_pa:", control->vmsa_pa);
3276         pr_err("VMCB State Save Area:\n");
3277         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3278                "es:",
3279                save->es.selector, save->es.attrib,
3280                save->es.limit, save->es.base);
3281         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3282                "cs:",
3283                save->cs.selector, save->cs.attrib,
3284                save->cs.limit, save->cs.base);
3285         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3286                "ss:",
3287                save->ss.selector, save->ss.attrib,
3288                save->ss.limit, save->ss.base);
3289         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3290                "ds:",
3291                save->ds.selector, save->ds.attrib,
3292                save->ds.limit, save->ds.base);
3293         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3294                "fs:",
3295                save01->fs.selector, save01->fs.attrib,
3296                save01->fs.limit, save01->fs.base);
3297         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3298                "gs:",
3299                save01->gs.selector, save01->gs.attrib,
3300                save01->gs.limit, save01->gs.base);
3301         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3302                "gdtr:",
3303                save->gdtr.selector, save->gdtr.attrib,
3304                save->gdtr.limit, save->gdtr.base);
3305         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3306                "ldtr:",
3307                save01->ldtr.selector, save01->ldtr.attrib,
3308                save01->ldtr.limit, save01->ldtr.base);
3309         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3310                "idtr:",
3311                save->idtr.selector, save->idtr.attrib,
3312                save->idtr.limit, save->idtr.base);
3313         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
3314                "tr:",
3315                save01->tr.selector, save01->tr.attrib,
3316                save01->tr.limit, save01->tr.base);
3317         pr_err("vmpl: %d   cpl:  %d               efer:          %016llx\n",
3318                save->vmpl, save->cpl, save->efer);
3319         pr_err("%-15s %016llx %-13s %016llx\n",
3320                "cr0:", save->cr0, "cr2:", save->cr2);
3321         pr_err("%-15s %016llx %-13s %016llx\n",
3322                "cr3:", save->cr3, "cr4:", save->cr4);
3323         pr_err("%-15s %016llx %-13s %016llx\n",
3324                "dr6:", save->dr6, "dr7:", save->dr7);
3325         pr_err("%-15s %016llx %-13s %016llx\n",
3326                "rip:", save->rip, "rflags:", save->rflags);
3327         pr_err("%-15s %016llx %-13s %016llx\n",
3328                "rsp:", save->rsp, "rax:", save->rax);
3329         pr_err("%-15s %016llx %-13s %016llx\n",
3330                "star:", save01->star, "lstar:", save01->lstar);
3331         pr_err("%-15s %016llx %-13s %016llx\n",
3332                "cstar:", save01->cstar, "sfmask:", save01->sfmask);
3333         pr_err("%-15s %016llx %-13s %016llx\n",
3334                "kernel_gs_base:", save01->kernel_gs_base,
3335                "sysenter_cs:", save01->sysenter_cs);
3336         pr_err("%-15s %016llx %-13s %016llx\n",
3337                "sysenter_esp:", save01->sysenter_esp,
3338                "sysenter_eip:", save01->sysenter_eip);
3339         pr_err("%-15s %016llx %-13s %016llx\n",
3340                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
3341         pr_err("%-15s %016llx %-13s %016llx\n",
3342                "br_from:", save->br_from, "br_to:", save->br_to);
3343         pr_err("%-15s %016llx %-13s %016llx\n",
3344                "excp_from:", save->last_excp_from,
3345                "excp_to:", save->last_excp_to);
3346 }
3347
3348 static bool svm_check_exit_valid(u64 exit_code)
3349 {
3350         return (exit_code < ARRAY_SIZE(svm_exit_handlers) &&
3351                 svm_exit_handlers[exit_code]);
3352 }
3353
3354 static int svm_handle_invalid_exit(struct kvm_vcpu *vcpu, u64 exit_code)
3355 {
3356         vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%llx\n", exit_code);
3357         dump_vmcb(vcpu);
3358         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3359         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
3360         vcpu->run->internal.ndata = 2;
3361         vcpu->run->internal.data[0] = exit_code;
3362         vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
3363         return 0;
3364 }
3365
3366 int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code)
3367 {
3368         if (!svm_check_exit_valid(exit_code))
3369                 return svm_handle_invalid_exit(vcpu, exit_code);
3370
3371 #ifdef CONFIG_RETPOLINE
3372         if (exit_code == SVM_EXIT_MSR)
3373                 return msr_interception(vcpu);
3374         else if (exit_code == SVM_EXIT_VINTR)
3375                 return interrupt_window_interception(vcpu);
3376         else if (exit_code == SVM_EXIT_INTR)
3377                 return intr_interception(vcpu);
3378         else if (exit_code == SVM_EXIT_HLT)
3379                 return kvm_emulate_halt(vcpu);
3380         else if (exit_code == SVM_EXIT_NPF)
3381                 return npf_interception(vcpu);
3382 #endif
3383         return svm_exit_handlers[exit_code](vcpu);
3384 }
3385
3386 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u32 *reason,
3387                               u64 *info1, u64 *info2,
3388                               u32 *intr_info, u32 *error_code)
3389 {
3390         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3391
3392         *reason = control->exit_code;
3393         *info1 = control->exit_info_1;
3394         *info2 = control->exit_info_2;
3395         *intr_info = control->exit_int_info;
3396         if ((*intr_info & SVM_EXITINTINFO_VALID) &&
3397             (*intr_info & SVM_EXITINTINFO_VALID_ERR))
3398                 *error_code = control->exit_int_info_err;
3399         else
3400                 *error_code = 0;
3401 }
3402
3403 static int svm_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
3404 {
3405         struct vcpu_svm *svm = to_svm(vcpu);
3406         struct kvm_run *kvm_run = vcpu->run;
3407         u32 exit_code = svm->vmcb->control.exit_code;
3408
3409         /* SEV-ES guests must use the CR write traps to track CR registers. */
3410         if (!sev_es_guest(vcpu->kvm)) {
3411                 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE))
3412                         vcpu->arch.cr0 = svm->vmcb->save.cr0;
3413                 if (npt_enabled)
3414                         vcpu->arch.cr3 = svm->vmcb->save.cr3;
3415         }
3416
3417         if (is_guest_mode(vcpu)) {
3418                 int vmexit;
3419
3420                 trace_kvm_nested_vmexit(vcpu, KVM_ISA_SVM);
3421
3422                 vmexit = nested_svm_exit_special(svm);
3423
3424                 if (vmexit == NESTED_EXIT_CONTINUE)
3425                         vmexit = nested_svm_exit_handled(svm);
3426
3427                 if (vmexit == NESTED_EXIT_DONE)
3428                         return 1;
3429         }
3430
3431         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
3432                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3433                 kvm_run->fail_entry.hardware_entry_failure_reason
3434                         = svm->vmcb->control.exit_code;
3435                 kvm_run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu;
3436                 dump_vmcb(vcpu);
3437                 return 0;
3438         }
3439
3440         if (exit_fastpath != EXIT_FASTPATH_NONE)
3441                 return 1;
3442
3443         return svm_invoke_exit_handler(vcpu, exit_code);
3444 }
3445
3446 static void pre_svm_run(struct kvm_vcpu *vcpu)
3447 {
3448         struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, vcpu->cpu);
3449         struct vcpu_svm *svm = to_svm(vcpu);
3450
3451         /*
3452          * If the previous vmrun of the vmcb occurred on a different physical
3453          * cpu, then mark the vmcb dirty and assign a new asid.  Hardware's
3454          * vmcb clean bits are per logical CPU, as are KVM's asid assignments.
3455          */
3456         if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) {
3457                 svm->current_vmcb->asid_generation = 0;
3458                 vmcb_mark_all_dirty(svm->vmcb);
3459                 svm->current_vmcb->cpu = vcpu->cpu;
3460         }
3461
3462         if (sev_guest(vcpu->kvm))
3463                 return pre_sev_run(svm, vcpu->cpu);
3464
3465         /* FIXME: handle wraparound of asid_generation */
3466         if (svm->current_vmcb->asid_generation != sd->asid_generation)
3467                 new_asid(svm, sd);
3468 }
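
/*
 * Illustrative sketch, not part of the kernel source: the per-CPU ASID
 * generation scheme that pre_svm_run() relies on above.  Each physical CPU
 * hands out ASIDs from its own pool; when the pool runs dry the generation
 * is bumped and a full flush is requested, which implicitly invalidates
 * every vCPU whose cached generation no longer matches.  The example_* name
 * is hypothetical; the real logic lives in new_asid().
 */
static void example_assign_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
        if (sd->next_asid > sd->max_asid) {
                ++sd->asid_generation;                  /* invalidate old ASIDs  */
                sd->next_asid = sd->min_asid;           /* recycle the ASID pool */
                svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
                vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
        }

        svm->current_vmcb->asid_generation = sd->asid_generation;
        svm->asid = sd->next_asid++;
}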
3469
3470 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
3471 {
3472         struct vcpu_svm *svm = to_svm(vcpu);
3473
3474         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
3475
3476         if (svm->nmi_l1_to_l2)
3477                 return;
3478
3479         svm->nmi_masked = true;
3480         svm_set_iret_intercept(svm);
3481         ++vcpu->stat.nmi_injections;
3482 }
3483
3484 static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
3485 {
3486         struct vcpu_svm *svm = to_svm(vcpu);
3487
3488         if (!is_vnmi_enabled(svm))
3489                 return false;
3490
3491         return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
3492 }
3493
3494 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
3495 {
3496         struct vcpu_svm *svm = to_svm(vcpu);
3497
3498         if (!is_vnmi_enabled(svm))
3499                 return false;
3500
3501         if (svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK)
3502                 return false;
3503
3504         svm->vmcb->control.int_ctl |= V_NMI_PENDING_MASK;
3505         vmcb_mark_dirty(svm->vmcb, VMCB_INTR);
3506
3507         /*
3508          * Because the pending NMI is serviced by hardware, KVM can't know when
3509          * the NMI is "injected", but for all intents and purposes, passing the
3510          * NMI off to hardware counts as injection.
3511          */
3512         ++vcpu->stat.nmi_injections;
3513
3514         return true;
3515 }
3516
3517 static void svm_inject_irq(struct kvm_vcpu *vcpu, bool reinjected)
3518 {
3519         struct vcpu_svm *svm = to_svm(vcpu);
3520         u32 type;
3521
3522         if (vcpu->arch.interrupt.soft) {
3523                 if (svm_update_soft_interrupt_rip(vcpu))
3524                         return;
3525
3526                 type = SVM_EVTINJ_TYPE_SOFT;
3527         } else {
3528                 type = SVM_EVTINJ_TYPE_INTR;
3529         }
3530
3531         trace_kvm_inj_virq(vcpu->arch.interrupt.nr,
3532                            vcpu->arch.interrupt.soft, reinjected);
3533         ++vcpu->stat.irq_injections;
3534
3535         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
3536                                        SVM_EVTINJ_VALID | type;
3537 }
3538
3539 void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
3540                                      int trig_mode, int vector)
3541 {
3542         /*
3543          * apic->apicv_active must be read after vcpu->mode.
3544          * Pairs with smp_store_release in vcpu_enter_guest.
3545          */
3546         bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
3547
3548         /* Note, this is called iff the local APIC is in-kernel. */
3549         if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
3550                 /* Process the interrupt via kvm_check_and_inject_events(). */
3551                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3552                 kvm_vcpu_kick(vcpu);
3553                 return;
3554         }
3555
3556         trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
3557         if (in_guest_mode) {
3558                 /*
3559                  * Signal the doorbell to tell hardware to inject the IRQ.  If
3560                  * the vCPU exits the guest before the doorbell chimes, hardware
3561                  * will automatically process AVIC interrupts at the next VMRUN.
3562                  */
3563                 avic_ring_doorbell(vcpu);
3564         } else {
3565                 /*
3566                  * Wake the vCPU if it was blocking.  KVM will then detect the
3567                  * pending IRQ when checking if the vCPU has a wake event.
3568                  */
3569                 kvm_vcpu_wake_up(vcpu);
3570         }
3571 }
3572
3573 static void svm_deliver_interrupt(struct kvm_lapic *apic,  int delivery_mode,
3574                                   int trig_mode, int vector)
3575 {
3576         kvm_lapic_set_irr(vector, apic);
3577
3578         /*
3579          * Pairs with the smp_mb_*() after setting vcpu->mode to IN_GUEST_MODE in
3580          * vcpu_enter_guest() to ensure the write to the vIRR is ordered before
3581          * the read of vcpu->mode.  This guarantees that either VMRUN will see
3582          * and process the new vIRR entry, or that svm_complete_interrupt_delivery
3583          * will signal the doorbell if the CPU has already entered the guest.
3584          */
3585         smp_mb__after_atomic();
3586         svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
3587 }
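
/*
 * Illustrative sketch, not part of the kernel source: the cross-CPU ordering
 * that svm_deliver_interrupt() and svm_complete_interrupt_delivery() rely
 * on.  The sender publishes the vIRR bit before reading vcpu->mode, while
 * the entry path publishes vcpu->mode == IN_GUEST_MODE before VMRUN scans
 * the vIRR; whichever side "loses" the race still observes the other's
 * write, so the interrupt is either processed by VMRUN or chased with a
 * doorbell.  The example_* name is hypothetical.
 */
static inline void example_sender_side(struct kvm_vcpu *vcpu, int vector)
{
        kvm_lapic_set_irr(vector, vcpu->arch.apic);     /* 1: publish the vIRR bit */
        smp_mb__after_atomic();                         /* order step 1 before 2   */

        if (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE)     /* 2: read mode */
                avic_ring_doorbell(vcpu);       /* vCPU is in guest mode, poke it */
        else
                kvm_vcpu_wake_up(vcpu);         /* vCPU may be blocking, wake it  */
}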
3588
3589 static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3590 {
3591         struct vcpu_svm *svm = to_svm(vcpu);
3592
3593         /*
3594          * SEV-ES guests must always keep the CR intercepts cleared. CR
3595          * tracking is done using the CR write traps.
3596          */
3597         if (sev_es_guest(vcpu->kvm))
3598                 return;
3599
3600         if (nested_svm_virtualize_tpr(vcpu))
3601                 return;
3602
3603         svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
3604
3605         if (irr == -1)
3606                 return;
3607
3608         if (tpr >= irr)
3609                 svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
3610 }
3611
3612 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
3613 {
3614         struct vcpu_svm *svm = to_svm(vcpu);
3615
3616         if (is_vnmi_enabled(svm))
3617                 return svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK;
3618         else
3619                 return svm->nmi_masked;
3620 }
3621
3622 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
3623 {
3624         struct vcpu_svm *svm = to_svm(vcpu);
3625
3626         if (is_vnmi_enabled(svm)) {
3627                 if (masked)
3628                         svm->vmcb->control.int_ctl |= V_NMI_BLOCKING_MASK;
3629                 else
3630                         svm->vmcb->control.int_ctl &= ~V_NMI_BLOCKING_MASK;
3631
3632         } else {
3633                 svm->nmi_masked = masked;
3634                 if (masked)
3635                         svm_set_iret_intercept(svm);
3636                 else
3637                         svm_clr_iret_intercept(svm);
3638         }
3639 }
3640
3641 bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
3642 {
3643         struct vcpu_svm *svm = to_svm(vcpu);
3644         struct vmcb *vmcb = svm->vmcb;
3645
3646         if (!gif_set(svm))
3647                 return true;
3648
3649         if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3650                 return false;
3651
3652         if (svm_get_nmi_mask(vcpu))
3653                 return true;
3654
3655         return vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK;
3656 }
3657
3658 static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3659 {
3660         struct vcpu_svm *svm = to_svm(vcpu);
3661         if (svm->nested.nested_run_pending)
3662                 return -EBUSY;
3663
3664         if (svm_nmi_blocked(vcpu))
3665                 return 0;
3666
3667         /* An NMI must not be injected into L2 if it's supposed to VM-Exit.  */
3668         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
3669                 return -EBUSY;
3670         return 1;
3671 }
3672
3673 bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
3674 {
3675         struct vcpu_svm *svm = to_svm(vcpu);
3676         struct vmcb *vmcb = svm->vmcb;
3677
3678         if (!gif_set(svm))
3679                 return true;
3680
3681         if (is_guest_mode(vcpu)) {
3682                 /* As long as interrupts are being delivered...  */
3683                 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK)
3684                     ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF)
3685                     : !(kvm_get_rflags(vcpu) & X86_EFLAGS_IF))
3686                         return true;
3687
3688                 /* ... vmexits aren't blocked by the interrupt shadow  */
3689                 if (nested_exit_on_intr(svm))
3690                         return false;
3691         } else {
3692                 if (!svm_get_if_flag(vcpu))
3693                         return true;
3694         }
3695
3696         return (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK);
3697 }
3698
3699 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
3700 {
3701         struct vcpu_svm *svm = to_svm(vcpu);
3702
3703         if (svm->nested.nested_run_pending)
3704                 return -EBUSY;
3705
3706         if (svm_interrupt_blocked(vcpu))
3707                 return 0;
3708
3709         /*
3710          * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
3711          * e.g. if the IRQ arrived asynchronously after checking nested events.
3712          */
3713         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
3714                 return -EBUSY;
3715
3716         return 1;
3717 }
3718
3719 static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
3720 {
3721         struct vcpu_svm *svm = to_svm(vcpu);
3722
3723         /*
3724          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
3725          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
3726          * get that intercept, this function will be called again and we'll
3727          * eventually get the VINTR intercept. However, if the vGIF feature is
3728          * enabled, the STGI interception will not occur. Enable the irq
3729          * window under the assumption that the hardware will set the GIF.
3730          */
3731         if (vgif || gif_set(svm)) {
3732                 /*
3733                  * The IRQ window is not needed when AVIC is enabled,
3734                  * unless there is a pending ExtINT, which cannot be injected
3735                  * via AVIC. In that case, KVM needs to temporarily disable AVIC
3736                  * and fall back to injecting the IRQ via V_IRQ.
3737                  *
3738                  * If running nested, AVIC is already locally inhibited
3739                  * on this vCPU, therefore there is no need to request
3740                  * the VM wide AVIC inhibition.
3741                  */
3742                 if (!is_guest_mode(vcpu))
3743                         kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_IRQWIN);
3744
3745                 svm_set_vintr(svm);
3746         }
3747 }
3748
3749 static void svm_enable_nmi_window(struct kvm_vcpu *vcpu)
3750 {
3751         struct vcpu_svm *svm = to_svm(vcpu);
3752
3753         /*
3754          * KVM should never request an NMI window when vNMI is enabled, as KVM
3755          * allows at most one to-be-injected NMI and one pending NMI, i.e. if
3756          * two NMIs arrive simultaneously, KVM will inject one and set
3757          * V_NMI_PENDING for the other.  WARN, but continue with the standard
3758          * single-step approach to try and salvage the pending NMI.
3759          */
3760         WARN_ON_ONCE(is_vnmi_enabled(svm));
3761
3762         if (svm_get_nmi_mask(vcpu) && !svm->awaiting_iret_completion)
3763                 return; /* IRET will cause a vm exit */
3764
3765         if (!gif_set(svm)) {
3766                 if (vgif)
3767                         svm_set_intercept(svm, INTERCEPT_STGI);
3768                 return; /* STGI will cause a vm exit */
3769         }
3770
3771         /*
3772          * Something prevents the NMI from being injected (IRET, exception injection,
3773          * or an interrupt shadow).  Single-step over it (unwound as sketched below).
3774          */
3775         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
3776         svm->nmi_singlestep = true;
3777         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
3778 }
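
/*
 * Illustrative sketch, not part of the kernel source: the unwind side of the
 * single-step workaround set up above.  Once the blocking condition has been
 * stepped over, the TF/RF bits that were forced on must be cleared again,
 * but only those the guest did not already have set, as remembered in
 * nmi_singlestep_guest_rflags.  The example_* name is hypothetical; the real
 * code lives in disable_nmi_singlestep().
 */
static inline void example_end_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        /* Clear only the flags that were not set by the guest itself. */
        if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
        if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
}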
3779
3780 static void svm_flush_tlb_asid(struct kvm_vcpu *vcpu)
3781 {
3782         struct vcpu_svm *svm = to_svm(vcpu);
3783
3784         /*
3785          * Unlike VMX, SVM doesn't provide a way to flush only NPT TLB entries.
3786          * A TLB flush for the current ASID flushes both "host" and "guest" TLB
3787          * entries, and thus is a superset of Hyper-V's fine grained flushing.
3788          */
3789         kvm_hv_vcpu_purge_flush_tlb(vcpu);
3790
3791         /*
3792          * Flush only the current ASID even if the TLB flush was invoked via
3793          * kvm_flush_remote_tlbs().  Although flushing remote TLBs requires all
3794          * ASIDs to be flushed, KVM uses a single ASID for L1 and L2, and
3795          * unconditionally does a TLB flush on both nested VM-Enter and nested
3796          * VM-Exit (via kvm_mmu_reset_context()).
3797          */
3798         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
3799                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
3800         else
3801                 svm->current_vmcb->asid_generation--;
3802 }
3803
3804 static void svm_flush_tlb_current(struct kvm_vcpu *vcpu)
3805 {
3806         hpa_t root_tdp = vcpu->arch.mmu->root.hpa;
3807
3808         /*
3809          * When running on Hyper-V with EnlightenedNptTlb enabled, explicitly
3810          * flush the NPT mappings via hypercall as flushing the ASID only
3811          * affects virtual to physical mappings, it does not invalidate guest
3812          * physical to host physical mappings.
3813          */
3814         if (svm_hv_is_enlightened_tlb_enabled(vcpu) && VALID_PAGE(root_tdp))
3815                 hyperv_flush_guest_mapping(root_tdp);
3816
3817         svm_flush_tlb_asid(vcpu);
3818 }
3819
3820 static void svm_flush_tlb_all(struct kvm_vcpu *vcpu)
3821 {
3822         /*
3823          * When running on Hyper-V with EnlightenedNptTlb enabled, remote TLB
3824          * flushes should be routed to hv_flush_remote_tlbs() without requesting
3825          * a "regular" remote flush.  Reaching this point means either there's
3826          * a KVM bug or a prior hv_flush_remote_tlbs() call failed, both of
3827          * which might be fatal to the guest.  Yell, but try to recover.
3828          */
3829         if (WARN_ON_ONCE(svm_hv_is_enlightened_tlb_enabled(vcpu)))
3830                 hv_flush_remote_tlbs(vcpu->kvm);
3831
3832         svm_flush_tlb_asid(vcpu);
3833 }
3834
3835 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
3836 {
3837         struct vcpu_svm *svm = to_svm(vcpu);
3838
3839         invlpga(gva, svm->vmcb->control.asid);
3840 }
3841
3842 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
3843 {
3844         struct vcpu_svm *svm = to_svm(vcpu);
3845
3846         if (nested_svm_virtualize_tpr(vcpu))
3847                 return;
3848
3849         if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) {
3850                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
3851                 kvm_set_cr8(vcpu, cr8);
3852         }
3853 }
3854
3855 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
3856 {
3857         struct vcpu_svm *svm = to_svm(vcpu);
3858         u64 cr8;
3859
3860         if (nested_svm_virtualize_tpr(vcpu) ||
3861             kvm_vcpu_apicv_active(vcpu))
3862                 return;
3863
3864         cr8 = kvm_get_cr8(vcpu);
3865         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
3866         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
3867 }
3868
3869 static void svm_complete_soft_interrupt(struct kvm_vcpu *vcpu, u8 vector,
3870                                         int type)
3871 {
3872         bool is_exception = (type == SVM_EXITINTINFO_TYPE_EXEPT);
3873         bool is_soft = (type == SVM_EXITINTINFO_TYPE_SOFT);
3874         struct vcpu_svm *svm = to_svm(vcpu);
3875
3876         /*
3877          * If NRIPS is enabled, KVM must snapshot the pre-VMRUN next_rip that's
3878          * associated with the original soft exception/interrupt.  next_rip is
3879          * cleared on all exits that can occur while vectoring an event, so KVM
3880          * needs to manually set next_rip for re-injection.  Unlike the !nrips
3881          * case below, this needs to be done if and only if KVM is re-injecting
3882          * the same event, i.e. if the event is a soft exception/interrupt,
3883          * otherwise next_rip is unused on VMRUN.
3884          */
3885         if (nrips && (is_soft || (is_exception && kvm_exception_is_soft(vector))) &&
3886             kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase))
3887                 svm->vmcb->control.next_rip = svm->soft_int_next_rip;
3888         /*
3889          * If NRIPS isn't enabled, KVM must manually advance RIP prior to
3890          * injecting the soft exception/interrupt.  That advancement needs to
3891          * be unwound if vectoring didn't complete.  Note, the new event may
3892          * not be the injected event, e.g. if KVM injected an INTn, the INTn
3893          * hit a #NP in the guest, and the #NP encountered a #PF, the #NP will
3894          * be the reported vectored event, but RIP still needs to be unwound.
3895          */
3896         else if (!nrips && (is_soft || is_exception) &&
3897                  kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase))
3898                 kvm_rip_write(vcpu, svm->soft_int_old_rip);
3899 }
3900
3901 static void svm_complete_interrupts(struct kvm_vcpu *vcpu)
3902 {
3903         struct vcpu_svm *svm = to_svm(vcpu);
3904         u8 vector;
3905         int type;
3906         u32 exitintinfo = svm->vmcb->control.exit_int_info;
3907         bool nmi_l1_to_l2 = svm->nmi_l1_to_l2;
3908         bool soft_int_injected = svm->soft_int_injected;
3909
3910         svm->nmi_l1_to_l2 = false;
3911         svm->soft_int_injected = false;
3912
3913         /*
3914          * If we've made progress since setting awaiting_iret_completion, the
3915          * guest has executed an IRET and we can allow NMI injection.
3916          */
3917         if (svm->awaiting_iret_completion &&
3918             (sev_es_guest(vcpu->kvm) ||
3919              kvm_rip_read(vcpu) != svm->nmi_iret_rip)) {
3920                 svm->awaiting_iret_completion = false;
3921                 svm->nmi_masked = false;
3922                 kvm_make_request(KVM_REQ_EVENT, vcpu);
3923         }
3924
3925         vcpu->arch.nmi_injected = false;
3926         kvm_clear_exception_queue(vcpu);
3927         kvm_clear_interrupt_queue(vcpu);
3928
3929         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
3930                 return;
3931
3932         kvm_make_request(KVM_REQ_EVENT, vcpu);
3933
3934         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
3935         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
3936
3937         if (soft_int_injected)
3938                 svm_complete_soft_interrupt(vcpu, vector, type);
3939
3940         switch (type) {
3941         case SVM_EXITINTINFO_TYPE_NMI:
3942                 vcpu->arch.nmi_injected = true;
3943                 svm->nmi_l1_to_l2 = nmi_l1_to_l2;
3944                 break;
3945         case SVM_EXITINTINFO_TYPE_EXEPT:
3946                 /*
3947                  * Never re-inject a #VC exception.
3948                  */
3949                 if (vector == X86_TRAP_VC)
3950                         break;
3951
3952                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
3953                         u32 err = svm->vmcb->control.exit_int_info_err;
3954                         kvm_requeue_exception_e(vcpu, vector, err);
3955
3956                 } else
3957                         kvm_requeue_exception(vcpu, vector);
3958                 break;
3959         case SVM_EXITINTINFO_TYPE_INTR:
3960                 kvm_queue_interrupt(vcpu, vector, false);
3961                 break;
3962         case SVM_EXITINTINFO_TYPE_SOFT:
3963                 kvm_queue_interrupt(vcpu, vector, true);
3964                 break;
3965         default:
3966                 break;
3967         }
3968
3969 }
3970
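/*
 * Called when a VM-entry is abandoned after event injection has already been
 * set up.  Fold the would-be injected event back into the EXITINTINFO format
 * so that svm_complete_interrupts() re-queues it, just as if the CPU had
 * failed to deliver it.
 */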
3971 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
3972 {
3973         struct vcpu_svm *svm = to_svm(vcpu);
3974         struct vmcb_control_area *control = &svm->vmcb->control;
3975
3976         control->exit_int_info = control->event_inj;
3977         control->exit_int_info_err = control->event_inj_err;
3978         control->event_inj = 0;
3979         svm_complete_interrupts(vcpu);
3980 }
3981
3982 static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
3983 {
3984         return 1;
3985 }
3986
3987 static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
3988 {
3989         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
3990
3991         /*
3992          * Note, the next RIP must be provided as SRCU isn't held, i.e. KVM
3993          * can't read guest memory (dereference memslots) to decode the WRMSR.
3994          */
3995         if (control->exit_code == SVM_EXIT_MSR && control->exit_info_1 &&
3996             nrips && control->next_rip)
3997                 return handle_fastpath_set_msr_irqoff(vcpu);
3998
3999         return EXIT_FASTPATH_NONE;
4000 }
4001
4002 static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_intercepted)
4003 {
4004         struct vcpu_svm *svm = to_svm(vcpu);
4005
4006         guest_state_enter_irqoff();
4007
4008         if (sev_es_guest(vcpu->kvm))
4009                 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
4010         else
4011                 __svm_vcpu_run(svm, spec_ctrl_intercepted);
4012
4013         guest_state_exit_irqoff();
4014 }
4015
4016 static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
4017 {
4018         struct vcpu_svm *svm = to_svm(vcpu);
4019         bool spec_ctrl_intercepted = msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL);
4020
4021         trace_kvm_entry(vcpu);
4022
4023         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4024         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4025         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4026
4027         /*
4028          * Disable singlestep if we're injecting an interrupt/exception.
4029          * We don't want our modified rflags to be pushed on the stack where
4030          * we might not be able to easily reset them if we disabled NMI
4031          * singlestep later.
4032          */
4033         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
4034                 /*
4035                  * Event injection happens before external interrupts cause a
4036                  * vmexit and interrupts are disabled here, so smp_send_reschedule
4037                  * is enough to force an immediate vmexit.
4038                  */
4039                 disable_nmi_singlestep(svm);
4040                 smp_send_reschedule(vcpu->cpu);
4041         }
4042
4043         pre_svm_run(vcpu);
4044
4045         sync_lapic_to_cr8(vcpu);
4046
4047         if (unlikely(svm->asid != svm->vmcb->control.asid)) {
4048                 svm->vmcb->control.asid = svm->asid;
4049                 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
4050         }
4051         svm->vmcb->save.cr2 = vcpu->arch.cr2;
4052
4053         svm_hv_update_vp_id(svm->vmcb, vcpu);
4054
4055         /*
4056          * Run with DR6 in its "no debug events" state (DR6_ACTIVE_LOW) unless the
4057          * guest's value is needed, so that the exact cause of a #DB can be recovered.
4058          */
4059         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
4060                 svm_set_dr6(svm, vcpu->arch.dr6);
4061         else
4062                 svm_set_dr6(svm, DR6_ACTIVE_LOW);
4063
4064         clgi();
4065         kvm_load_guest_xsave_state(vcpu);
4066
4067         kvm_wait_lapic_expire(vcpu);
4068
4069         /*
4070          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
4071          * it's non-zero. Since vmentry is serialising on affected CPUs, there
4072          * is no need to worry about the conditional branch over the wrmsr
4073          * being speculatively taken.
4074          */
4075         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4076                 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl);
4077
4078         svm_vcpu_enter_exit(vcpu, spec_ctrl_intercepted);
4079
4080         if (!static_cpu_has(X86_FEATURE_V_SPEC_CTRL))
4081                 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl);
4082
4083         if (!sev_es_guest(vcpu->kvm)) {
4084                 vcpu->arch.cr2 = svm->vmcb->save.cr2;
4085                 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
4086                 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
4087                 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
4088         }
4089         vcpu->arch.regs_dirty = 0;
4090
4091         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4092                 kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
4093
4094         kvm_load_host_xsave_state(vcpu);
4095         stgi();
4096
4097         /* Any pending NMI will happen here */
4098
4099         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
4100                 kvm_after_interrupt(vcpu);
4101
4102         sync_cr8_to_lapic(vcpu);
4103
4104         svm->next_rip = 0;
4105         if (is_guest_mode(vcpu)) {
4106                 nested_sync_control_from_vmcb02(svm);
4107
4108                 /* Track VMRUNs that have made it past consistency checking */
4109                 if (svm->nested.nested_run_pending &&
4110                     svm->vmcb->control.exit_code != SVM_EXIT_ERR)
4111                         ++vcpu->stat.nested_run;
4112
4113                 svm->nested.nested_run_pending = 0;
4114         }
4115
4116         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
4117         vmcb_mark_all_clean(svm->vmcb);
4118
4119         /* If the exit was due to a #PF, check for an async #PF. */
4120         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
4121                 vcpu->arch.apf.host_apf_flags =
4122                         kvm_read_and_reset_apf_flags();
4123
4124         vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
4125
4126         /*
4127          * We need to handle MC intercepts here before the vcpu has a chance to
4128          * change the physical CPU.
4129          */
4130         if (unlikely(svm->vmcb->control.exit_code ==
4131                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
4132                 svm_handle_mce(vcpu);
4133
4134         trace_kvm_exit(vcpu, KVM_ISA_SVM);
4135
4136         svm_complete_interrupts(vcpu);
4137
4138         if (is_guest_mode(vcpu))
4139                 return EXIT_FASTPATH_NONE;
4140
4141         return svm_exit_handlers_fastpath(vcpu);
4142 }
4143
4144 static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
4145                              int root_level)
4146 {
4147         struct vcpu_svm *svm = to_svm(vcpu);
4148         unsigned long cr3;
4149
4150         if (npt_enabled) {
4151                 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa);
4152                 vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
4153
4154                 hv_track_root_tdp(vcpu, root_hpa);
4155
4156                 cr3 = vcpu->arch.cr3;
4157         } else if (root_level >= PT64_ROOT_4LEVEL) {
4158                 cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
4159         } else {
4160                 /* PCID in the guest should be impossible with a 32-bit MMU. */
4161                 WARN_ON_ONCE(kvm_get_active_pcid(vcpu));
4162                 cr3 = root_hpa;
4163         }
4164
4165         svm->vmcb->save.cr3 = cr3;
4166         vmcb_mark_dirty(svm->vmcb, VMCB_CR);
4167 }
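
/*
 * Illustrative sketch, not part of the kernel source: how the shadow-paging
 * CR3 value is composed above when NPT is not in use.  With CR4.PCIDE=1 the
 * low 12 bits of CR3 carry the PCID and the upper bits hold the physical
 * address of the root page table; __sme_set() ORs in the SME/SEV C-bit when
 * memory encryption is active.  The example_* name is hypothetical.
 */
static inline unsigned long example_shadow_cr3(hpa_t root_hpa,
                                               unsigned long pcid)
{
        return __sme_set(root_hpa) | (pcid & 0xfff);
}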
4168
4169 static void
4170 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4171 {
4172         /*
4173          * Patch in the VMMCALL instruction:
4174          */
4175         hypercall[0] = 0x0f;
4176         hypercall[1] = 0x01;
4177         hypercall[2] = 0xd9;
4178 }
4179
4180 /*
4181  * The kvm parameter can be NULL (module initialization, or invocation before
4182  * VM creation). Be sure to check the kvm parameter before using it.
4183  */
4184 static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
4185 {
4186         switch (index) {
4187         case MSR_IA32_MCG_EXT_CTL:
4188         case KVM_FIRST_EMULATED_VMX_MSR ... KVM_LAST_EMULATED_VMX_MSR:
4189                 return false;
4190         case MSR_IA32_SMBASE:
4191                 if (!IS_ENABLED(CONFIG_KVM_SMM))
4192                         return false;
4193                 /* SEV-ES guests do not support SMM, so report false */
4194                 if (kvm && sev_es_guest(kvm))
4195                         return false;
4196                 break;
4197         default:
4198                 break;
4199         }
4200
4201         return true;
4202 }
4203
4204 static void svm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
4205 {
4206         struct vcpu_svm *svm = to_svm(vcpu);
4207         struct kvm_cpuid_entry2 *best;
4208
4209         vcpu->arch.xsaves_enabled = guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
4210                                     boot_cpu_has(X86_FEATURE_XSAVE) &&
4211                                     boot_cpu_has(X86_FEATURE_XSAVES);
4212
4213         /* Update nrips enabled cache */
4214         svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) &&
4215                              guest_cpuid_has(vcpu, X86_FEATURE_NRIPS);
4216
4217         svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR);
4218         svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV);
4219
4220         svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD);
4221
4222         svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) &&
4223                         guest_cpuid_has(vcpu, X86_FEATURE_PAUSEFILTER);
4224
4225         svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) &&
4226                         guest_cpuid_has(vcpu, X86_FEATURE_PFTHRESHOLD);
4227
4228         svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF);
4229
4230         svm->vnmi_enabled = vnmi && guest_cpuid_has(vcpu, X86_FEATURE_VNMI);
4231
4232         svm_recalc_instruction_intercepts(vcpu, svm);
4233
4234         if (boot_cpu_has(X86_FEATURE_IBPB))
4235                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0,
4236                                      !!guest_has_pred_cmd_msr(vcpu));
4237
4238         if (boot_cpu_has(X86_FEATURE_FLUSH_L1D))
4239                 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_FLUSH_CMD, 0,
4240                                      !!guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D));
4241
4242         /* For SEV guests, the memory encryption bit is not reserved in CR3.  */
4243         if (sev_guest(vcpu->kvm)) {
4244                 best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
4245                 if (best)
4246                         vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));
4247         }
4248
4249         init_vmcb_after_set_cpuid(vcpu);
4250 }
4251
4252 static bool svm_has_wbinvd_exit(void)
4253 {
4254         return true;
4255 }
4256
4257 #define PRE_EX(exit)  { .exit_code = (exit), \
4258                         .stage = X86_ICPT_PRE_EXCEPT, }
4259 #define POST_EX(exit) { .exit_code = (exit), \
4260                         .stage = X86_ICPT_POST_EXCEPT, }
4261 #define POST_MEM(exit) { .exit_code = (exit), \
4262                         .stage = X86_ICPT_POST_MEMACCESS, }
4263
4264 static const struct __x86_intercept {
4265         u32 exit_code;
4266         enum x86_intercept_stage stage;
4267 } x86_intercept_map[] = {
4268         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
4269         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
4270         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
4271         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
4272         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
4273         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
4274         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
4275         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
4276         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
4277         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
4278         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
4279         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
4280         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
4281         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
4282         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
4283         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
4284         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
4285         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
4286         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
4287         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
4288         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
4289         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
4290         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
4291         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
4292         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
4293         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
4294         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
4295         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
4296         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
4297         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
4298         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
4299         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
4300         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
4301         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
4302         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
4303         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
4304         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
4305         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
4306         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
4307         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
4308         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
4309         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
4310         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
4311         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
4312         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
4313         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
4314         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
4315 };
4316
4317 #undef PRE_EX
4318 #undef POST_EX
4319 #undef POST_MEM
4320
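     /*
      * Invoked by the emulator (via .check_intercept) when emulating an
      * instruction on behalf of L2: translate the emulator intercept into the
      * equivalent SVM exit code and ask the nested exit handler whether L1
      * intercepts it, in which case emulation is abandoned in favor of a
      * synthesized #VMEXIT.
      */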
4321 static int svm_check_intercept(struct kvm_vcpu *vcpu,
4322                                struct x86_instruction_info *info,
4323                                enum x86_intercept_stage stage,
4324                                struct x86_exception *exception)
4325 {
4326         struct vcpu_svm *svm = to_svm(vcpu);
4327         int vmexit, ret = X86EMUL_CONTINUE;
4328         struct __x86_intercept icpt_info;
4329         struct vmcb *vmcb = svm->vmcb;
4330
4331         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
4332                 goto out;
4333
4334         icpt_info = x86_intercept_map[info->intercept];
4335
4336         if (stage != icpt_info.stage)
4337                 goto out;
4338
4339         switch (icpt_info.exit_code) {
4340         case SVM_EXIT_READ_CR0:
4341                 if (info->intercept == x86_intercept_cr_read)
4342                         icpt_info.exit_code += info->modrm_reg;
4343                 break;
4344         case SVM_EXIT_WRITE_CR0: {
4345                 unsigned long cr0, val;
4346
4347                 if (info->intercept == x86_intercept_cr_write)
4348                         icpt_info.exit_code += info->modrm_reg;
4349
4350                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
4351                     info->intercept == x86_intercept_clts)
4352                         break;
4353
4354                 if (!(vmcb12_is_intercept(&svm->nested.ctl,
4355                                         INTERCEPT_SELECTIVE_CR0)))
4356                         break;
4357
4358                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
4359                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
4360
4361                 if (info->intercept == x86_intercept_lmsw) {
4362                         cr0 &= 0xfUL;
4363                         val &= 0xfUL;
4364                         /* lmsw can't clear PE - catch this here */
4365                         if (cr0 & X86_CR0_PE)
4366                                 val |= X86_CR0_PE;
4367                 }
4368
4369                 if (cr0 ^ val)
4370                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
4371
4372                 break;
4373         }
4374         case SVM_EXIT_READ_DR0:
4375         case SVM_EXIT_WRITE_DR0:
4376                 icpt_info.exit_code += info->modrm_reg;
4377                 break;
4378         case SVM_EXIT_MSR:
4379                 if (info->intercept == x86_intercept_wrmsr)
4380                         vmcb->control.exit_info_1 = 1;
4381                 else
4382                         vmcb->control.exit_info_1 = 0;
4383                 break;
4384         case SVM_EXIT_PAUSE:
4385                 /*
4386                  * The emulator intercept fires for NOP, but PAUSE is
4387                  * encoded as REP NOP, so check for the REP prefix here.
4388                  */
4389                 if (info->rep_prefix != REPE_PREFIX)
4390                         goto out;
4391                 break;
4392         case SVM_EXIT_IOIO: {
4393                 u64 exit_info;
4394                 u32 bytes;
4395
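                     /*
                      * Build EXITINFO1 in the hardware IOIO-exit format so the
                      * nested intercept check sees what the CPU would report
                      * (per the SVM_IOIO_* definitions): bit 0 = TYPE (1 = IN),
                      * bit 2 = STR, bit 3 = REP, bits 6:4 = one-hot operand
                      * size, bits 9:7 = one-hot address size, bits 31:16 =
                      * port number.
                      */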
4396                 if (info->intercept == x86_intercept_in ||
4397                     info->intercept == x86_intercept_ins) {
4398                         exit_info = ((info->src_val & 0xffff) << 16) |
4399                                 SVM_IOIO_TYPE_MASK;
4400                         bytes = info->dst_bytes;
4401                 } else {
4402                         exit_info = (info->dst_val & 0xffff) << 16;
4403                         bytes = info->src_bytes;
4404                 }
4405
4406                 if (info->intercept == x86_intercept_outs ||
4407                     info->intercept == x86_intercept_ins)
4408                         exit_info |= SVM_IOIO_STR_MASK;
4409
4410                 if (info->rep_prefix)
4411                         exit_info |= SVM_IOIO_REP_MASK;
4412
4413                 bytes = min(bytes, 4u);
4414
4415                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
4416
4417                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
4418
4419                 vmcb->control.exit_info_1 = exit_info;
4420                 vmcb->control.exit_info_2 = info->next_rip;
4421
4422                 break;
4423         }
4424         default:
4425                 break;
4426         }
4427
4428         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
4429         if (static_cpu_has(X86_FEATURE_NRIPS))
4430                 vmcb->control.next_rip  = info->next_rip;
4431         vmcb->control.exit_code = icpt_info.exit_code;
4432         vmexit = nested_svm_exit_handled(svm);
4433
4434         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
4435                                            : X86EMUL_CONTINUE;
4436
4437 out:
4438         return ret;
4439 }
4440
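     /*
      * An INTR exit is taken on an instruction boundary; record that so a
      * preemption which immediately follows the exit can be reported as
      * having occurred at an instruction boundary.
      */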
4441 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
4442 {
4443         if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR)
4444                 vcpu->arch.at_instruction_boundary = true;
4445 }
4446
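     /*
      * Dynamic PLE window: shrink the PAUSE filter count when the vCPU is
      * scheduled back in, unless PAUSE exiting is disabled for the VM.
      */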
4447 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
4448 {
4449         if (!kvm_pause_in_guest(vcpu->kvm))
4450                 shrink_ple_window(vcpu);
4451 }
4452
4453 static void svm_setup_mce(struct kvm_vcpu *vcpu)
4454 {
4455         /* [63:9] are reserved. */
4456         vcpu->arch.mcg_cap &= 0x1ff;
4457 }
4458
4459 #ifdef CONFIG_KVM_SMM
4460 bool svm_smi_blocked(struct kvm_vcpu *vcpu)
4461 {
4462         struct vcpu_svm *svm = to_svm(vcpu);
4463
4464         /* Per APM Vol.2 15.22.2 "Response to SMI" */
4465         if (!gif_set(svm))
4466                 return true;
4467
4468         return is_smm(vcpu);
4469 }
4470
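     /*
      * Returns 1 if an SMI can be taken now, 0 if SMIs are currently blocked,
      * and -EBUSY if the decision must be deferred, e.g. because a nested
      * VMRUN is pending or the SMI needs to be handled as a nested VM-Exit
      * instead of being injected into L2.
      */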
4471 static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
4472 {
4473         struct vcpu_svm *svm = to_svm(vcpu);
4474         if (svm->nested.nested_run_pending)
4475                 return -EBUSY;
4476
4477         if (svm_smi_blocked(vcpu))
4478                 return 0;
4479
4480         /* An SMI must not be injected into L2 if it's supposed to VM-Exit.  */
4481         if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
4482                 return -EBUSY;
4483
4484         return 1;
4485 }
4486
4487 static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
4488 {
4489         struct vcpu_svm *svm = to_svm(vcpu);
4490         struct kvm_host_map map_save;
4491         int ret;
4492
4493         if (!is_guest_mode(vcpu))
4494                 return 0;
4495
4496         /*
4497          * 32-bit SMRAM format doesn't preserve EFER and SVM state.  Userspace is
4498          * responsible for ensuring nested SVM and SMIs are mutually exclusive.
4499          */
4500
4501         if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4502                 return 1;
4503
4504         smram->smram64.svm_guest_flag = 1;
4505         smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
4506
4507         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
4508         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
4509         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
4510
4511         ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW);
4512         if (ret)
4513                 return ret;
4514
4515         /*
4516          * KVM uses VMCB01 to store L1 host state while L2 runs but
4517          * VMCB01 is going to be used during SMM and thus the state will
4518          * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save
4519          * area pointed to by MSR_VM_HSAVE_PA. The APM guarantees that the
4520          * format of the area is identical to the guest save area, offset
4521          * by 0x400 (matches the offset of 'struct vmcb_save_area'
4522          * within 'struct vmcb'). Note: HSAVE area may also be used by
4523          * L1 hypervisor to save additional host context (e.g. KVM does
4524          * that, see svm_prepare_switch_to_guest()) which must be
4525          * preserved.
4526          */
4527         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4528                 return 1;
4529
4530         BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
4531
4532         svm_copy_vmrun_state(map_save.hva + 0x400,
4533                              &svm->vmcb01.ptr->save);
4534
4535         kvm_vcpu_unmap(vcpu, &map_save, true);
4536         return 0;
4537 }
4538
4539 static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
4540 {
4541         struct vcpu_svm *svm = to_svm(vcpu);
4542         struct kvm_host_map map, map_save;
4543         struct vmcb *vmcb12;
4544         int ret;
4545
4546         const struct kvm_smram_state_64 *smram64 = &smram->smram64;
4547
4548         if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
4549                 return 0;
4550
4551         /* Non-zero if SMI arrived while vCPU was in guest mode. */
4552         if (!smram64->svm_guest_flag)
4553                 return 0;
4554
4555         if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
4556                 return 1;
4557
4558         if (!(smram64->efer & EFER_SVME))
4559                 return 1;
4560
4561         if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram64->svm_guest_vmcb_gpa), &map))
4562                 return 1;
4563
4564         ret = 1;
4565         if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save))
4566                 goto unmap_map;
4567
4568         if (svm_allocate_nested(svm))
4569                 goto unmap_save;
4570
4571         /*
4572          * Restore L1 host state from L1 HSAVE area as VMCB01 was
4573          * used during SMM (see svm_enter_smm())
4574          */
4575
4576         svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
4577
4578         /*
4579          * Enter the nested guest now
4580          */
4581
4582         vmcb_mark_all_dirty(svm->vmcb01.ptr);
4583
4584         vmcb12 = map.hva;
4585         nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
4586         nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
4587         ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
4588
4589         if (ret)
4590                 goto unmap_save;
4591
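             /*
              * Treat the return to L2 like a just-executed VMRUN so that
              * pending events aren't injected until the nested entry
              * completes.
              */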
4592         svm->nested.nested_run_pending = 1;
4593
4594 unmap_save:
4595         kvm_vcpu_unmap(vcpu, &map_save, true);
4596 unmap_map:
4597         kvm_vcpu_unmap(vcpu, &map, true);
4598         return ret;
4599 }
4600
4601 static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
4602 {
4603         struct vcpu_svm *svm = to_svm(vcpu);
4604
4605         if (!gif_set(svm)) {
4606                 if (vgif)
4607                         svm_set_intercept(svm, INTERCEPT_STGI);
4608                 /* STGI will cause a vm exit */
4609         } else {
4610                 /* We must be in SMM; RSM will cause a vmexit anyway.  */
4611         }
4612 }
4613 #endif
4614
4615 static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
4616                                         void *insn, int insn_len)
4617 {
4618         bool smep, smap, is_user;
4619         u64 error_code;
4620
4621         /* Emulation is always possible when KVM has access to all guest state. */
4622         if (!sev_guest(vcpu->kvm))
4623                 return true;
4624
4625         /* #UD and #GP should never be intercepted for SEV guests. */
4626         WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
4627                                   EMULTYPE_TRAP_UD_FORCED |
4628                                   EMULTYPE_VMWARE_GP));
4629
4630         /*
4631          * Emulation is impossible for SEV-ES guests as KVM doesn't have access
4632          * to guest register state.
4633          */
4634         if (sev_es_guest(vcpu->kvm))
4635                 return false;
4636
4637         /*
4638          * Emulation is possible if the instruction is already decoded, e.g.
4639          * when completing I/O after returning from userspace.
4640          */
4641         if (emul_type & EMULTYPE_NO_DECODE)
4642                 return true;
4643
4644         /*
4645          * Emulation is possible for SEV guests if and only if a prefilled
4646          * buffer containing the bytes of the intercepted instruction is
4647          * available. SEV guest memory is encrypted with a guest specific key
4648          * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
4649          * decode garbage.
4650          *
4651          * Inject #UD if KVM reached this point without an instruction buffer.
4652          * In practice, this path should never be hit by a well-behaved guest,
4653          * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
4654          * is still theoretically reachable, e.g. via unaccelerated fault-like
4655          * AVIC access, and needs to be handled by KVM to avoid putting the
4656          * guest into an infinite loop.   Injecting #UD is somewhat arbitrary,
4657          * but it's the least awful option given the lack of insight into the guest.
4658          */
4659         if (unlikely(!insn)) {
4660                 kvm_queue_exception(vcpu, UD_VECTOR);
4661                 return false;
4662         }
4663
4664         /*
4665          * Emulate for SEV guests if the insn buffer is not empty.  The buffer
4666          * will be empty if the DecodeAssist microcode cannot fetch bytes for
4667          * the faulting instruction because the code fetch itself faulted, e.g.
4668          * the guest attempted to fetch from emulated MMIO or a guest page
4669          * table used to translate CS:RIP resides in emulated MMIO.
4670          */
4671         if (likely(insn_len))
4672                 return true;
4673
4674         /*
4675          * Detect and workaround Errata 1096 Fam_17h_00_0Fh.
4676          *
4677          * Errata:
4678          * When CPU raises #NPF on guest data access and vCPU CR4.SMAP=1, it is
4679          * possible that CPU microcode implementing DecodeAssist will fail to
4680          * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
4681          * be '0'.  This happens because microcode reads CS:RIP using a _data_
4682          * load uop with CPL=0 privileges.  If the load hits a SMAP #PF, ucode
4683          * gives up and does not fill the instruction bytes buffer.
4684          *
4685          * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
4686          * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
4687          * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
4688          * GuestIntrBytes field of the VMCB.
4689          *
4690          * This does _not_ mean that the erratum has been encountered, as the
4691          * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
4692          * #PF, e.g. if the guest attempted to execute from emulated MMIO and
4693          * encountered a reserved/not-present #PF.
4694          *
4695          * To hit the erratum, the following conditions must be true:
4696          *    1. CR4.SMAP=1 (obviously).
4697          *    2. CR4.SMEP=0 || CPL=3.  If SMEP=1 and CPL<3, the erratum cannot
4698          *       have been hit as the guest would have encountered a SMEP
4699          *       violation #PF, not a #NPF.
4700          *    3. The #NPF is not due to a code fetch, in which case failure to
4701          *       retrieve the instruction bytes is legitimate (see above).
4702          *
4703          * In addition, don't apply the erratum workaround if the #NPF occurred
4704          * while translating guest page tables (see below).
4705          */
4706         error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
4707         if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
4708                 goto resume_guest;
4709
4710         smep = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMEP);
4711         smap = kvm_is_cr4_bit_set(vcpu, X86_CR4_SMAP);
4712         is_user = svm_get_cpl(vcpu) == 3;
4713         if (smap && (!smep || is_user)) {
4714                 pr_err_ratelimited("SEV Guest triggered AMD Erratum 1096\n");
4715
4716                 /*
4717                  * If the fault occurred in userspace, arbitrarily inject #GP
4718                  * to avoid killing the guest and to hopefully avoid confusing
4719                  * the guest kernel too much, e.g. injecting #PF would not be
4720                  * coherent with respect to the guest's page tables.  Request
4721                  * triple fault if the fault occurred in the kernel as there's
4722                  * no fault that KVM can inject without confusing the guest.
4723                  * In practice, the triple fault is moot as no sane SEV kernel
4724                  * will execute from user memory while also running with SMAP=1.
4725                  */
4726                 if (is_user)
4727                         kvm_inject_gp(vcpu, 0);
4728                 else
4729                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4730         }
4731
4732 resume_guest:
4733         /*
4734          * If the erratum was not hit, simply resume the guest and let it fault
4735          * again.  While awful, e.g. the vCPU may get stuck in an infinite loop
4736          * if the fault is at CPL=0, it's the lesser of all evils.  Exiting to
4737          * userspace will kill the guest, and letting the emulator read garbage
4738          * will yield random behavior and potentially corrupt the guest.
4739          *
4740          * Simply resuming the guest is technically not a violation of the SEV
4741          * architecture.  AMD's APM states that all code fetches and page table
4742          * accesses for SEV guests are encrypted, regardless of the C-Bit.  The
4743          * APM also states that encrypted accesses to MMIO are "ignored", but
4744          * doesn't explicitly define "ignored", i.e. doing nothing and letting
4745          * the guest spin is technically "ignoring" the access.
4746          */
4747         return false;
4748 }
4749
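     /*
      * Per the APM, INIT is held pending while GIF is clear, so report the
      * INIT signal (and thus INIT-SIPI) as blocked in that case.
      */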
4750 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
4751 {
4752         struct vcpu_svm *svm = to_svm(vcpu);
4753
4754         return !gif_set(svm);
4755 }
4756
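     /*
      * For SEV-ES guests KVM cannot write CS:IP (register state is encrypted),
      * so SIPI delivery is handled by SEV-specific code via the guest's AP
      * reset hold protocol rather than by directly setting the start vector.
      */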
4757 static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
4758 {
4759         if (!sev_es_guest(vcpu->kvm))
4760                 return kvm_vcpu_deliver_sipi_vector(vcpu, vector);
4761
4762         sev_vcpu_deliver_sipi_vector(vcpu, vector);
4763 }
4764
4765 static void svm_vm_destroy(struct kvm *kvm)
4766 {
4767         avic_vm_destroy(kvm);
4768         sev_vm_destroy(kvm);
4769 }
4770
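     /*
      * Per-VM initialization: don't intercept PAUSE at all if pause filtering
      * is disabled, and set up AVIC state when APIC virtualization is enabled.
      */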
4771 static int svm_vm_init(struct kvm *kvm)
4772 {
4773         if (!pause_filter_count || !pause_filter_thresh)
4774                 kvm->arch.pause_in_guest = true;
4775
4776         if (enable_apicv) {
4777                 int ret = avic_vm_init(kvm);
4778                 if (ret)
4779                         return ret;
4780         }
4781
4782         return 0;
4783 }
4784
4785 static struct kvm_x86_ops svm_x86_ops __initdata = {
4786         .name = KBUILD_MODNAME,
4787
4788         .check_processor_compatibility = svm_check_processor_compat,
4789
4790         .hardware_unsetup = svm_hardware_unsetup,
4791         .hardware_enable = svm_hardware_enable,
4792         .hardware_disable = svm_hardware_disable,
4793         .has_emulated_msr = svm_has_emulated_msr,
4794
4795         .vcpu_create = svm_vcpu_create,
4796         .vcpu_free = svm_vcpu_free,
4797         .vcpu_reset = svm_vcpu_reset,
4798
4799         .vm_size = sizeof(struct kvm_svm),
4800         .vm_init = svm_vm_init,
4801         .vm_destroy = svm_vm_destroy,
4802
4803         .prepare_switch_to_guest = svm_prepare_switch_to_guest,
4804         .vcpu_load = svm_vcpu_load,
4805         .vcpu_put = svm_vcpu_put,
4806         .vcpu_blocking = avic_vcpu_blocking,
4807         .vcpu_unblocking = avic_vcpu_unblocking,
4808
4809         .update_exception_bitmap = svm_update_exception_bitmap,
4810         .get_msr_feature = svm_get_msr_feature,
4811         .get_msr = svm_get_msr,
4812         .set_msr = svm_set_msr,
4813         .get_segment_base = svm_get_segment_base,
4814         .get_segment = svm_get_segment,
4815         .set_segment = svm_set_segment,
4816         .get_cpl = svm_get_cpl,
4817         .get_cs_db_l_bits = svm_get_cs_db_l_bits,
4818         .set_cr0 = svm_set_cr0,
4819         .post_set_cr3 = sev_post_set_cr3,
4820         .is_valid_cr4 = svm_is_valid_cr4,
4821         .set_cr4 = svm_set_cr4,
4822         .set_efer = svm_set_efer,
4823         .get_idt = svm_get_idt,
4824         .set_idt = svm_set_idt,
4825         .get_gdt = svm_get_gdt,
4826         .set_gdt = svm_set_gdt,
4827         .set_dr7 = svm_set_dr7,
4828         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
4829         .cache_reg = svm_cache_reg,
4830         .get_rflags = svm_get_rflags,
4831         .set_rflags = svm_set_rflags,
4832         .get_if_flag = svm_get_if_flag,
4833
4834         .flush_tlb_all = svm_flush_tlb_all,
4835         .flush_tlb_current = svm_flush_tlb_current,
4836         .flush_tlb_gva = svm_flush_tlb_gva,
4837         .flush_tlb_guest = svm_flush_tlb_asid,
4838
4839         .vcpu_pre_run = svm_vcpu_pre_run,
4840         .vcpu_run = svm_vcpu_run,
4841         .handle_exit = svm_handle_exit,
4842         .skip_emulated_instruction = svm_skip_emulated_instruction,
4843         .update_emulated_instruction = NULL,
4844         .set_interrupt_shadow = svm_set_interrupt_shadow,
4845         .get_interrupt_shadow = svm_get_interrupt_shadow,
4846         .patch_hypercall = svm_patch_hypercall,
4847         .inject_irq = svm_inject_irq,
4848         .inject_nmi = svm_inject_nmi,
4849         .is_vnmi_pending = svm_is_vnmi_pending,
4850         .set_vnmi_pending = svm_set_vnmi_pending,
4851         .inject_exception = svm_inject_exception,
4852         .cancel_injection = svm_cancel_injection,
4853         .interrupt_allowed = svm_interrupt_allowed,
4854         .nmi_allowed = svm_nmi_allowed,
4855         .get_nmi_mask = svm_get_nmi_mask,
4856         .set_nmi_mask = svm_set_nmi_mask,
4857         .enable_nmi_window = svm_enable_nmi_window,
4858         .enable_irq_window = svm_enable_irq_window,
4859         .update_cr8_intercept = svm_update_cr8_intercept,
4860         .set_virtual_apic_mode = avic_refresh_virtual_apic_mode,
4861         .refresh_apicv_exec_ctrl = avic_refresh_apicv_exec_ctrl,
4862         .apicv_post_state_restore = avic_apicv_post_state_restore,
4863         .required_apicv_inhibits = AVIC_REQUIRED_APICV_INHIBITS,
4864
4865         .get_exit_info = svm_get_exit_info,
4866
4867         .vcpu_after_set_cpuid = svm_vcpu_after_set_cpuid,
4868
4869         .has_wbinvd_exit = svm_has_wbinvd_exit,
4870
4871         .get_l2_tsc_offset = svm_get_l2_tsc_offset,
4872         .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
4873         .write_tsc_offset = svm_write_tsc_offset,
4874         .write_tsc_multiplier = svm_write_tsc_multiplier,
4875
4876         .load_mmu_pgd = svm_load_mmu_pgd,
4877
4878         .check_intercept = svm_check_intercept,
4879         .handle_exit_irqoff = svm_handle_exit_irqoff,
4880
4881         .request_immediate_exit = __kvm_request_immediate_exit,
4882
4883         .sched_in = svm_sched_in,
4884
4885         .nested_ops = &svm_nested_ops,
4886
4887         .deliver_interrupt = svm_deliver_interrupt,
4888         .pi_update_irte = avic_pi_update_irte,
4889         .setup_mce = svm_setup_mce,
4890
4891 #ifdef CONFIG_KVM_SMM
4892         .smi_allowed = svm_smi_allowed,
4893         .enter_smm = svm_enter_smm,
4894         .leave_smm = svm_leave_smm,
4895         .enable_smi_window = svm_enable_smi_window,
4896 #endif
4897
4898         .mem_enc_ioctl = sev_mem_enc_ioctl,
4899         .mem_enc_register_region = sev_mem_enc_register_region,
4900         .mem_enc_unregister_region = sev_mem_enc_unregister_region,
4901         .guest_memory_reclaimed = sev_guest_memory_reclaimed,
4902
4903         .vm_copy_enc_context_from = sev_vm_copy_enc_context_from,
4904         .vm_move_enc_context_from = sev_vm_move_enc_context_from,
4905
4906         .can_emulate_instruction = svm_can_emulate_instruction,
4907
4908         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
4909
4910         .msr_filter_changed = svm_msr_filter_changed,
4911         .complete_emulated_msr = svm_complete_emulated_msr,
4912
4913         .vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
4914         .vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
4915 };
4916
4917 /*
4918  * The default MMIO mask is a single bit (excluding the present bit),
4919  * which could conflict with the memory encryption bit. Check for
4920  * memory encryption support and override the default MMIO mask if
4921  * memory encryption is enabled.
4922  */
4923 static __init void svm_adjust_mmio_mask(void)
4924 {
4925         unsigned int enc_bit, mask_bit;
4926         u64 msr, mask;
4927
4928         /* If there is no memory encryption support, use existing mask */
4929         if (cpuid_eax(0x80000000) < 0x8000001f)
4930                 return;
4931
4932         /* If memory encryption is not enabled, use existing mask */
4933         rdmsrl(MSR_AMD64_SYSCFG, msr);
4934         if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
4935                 return;
4936
4937         enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
4938         mask_bit = boot_cpu_data.x86_phys_bits;
4939
4940         /* Increment the mask bit if it is the same as the encryption bit */
4941         if (enc_bit == mask_bit)
4942                 mask_bit++;
4943
4944         /*
4945          * If the mask bit location is below 52, then some bits above the
4946          * physical addressing limit will always be reserved, so use the
4947          * rsvd_bits() function to generate the mask. This mask, along with
4948          * the present bit, will be used to generate a page fault with
4949          * PFER.RSV = 1.
4950          * PFEC.RSVD = 1.
4951          * If the mask bit location is 52 (or above), then clear the mask.
4952          */
4953         mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
4954
4955         kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
4956 }
4957
4958 static __init void svm_set_cpu_caps(void)
4959 {
4960         kvm_set_cpu_caps();
4961
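             /*
              * Neither PERF_CAPABILITIES nor any XSS-based features are
              * currently virtualized on SVM.
              */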
4962         kvm_caps.supported_perf_cap = 0;
4963         kvm_caps.supported_xss = 0;
4964
4965         /* CPUID 0x80000001 and 0x8000000A (SVM features) */
4966         if (nested) {
4967                 kvm_cpu_cap_set(X86_FEATURE_SVM);
4968                 kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
4969
4970                 if (nrips)
4971                         kvm_cpu_cap_set(X86_FEATURE_NRIPS);
4972
4973                 if (npt_enabled)
4974                         kvm_cpu_cap_set(X86_FEATURE_NPT);
4975
4976                 if (tsc_scaling)
4977                         kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
4978
4979                 if (vls)
4980                         kvm_cpu_cap_set(X86_FEATURE_V_VMSAVE_VMLOAD);
4981                 if (lbrv)
4982                         kvm_cpu_cap_set(X86_FEATURE_LBRV);
4983
4984                 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER))
4985                         kvm_cpu_cap_set(X86_FEATURE_PAUSEFILTER);
4986
4987                 if (boot_cpu_has(X86_FEATURE_PFTHRESHOLD))
4988                         kvm_cpu_cap_set(X86_FEATURE_PFTHRESHOLD);
4989
4990                 if (vgif)
4991                         kvm_cpu_cap_set(X86_FEATURE_VGIF);
4992
4993                 if (vnmi)
4994                         kvm_cpu_cap_set(X86_FEATURE_VNMI);
4995
4996                 /* Nested VM can receive #VMEXIT instead of triggering #GP */
4997                 kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
4998         }
4999
5000         /* CPUID 0x80000008 */
5001         if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5002             boot_cpu_has(X86_FEATURE_AMD_SSBD))
5003                 kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
5004
5005         if (enable_pmu) {
5006                 /*
5007                  * Enumerate support for PERFCTR_CORE if and only if KVM has
5008                  * access to enough counters to virtualize "core" support,
5009                  * otherwise limit vPMU support to the legacy number of counters.
5010                  */
5011                 if (kvm_pmu_cap.num_counters_gp < AMD64_NUM_COUNTERS_CORE)
5012                         kvm_pmu_cap.num_counters_gp = min(AMD64_NUM_COUNTERS,
5013                                                           kvm_pmu_cap.num_counters_gp);
5014                 else
5015                         kvm_cpu_cap_check_and_set(X86_FEATURE_PERFCTR_CORE);
5016
5017                 if (kvm_pmu_cap.version != 2 ||
5018                     !kvm_cpu_cap_has(X86_FEATURE_PERFCTR_CORE))
5019                         kvm_cpu_cap_clear(X86_FEATURE_PERFMON_V2);
5020         }
5021
5022         /* CPUID 0x8000001F (SME/SEV features) */
5023         sev_set_cpu_caps();
5024 }
5025
5026 static __init int svm_hardware_setup(void)
5027 {
5028         int cpu;
5029         struct page *iopm_pages;
5030         void *iopm_va;
5031         int r;
5032         unsigned int order = get_order(IOPM_SIZE);
5033
5034         /*
5035          * NX is required for shadow paging and for NPT if the NX huge pages
5036          * mitigation is enabled.
5037          */
5038         if (!boot_cpu_has(X86_FEATURE_NX)) {
5039                 pr_err_ratelimited("NX (Execute Disable) not supported\n");
5040                 return -EOPNOTSUPP;
5041         }
5042         kvm_enable_efer_bits(EFER_NX);
5043
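             /*
              * Allocate the I/O permission map; all bits are then set so that
              * every I/O port access is intercepted by default.
              */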
5044         iopm_pages = alloc_pages(GFP_KERNEL, order);
5045
5046         if (!iopm_pages)
5047                 return -ENOMEM;
5048
5049         iopm_va = page_address(iopm_pages);
5050         memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
5051         iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
5052
5053         init_msrpm_offsets();
5054
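             /* MPX (BNDREGS/BNDCSR) isn't supported on AMD CPUs; don't expose its XSAVE state. */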
5055         kvm_caps.supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
5056                                      XFEATURE_MASK_BNDCSR);
5057
5058         if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
5059                 kvm_enable_efer_bits(EFER_FFXSR);
5060
5061         if (tsc_scaling) {
5062                 if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
5063                         tsc_scaling = false;
5064                 } else {
5065                         pr_info("TSC scaling supported\n");
5066                         kvm_caps.has_tsc_control = true;
5067                 }
5068         }
5069         kvm_caps.max_tsc_scaling_ratio = SVM_TSC_RATIO_MAX;
5070         kvm_caps.tsc_scaling_ratio_frac_bits = 32;
5071
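             /* MSR_TSC_AUX is context switched lazily via KVM's user-return MSR mechanism. */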
5072         tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
5073
5074         if (boot_cpu_has(X86_FEATURE_AUTOIBRS))
5075                 kvm_enable_efer_bits(EFER_AUTOIBRS);
5076
5077         /* Check for pause filtering support */
5078         if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
5079                 pause_filter_count = 0;
5080                 pause_filter_thresh = 0;
5081         } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
5082                 pause_filter_thresh = 0;
5083         }
5084
5085         if (nested) {
5086                 pr_info("Nested Virtualization enabled\n");
5087                 kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
5088         }
5089
5090         /*
5091          * KVM's MMU doesn't support using 2-level paging for itself, and thus
5092          * NPT isn't supported if the host is using 2-level paging since host
5093          * CR4 is unchanged on VMRUN.
5094          */
5095         if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
5096                 npt_enabled = false;
5097
5098         if (!boot_cpu_has(X86_FEATURE_NPT))
5099                 npt_enabled = false;
5100
5101         /* Force VM NPT level equal to the host's paging level */
5102         kvm_configure_mmu(npt_enabled, get_npt_level(),
5103                           get_npt_level(), PG_LEVEL_1G);
5104         pr_info("Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
5105
5106         /* Setup shadow_me_value and shadow_me_mask */
5107         kvm_mmu_set_me_spte_mask(sme_me_mask, sme_me_mask);
5108
5109         svm_adjust_mmio_mask();
5110
5111         /*
5112          * Note, SEV setup consumes npt_enabled and enable_mmio_caching (which
5113          * may be modified by svm_adjust_mmio_mask()).
5114          */
5115         sev_hardware_setup();
5116
5117         svm_hv_hardware_setup();
5118
5119         for_each_possible_cpu(cpu) {
5120                 r = svm_cpu_init(cpu);
5121                 if (r)
5122                         goto err;
5123         }
5124
5125         if (nrips) {
5126                 if (!boot_cpu_has(X86_FEATURE_NRIPS))
5127                         nrips = false;
5128         }
5129
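             /*
              * AVIC (and thus APICv) is enabled only if the module parameter
              * requested it and avic_hardware_setup() confirms the required
              * hardware support.
              */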
5130         enable_apicv = avic = avic && avic_hardware_setup();
5131
5132         if (!enable_apicv) {
5133                 svm_x86_ops.vcpu_blocking = NULL;
5134                 svm_x86_ops.vcpu_unblocking = NULL;
5135                 svm_x86_ops.vcpu_get_apicv_inhibit_reasons = NULL;
5136         } else if (!x2avic_enabled) {
5137                 svm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization = true;
5138         }
5139
5140         if (vls) {
5141                 if (!npt_enabled ||
5142                     !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
5143                     !IS_ENABLED(CONFIG_X86_64)) {
5144                         vls = false;
5145                 } else {
5146                         pr_info("Virtual VMLOAD VMSAVE supported\n");
5147                 }
5148         }
5149
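             /*
              * With SVME_ADDR_CHK the CPU delivers a proper #VMEXIT instead of
              * #GP for SVM instructions (see the SVME_ADDR_CHK handling in
              * svm_set_cpu_caps()), so the #GP-interception erratum workaround
              * isn't needed.
              */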
5150         if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
5151                 svm_gp_erratum_intercept = false;
5152
5153         if (vgif) {
5154                 if (!boot_cpu_has(X86_FEATURE_VGIF))
5155                         vgif = false;
5156                 else
5157                         pr_info("Virtual GIF supported\n");
5158         }
5159
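             /* vNMI requires vGIF in addition to the CPU's VNMI feature. */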
5160         vnmi = vgif && vnmi && boot_cpu_has(X86_FEATURE_VNMI);
5161         if (vnmi)
5162                 pr_info("Virtual NMI enabled\n");
5163
5164         if (!vnmi) {
5165                 svm_x86_ops.is_vnmi_pending = NULL;
5166                 svm_x86_ops.set_vnmi_pending = NULL;
5167         }
5168
5169
5170         if (lbrv) {
5171                 if (!boot_cpu_has(X86_FEATURE_LBRV))
5172                         lbrv = false;
5173                 else
5174                         pr_info("LBR virtualization supported\n");
5175         }
5176
5177         if (!enable_pmu)
5178                 pr_info("PMU virtualization is disabled\n");
5179
5180         svm_set_cpu_caps();
5181
5182         /*
5183          * It seems that on AMD processors the PTE's accessed bit is
5184          * set by the CPU hardware before the NPF vmexit.
5185          * This is not the expected behaviour and our tests fail because
5186          * of it.
5187          * The workaround here is to disable support for
5188          * GUEST_MAXPHYADDR < HOST_MAXPHYADDR when NPT is enabled.
5189          * In this case userspace can query support via the
5190          * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
5191          * the lack of it.
5192          * If future AMD CPU models change the behaviour described above,
5193          * this variable can be changed accordingly.
5194          */
5195         allow_smaller_maxphyaddr = !npt_enabled;
5196
5197         return 0;
5198
5199 err:
5200         svm_hardware_unsetup();
5201         return r;
5202 }
5203
5204
5205 static struct kvm_x86_init_ops svm_init_ops __initdata = {
5206         .hardware_setup = svm_hardware_setup,
5207
5208         .runtime_ops = &svm_x86_ops,
5209         .pmu_ops = &amd_pmu_ops,
5210 };
5211
5212 static int __init svm_init(void)
5213 {
5214         int r;
5215
5216         __unused_size_checks();
5217
5218         if (!kvm_is_svm_supported())
5219                 return -EOPNOTSUPP;
5220
5221         r = kvm_x86_vendor_init(&svm_init_ops);
5222         if (r)
5223                 return r;
5224
5225         /*
5226          * Common KVM initialization _must_ come last, after this, /dev/kvm is
5227          * exposed to userspace!
5228          */
5229         r = kvm_init(sizeof(struct vcpu_svm), __alignof__(struct vcpu_svm),
5230                      THIS_MODULE);
5231         if (r)
5232                 goto err_kvm_init;
5233
5234         return 0;
5235
5236 err_kvm_init:
5237         kvm_x86_vendor_exit();
5238         return r;
5239 }
5240
5241 static void __exit svm_exit(void)
5242 {
5243         kvm_exit();
5244         kvm_x86_vendor_exit();
5245 }
5246
5247 module_init(svm_init)
5248 module_exit(svm_exit)