arch/x86/kvm/svm.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/spec-ctrl.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_SVM),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL       0xc001011b

#define NESTED_EXIT_HOST        0       /* Exit handled on host level */
#define NESTED_EXIT_DONE        1       /* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE    2       /* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN           0x0000000000000001ULL
#define TSC_RATIO_MAX           0x000000ffffffffffULL

#define AVIC_HPA_MASK   ~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT      255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK          1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK         0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK         0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS               8
#define AVIC_VCPU_ID_MASK               ((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS                 24
#define AVIC_VM_ID_NR                   (1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK                 ((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)                (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
                                                (y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)           ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)         (x & AVIC_VCPU_ID_MASK)
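/*
 * For example, AVIC_GATAG(0x1234, 0x56) = (0x1234 << 8) | 0x56 = 0x123456,
 * and decoding gives AVIC_GATAG_TO_VMID(0x123456) = 0x1234 and
 * AVIC_GATAG_TO_VCPUID(0x123456) = 0x56.
 */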

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
        MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
        MSR_FS_BASE,
#endif
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_sev_info {
        bool active;            /* SEV enabled guest */
        unsigned int asid;      /* ASID used for this guest */
        unsigned int handle;    /* SEV firmware handle */
        int fd;                 /* SEV device fd */
        unsigned long pages_locked; /* Number of pages locked */
        struct list_head regions_list;  /* List of registered regions */
};

struct kvm_svm {
        struct kvm kvm;

        /* Struct members for AVIC */
        u32 avic_vm_id;
        struct page *avic_logical_id_table_page;
        struct page *avic_physical_id_table_page;
        struct hlist_node hnode;

        struct kvm_sev_info sev_info;
};

struct kvm_vcpu;

struct nested_state {
        struct vmcb *hsave;
        u64 hsave_msr;
        u64 vm_cr_msr;
        u64 vmcb;

        /* These are the merged vectors */
        u32 *msrpm;

        /* gpa pointers to the real vectors */
        u64 vmcb_msrpm;
        u64 vmcb_iopm;

        /* A VMEXIT is required but not yet emulated */
        bool exit_required;

        /* cache for intercepts of the guest */
        u32 intercept_cr;
        u32 intercept_dr;
        u32 intercept_exceptions;
        u64 intercept;

        /* Nested Paging related state */
        u64 nested_cr3;
};

#define MSRPM_OFFSETS   16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
/*
 * Set osvw_len to a higher value when updated Revision Guides
 * are published and we know what the new status bits are.
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
        struct kvm_vcpu vcpu;
        struct vmcb *vmcb;
        unsigned long vmcb_pa;
        struct svm_cpu_data *svm_data;
        uint64_t asid_generation;
        uint64_t sysenter_esp;
        uint64_t sysenter_eip;
        uint64_t tsc_aux;

        u64 msr_decfg;

        u64 next_rip;

        u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
        struct {
                u16 fs;
                u16 gs;
                u16 ldt;
                u64 gs_base;
        } host;

        u64 spec_ctrl;
        /*
         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
         * translated into the appropriate L2_CFG bits on the host to
         * perform speculative control.
         */
        u64 virt_spec_ctrl;

        u32 *msrpm;

        ulong nmi_iret_rip;

        struct nested_state nested;

        bool nmi_singlestep;
        u64 nmi_singlestep_guest_rflags;

        unsigned int3_injected;
        unsigned long int3_rip;

        /* cached guest cpuid flags for faster access */
        bool nrips_enabled      : 1;

        u32 ldr_reg;
        u32 dfr_reg;
        struct page *avic_backing_page;
        u64 *avic_physical_id_cache;
        bool avic_is_running;

        /*
         * Per-vcpu list of struct amd_svm_iommu_ir:
         * This is used mainly to store interrupt remapping information used
         * when updating the vCPU affinity. This avoids the need to scan for
         * IRTE and try to match ga_tag in the IOMMU driver.
         */
        struct list_head ir_list;
        spinlock_t ir_list_lock;

        /* which host CPU was used for running this vcpu */
        unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
        struct list_head node;  /* Used by SVM for per-vcpu ir_list */
        void *data;             /* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK    (0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT                 31
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK                (1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK    (0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK        (0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK          (1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK               (1ULL << 63)
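/*
 * Illustration: a valid physical APIC ID table entry is built from the
 * masks above, roughly
 *
 *   entry = (backing_page_pa & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 *           AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
 *
 * with the IS_RUNNING bit toggled as the vCPU is loaded and put (see
 * avic_init_backing_page() and avic_vcpu_is_running() below).
 */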

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT       0x0100000000ULL
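/*
 * The TSC ratio MSR holds an 8.32 fixed-point value (bits 39:32 integer,
 * bits 31:0 fraction), so TSC_RATIO_DEFAULT (0x0100000000) encodes a
 * ratio of 1.0; 0x0180000000, for example, would encode 1.5.
 */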

#define MSR_INVALID                     0xffffffffU

static const struct svm_direct_access_msrs {
        u32 index;   /* Index of the MSR */
        bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
        { .index = MSR_STAR,                            .always = true  },
        { .index = MSR_IA32_SYSENTER_CS,                .always = true  },
#ifdef CONFIG_X86_64
        { .index = MSR_GS_BASE,                         .always = true  },
        { .index = MSR_FS_BASE,                         .always = true  },
        { .index = MSR_KERNEL_GS_BASE,                  .always = true  },
        { .index = MSR_LSTAR,                           .always = true  },
        { .index = MSR_CSTAR,                           .always = true  },
        { .index = MSR_SYSCALL_MASK,                    .always = true  },
#endif
        { .index = MSR_IA32_SPEC_CTRL,                  .always = false },
        { .index = MSR_IA32_PRED_CMD,                   .always = false },
        { .index = MSR_IA32_LASTBRANCHFROMIP,           .always = false },
        { .index = MSR_IA32_LASTBRANCHTOIP,             .always = false },
        { .index = MSR_IA32_LASTINTFROMIP,              .always = false },
        { .index = MSR_IA32_LASTINTTOIP,                .always = false },
        { .index = MSR_INVALID,                         .always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * pause_filter_count: On processors that support Pause filtering (indicated
 *      by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *      count value. On VMRUN this value is loaded into an internal counter.
 *      Each time a pause instruction is executed, this counter is decremented
 *      until it reaches zero, at which time a #VMEXIT is generated if pause
 *      intercept is enabled. Refer to AMD APM Vol 2 Section 15.14.4 Pause
 *      Intercept Filtering for more details.
 *      This also indicates whether PLE logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *      pause filtering (indicated by CPUID Fn8000_000A_EDX), which places an
 *      upper bound on the amount of time a guest is allowed to execute in a
 *      pause loop. In this mode, a 16-bit pause filter threshold field is
 *      added to the VMCB. The threshold value is a cycle count that is used
 *      to reset the pause counter. As with simple pause filtering, VMRUN
 *      loads the pause count value from the VMCB into an internal counter.
 *      Then, on each pause instruction the hardware checks the elapsed number
 *      of cycles since the most recent pause instruction against the pause
 *      filter threshold. If the elapsed cycle count is greater than the pause
 *      filter threshold, then the internal pause count is reloaded from the
 *      VMCB and execution continues. If the elapsed cycle count is less than
 *      the pause filter threshold, then the internal pause count is
 *      decremented. If the count value is less than zero and PAUSE intercept
 *      is enabled, a #VMEXIT is triggered. If advanced pause filtering is
 *      supported and the pause filter threshold field is set to zero, the
 *      filter will operate in the simpler, count-only mode.
 */

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);
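/*
 * Example (assuming the usual defaults, count 3000 and grow factor 2):
 * a vCPU that keeps PAUSE-looping has its filter count doubled on each
 * PAUSE exit, 3000 -> 6000 -> 12000 -> ..., clamped at
 * pause_filter_count_max, while shrink resets it back toward
 * pause_filter_count (see grow_ple_window()/shrink_ple_window() below).
 */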

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable / disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);

enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
                            pause filter count */
        VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
        VMCB_ASID,       /* ASID */
        VMCB_INTR,       /* int_ctl, int_vector */
        VMCB_NPT,        /* npt_en, nCR3, gPAT */
        VMCB_CR,         /* CR0, CR3, CR4, EFER */
        VMCB_DR,         /* DR6, DR7 */
        VMCB_DT,         /* GDT, IDT */
        VMCB_SEG,        /* CS, DS, SS, ES, CPL */
        VMCB_CR2,        /* CR2 only */
        VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
        VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
                          * AVIC PHYSICAL_TABLE pointer,
                          * AVIC LOGICAL_TABLE pointer
                          */
        VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK  ((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK         0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};


static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}

static inline bool svm_sev_enabled(void)
{
        return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0;
}

static inline bool sev_guest(struct kvm *kvm)
{
#ifdef CONFIG_KVM_AMD_SEV
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->active;
#else
        return false;
#endif
}

static inline int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

static inline void mark_all_dirty(struct vmcb *vmcb)
{
        vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
        vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
                               & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
        vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
        return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
        svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
        mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 *entry = svm->avic_physical_id_cache;

        if (!entry)
                return false;

        return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

static void recalc_intercepts(struct vcpu_svm *svm)
{
        struct vmcb_control_area *c, *h;
        struct nested_state *g;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

        if (!is_guest_mode(&svm->vcpu))
                return;

        c = &svm->vmcb->control;
        h = &svm->nested.hsave->control;
        g = &svm->nested;

        c->intercept_cr = h->intercept_cr | g->intercept_cr;
        c->intercept_dr = h->intercept_dr | g->intercept_dr;
        c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
        c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
        if (is_guest_mode(&svm->vcpu))
                return svm->nested.hsave;
        else
                return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_cr |= (1U << bit);

        recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_cr &= ~(1U << bit);

        recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
                | (1 << INTERCEPT_DR1_READ)
                | (1 << INTERCEPT_DR2_READ)
                | (1 << INTERCEPT_DR3_READ)
                | (1 << INTERCEPT_DR4_READ)
                | (1 << INTERCEPT_DR5_READ)
                | (1 << INTERCEPT_DR6_READ)
                | (1 << INTERCEPT_DR7_READ)
                | (1 << INTERCEPT_DR0_WRITE)
                | (1 << INTERCEPT_DR1_WRITE)
                | (1 << INTERCEPT_DR2_WRITE)
                | (1 << INTERCEPT_DR3_WRITE)
                | (1 << INTERCEPT_DR4_WRITE)
                | (1 << INTERCEPT_DR5_WRITE)
                | (1 << INTERCEPT_DR6_WRITE)
                | (1 << INTERCEPT_DR7_WRITE);

        recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_dr = 0;

        recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_exceptions |= (1U << bit);

        recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept_exceptions &= ~(1U << bit);

        recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept |= (1ULL << bit);

        recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
        struct vmcb *vmcb = get_host_vmcb(svm);

        vmcb->control.intercept &= ~(1ULL << bit);

        recalc_intercepts(svm);
}

static inline bool vgif_enabled(struct vcpu_svm *svm)
{
        return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl |= V_GIF_MASK;
        else
                svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
        else
                svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
        if (vgif_enabled(svm))
                return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
        else
                return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
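/*
 * In short: with vGIF, the guest's GIF lives in the V_GIF_MASK bit of
 * int_ctl and is toggled by hardware on STGI/CLGI without a #VMEXIT;
 * without vGIF, KVM tracks GIF in vcpu->arch.hflags (HF_GIF_MASK) and
 * must intercept those instructions (see init_vmcb() below).
 */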

static unsigned long iopm_base;

struct kvm_ldttss_desc {
        u16 limit0;
        u16 base0;
        unsigned base1:8, type:5, dpl:2, p:1;
        unsigned limit1:4, zero0:3, g:1, base2:8;
        u32 base3;
        u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
        int cpu;

        u64 asid_generation;
        u32 max_asid;
        u32 next_asid;
        u32 min_asid;
        struct kvm_ldttss_desc *tss_desc;

        struct page *save_area;
        struct vmcb *current_vmcb;

        /* index = sev_asid, value = vmcb pointer */
        struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

static u32 svm_msrpm_offset(u32 msr)
{
        u32 offset;
        int i;

        for (i = 0; i < NUM_MSR_MAPS; i++) {
                if (msr < msrpm_ranges[i] ||
                    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                        continue;

                offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
                offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

                /* Now we have the u8 offset - but need the u32 offset */
                return offset / 4;
        }

        /* MSR not in any range */
        return MSR_INVALID;
}
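/*
 * Worked example: MSR_STAR (0xc0000081) lies in the second range, so its
 * byte offset is (0x81 / 4) + MSRS_RANGE_SIZE = 32 + 2048 = 2080, and
 * svm_msrpm_offset() returns the u32 offset 2080 / 4 = 520; each MSR
 * then occupies two bits (read and write) within that u32.
 */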

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
        asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
        asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
        asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return PT64_ROOT_4LEVEL;
#else
        return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        vcpu->arch.efer = efer;
        if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;

        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;

        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
                ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
        return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (mask == 0)
                svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
        else
                svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (nrips && svm->vmcb->control.next_rip != 0) {
                WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
                svm->next_rip = svm->vmcb->control.next_rip;
        }

        if (!svm->next_rip) {
                if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
                        return 0;
        } else {
                if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
                        pr_err("%s: ip 0x%lx next 0x%llx\n",
                               __func__, kvm_rip_read(vcpu), svm->next_rip);
                kvm_rip_write(vcpu, svm->next_rip);
        }
        svm_set_interrupt_shadow(vcpu, 0);

        return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        unsigned nr = vcpu->arch.exception.nr;
        bool has_error_code = vcpu->arch.exception.has_error_code;
        bool reinject = vcpu->arch.exception.injected;
        u32 error_code = vcpu->arch.exception.error_code;

        /*
         * If we are within a nested VM we'd better #VMEXIT and let the guest
         * handle the exception
         */
        if (!reinject &&
            nested_svm_check_exception(svm, nr, has_error_code, error_code))
                return;

        kvm_deliver_exception_payload(&svm->vcpu);

        if (nr == BP_VECTOR && !nrips) {
                unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

                /*
                 * For guest debugging where we have to reinject #BP if some
                 * INT3 is guest-owned:
                 * Emulate nRIP by moving RIP forward. Will fail if injection
                 * raises a fault that is not intercepted. Still better than
                 * failing in all cases.
                 */
                (void)skip_emulated_instruction(&svm->vcpu);
                rip = kvm_rip_read(&svm->vcpu);
                svm->int3_rip = rip + svm->vmcb->save.cs.base;
                svm->int3_injected = rip - old_rip;
        }

        svm->vmcb->control.event_inj = nr
                | SVM_EVTINJ_VALID
                | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
                | SVM_EVTINJ_TYPE_EXEPT;
        svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
        u32 low, high;
        int err;
        u64 val;

        if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
                return;

        /* Use _safe variants to not break nested virtualization */
        val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
        if (err)
                return;

        val |= (1ULL << 47);

        low  = lower_32_bits(val);
        high = upper_32_bits(val);

        native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

        erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
        /*
         * Guests should see errata 400 and 415 as fixed (assuming that
         * HLT and IO instructions are intercepted).
         */
        vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
        vcpu->arch.osvw.status = osvw_status & ~(6ULL);

        /*
         * By increasing VCPU's osvw.length to 3 we are telling the guest that
         * all osvw.status bits inside that length, including bit 0 (which is
         * reserved for erratum 298), are valid. However, if host processor's
         * osvw_len is 0 then osvw_status[0] carries no information. We need to
         * be conservative here and therefore we tell the guest that erratum 298
         * is present (because we really don't know).
         */
        if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
                vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
        const char *msg;

        if (!cpu_has_svm(&msg)) {
                printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }

        return 1;
}

static void svm_hardware_disable(void)
{
        /* Make sure we clean up behind us */
        if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

        cpu_svm_disable();

        amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

        struct svm_cpu_data *sd;
        uint64_t efer;
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();

        rdmsrl(MSR_EFER, efer);
        if (efer & EFER_SVME)
                return -EBUSY;

        if (!has_svm()) {
                pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
                return -EINVAL;
        }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
                return -EINVAL;
        }

        sd->asid_generation = 1;
        sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        sd->next_asid = sd->max_asid + 1;
        sd->min_asid = max_sev_asid + 1;

        gdt = get_current_gdt_rw();
        sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

        wrmsrl(MSR_EFER, efer | EFER_SVME);

        wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

        if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
                __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
        }


        /*
         * Get OSVW bits.
         *
         * Note that it is possible to have a system with mixed processor
         * revisions and therefore different OSVW bits. If bits are not the same
         * on different processors then choose the worst case (i.e. if erratum
         * is present on one processor and not on another then assume that the
         * erratum is present everywhere).
         */
        if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
                uint64_t len, status = 0;
                int err;

                len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
                if (!err)
                        status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
                                                      &err);

                if (err)
                        osvw_status = osvw_len = 0;
                else {
                        if (len < osvw_len)
                                osvw_len = len;
                        osvw_status |= status;
                        osvw_status &= (1ULL << osvw_len) - 1;
                }
        } else
                osvw_status = osvw_len = 0;

        svm_init_erratum_383();

        amd_pmu_enable_virt();

        return 0;
}

static void svm_cpu_uninit(int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

        if (!sd)
                return;

        per_cpu(svm_data, raw_smp_processor_id()) = NULL;
        kfree(sd->sev_vmcbs);
        __free_page(sd->save_area);
        kfree(sd);
}

static int svm_cpu_init(int cpu)
{
        struct svm_cpu_data *sd;
        int r;

        sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
        if (!sd)
                return -ENOMEM;
        sd->cpu = cpu;
        r = -ENOMEM;
        sd->save_area = alloc_page(GFP_KERNEL);
        if (!sd->save_area)
                goto err_1;

        if (svm_sev_enabled()) {
                r = -ENOMEM;
                sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
                                              sizeof(void *),
                                              GFP_KERNEL);
                if (!sd->sev_vmcbs)
                        goto err_1;
        }

        per_cpu(svm_data, cpu) = sd;

        return 0;

err_1:
        kfree(sd);
        return r;

}

static bool valid_msr_intercept(u32 index)
{
        int i;

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
                if (direct_access_msrs[i].index == index)
                        return true;

        return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
        u8 bit_write;
        unsigned long tmp;
        u32 offset;
        u32 *msrpm;

        msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
                                      to_svm(vcpu)->msrpm;

        offset    = svm_msrpm_offset(msr);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        return !!test_bit(bit_write,  &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
                                 int read, int write)
{
        u8 bit_read, bit_write;
        unsigned long tmp;
        u32 offset;
        /*
         * If this warning triggers, extend the direct_access_msrs list at the
         * beginning of the file.
         */
        WARN_ON(!valid_msr_intercept(msr));

        offset    = svm_msrpm_offset(msr);
        bit_read  = 2 * (msr & 0x0f);
        bit_write = 2 * (msr & 0x0f) + 1;
        tmp       = msrpm[offset];

        BUG_ON(offset == MSR_INVALID);

        read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
        write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

        msrpm[offset] = tmp;
}
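/*
 * For example, svm_set_msr() (later in this file) stops intercepting
 * MSR_IA32_SPEC_CTRL after the first guest write with:
 *
 *   set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
 *
 * where read = 1 / write = 1 means "allow direct access", i.e. the
 * intercept bits are cleared.
 */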

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
        int i;

        memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                if (!direct_access_msrs[i].always)
                        continue;

                set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
        }
}

static void add_msr_offset(u32 offset)
{
        int i;

        for (i = 0; i < MSRPM_OFFSETS; ++i) {

                /* Offset already in list? */
                if (msrpm_offsets[i] == offset)
                        return;

                /* Slot used by another offset? */
                if (msrpm_offsets[i] != MSR_INVALID)
                        continue;

                /* Add offset to list */
                msrpm_offsets[i] = offset;

                return;
        }

        /*
         * If this BUG triggers, the msrpm_offsets table has overflowed. Just
         * increase MSRPM_OFFSETS in this case.
         */
        BUG();
}

static void init_msrpm_offsets(void)
{
        int i;

        memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

        for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
                u32 offset;

                offset = svm_msrpm_offset(direct_access_msrs[i].index);
                BUG_ON(offset == MSR_INVALID);

                add_msr_offset(offset);
        }
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
        u32 *msrpm = svm->msrpm;

        svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
        set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
        svm->nmi_singlestep = false;

        if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
                /* Clear our flags if they were not set by the guest */
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
                if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
                        svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
        }
}

/* Note:
 * This hash table is used to map a VM_ID to a struct kvm_svm when
 * handling an AMD IOMMU GALOG notification, in order to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS   8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify
 * SVM to schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
        unsigned long flags;
        struct kvm_svm *kvm_svm;
        struct kvm_vcpu *vcpu = NULL;
        u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
        u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

        pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

        spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
        hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
                if (kvm_svm->avic_vm_id != vm_id)
                        continue;
                vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
                break;
        }
        spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

        /* Note:
         * At this point, the IOMMU should have already set the pending
         * bit in the vAPIC backing page. So, we just need to schedule
         * in the vcpu.
         */
        if (vcpu)
                kvm_vcpu_wake_up(vcpu);

        return 0;
}

static __init int sev_hardware_setup(void)
{
        struct sev_user_data_status *status;
        int rc;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = cpuid_ecx(0x8000001F);

        if (!max_sev_asid)
                return 1;

        /* Minimum ASID value that should be used for SEV guest */
        min_sev_asid = cpuid_edx(0x8000001F);

        /* Initialize SEV ASID bitmap */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;

        status = kmalloc(sizeof(*status), GFP_KERNEL);
        if (!status)
                return 1;

        /*
         * Check SEV platform status.
         *
         * PLATFORM_STATUS can be called in any state; if we fail to query
         * the platform status, then either the PSP firmware does not support
         * the SEV feature or the SEV firmware is dead.
         */
        rc = sev_platform_status(status, NULL);
        if (rc)
                goto err;

        pr_info("SEV supported\n");

err:
        kfree(status);
        return rc;
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count = __grow_ple_window(old,
                                                        pause_filter_count,
                                                        pause_filter_count_grow,
                                                        pause_filter_count_max);

        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        int old = control->pause_filter_count;

        control->pause_filter_count =
                                __shrink_ple_window(old,
                                                    pause_filter_count,
                                                    pause_filter_count_shrink,
                                                    pause_filter_count);
        if (control->pause_filter_count != old) {
                mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
                trace_kvm_ple_window_update(vcpu->vcpu_id,
                                            control->pause_filter_count, old);
        }
}

static __init int svm_hardware_setup(void)
{
        int cpu;
        struct page *iopm_pages;
        void *iopm_va;
        int r;

        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

        if (!iopm_pages)
                return -ENOMEM;

        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

        init_msrpm_offsets();

        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);

        if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
                kvm_enable_efer_bits(EFER_FFXSR);

        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
                kvm_has_tsc_control = true;
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }

        /* Check for pause filtering support */
        if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
                pause_filter_count = 0;
                pause_filter_thresh = 0;
        } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
                pause_filter_thresh = 0;
        }

        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
        }

        if (sev) {
                if (boot_cpu_has(X86_FEATURE_SEV) &&
                    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
                        r = sev_hardware_setup();
                        if (r)
                                sev = false;
                } else {
                        sev = false;
                }
        }

        for_each_possible_cpu(cpu) {
                r = svm_cpu_init(cpu);
                if (r)
                        goto err;
        }

        if (!boot_cpu_has(X86_FEATURE_NPT))
                npt_enabled = false;

        if (npt_enabled && !npt) {
                printk(KERN_INFO "kvm: Nested Paging disabled\n");
                npt_enabled = false;
        }

        if (npt_enabled) {
                printk(KERN_INFO "kvm: Nested Paging enabled\n");
                kvm_enable_tdp();
        } else
                kvm_disable_tdp();

        if (nrips) {
                if (!boot_cpu_has(X86_FEATURE_NRIPS))
                        nrips = false;
        }

        if (avic) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_AVIC) ||
                    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
                        avic = false;
                } else {
                        pr_info("AVIC enabled\n");

                        amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
                }
        }

        if (vls) {
                if (!npt_enabled ||
                    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
                    !IS_ENABLED(CONFIG_X86_64)) {
                        vls = false;
                } else {
                        pr_info("Virtual VMLOAD VMSAVE supported\n");
                }
        }

        if (vgif) {
                if (!boot_cpu_has(X86_FEATURE_VGIF))
                        vgif = false;
                else
                        pr_info("Virtual GIF supported\n");
        }

        return 0;

err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
}

static __exit void svm_hardware_unsetup(void)
{
        int cpu;

        if (svm_sev_enabled())
                bitmap_free(sev_asid_bitmap);

        for_each_possible_cpu(cpu)
                svm_cpu_uninit(cpu);

        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
        iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
                      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
        seg->limit = 0xffff;
        seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
        seg->selector = 0;
        seg->attrib = SVM_SELECTOR_P_MASK | type;
        seg->limit = 0xffff;
        seg->base = 0;
}

static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (is_guest_mode(vcpu))
                return svm->nested.hsave->control.tsc_offset;

        return vcpu->arch.tsc_offset;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;

        if (is_guest_mode(vcpu)) {
                /* Write L1's TSC offset.  */
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
        }

        trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                   svm->vmcb->control.tsc_offset - g_tsc_offset,
                                   offset);

        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
        return svm->vmcb->control.tsc_offset;
}
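/*
 * Numeric example of the logic above: if L1 runs with a TSC offset of
 * 100 and L2 adds another 50, vmcb->control.tsc_offset is 150 and
 * g_tsc_offset is 50; writing a new L1 offset of 200 then yields a
 * combined offset of 250 while L2 is running.
 */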

static void avic_init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
        phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
        phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
        phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
        vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

static void init_vmcb(struct vcpu_svm *svm)
{
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        svm->vcpu.arch.hflags = 0;

        set_cr_intercept(svm, INTERCEPT_CR0_READ);
        set_cr_intercept(svm, INTERCEPT_CR3_READ);
        set_cr_intercept(svm, INTERCEPT_CR4_READ);
        set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
        set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
        if (!kvm_vcpu_apicv_active(&svm->vcpu))
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

        set_dr_intercepts(svm);

        set_exception_intercept(svm, PF_VECTOR);
        set_exception_intercept(svm, UD_VECTOR);
        set_exception_intercept(svm, MC_VECTOR);
        set_exception_intercept(svm, AC_VECTOR);
        set_exception_intercept(svm, DB_VECTOR);
        /*
         * Guest access to VMware backdoor ports could legitimately
         * trigger #GP because of the TSS I/O permission bitmap.
         * We intercept those #GPs and allow access anyway,
         * as VMware does.
         */
1518         if (enable_vmware_backdoor)
1519                 set_exception_intercept(svm, GP_VECTOR);
1520
1521         set_intercept(svm, INTERCEPT_INTR);
1522         set_intercept(svm, INTERCEPT_NMI);
1523         set_intercept(svm, INTERCEPT_SMI);
1524         set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
1525         set_intercept(svm, INTERCEPT_RDPMC);
1526         set_intercept(svm, INTERCEPT_CPUID);
1527         set_intercept(svm, INTERCEPT_INVD);
1528         set_intercept(svm, INTERCEPT_INVLPG);
1529         set_intercept(svm, INTERCEPT_INVLPGA);
1530         set_intercept(svm, INTERCEPT_IOIO_PROT);
1531         set_intercept(svm, INTERCEPT_MSR_PROT);
1532         set_intercept(svm, INTERCEPT_TASK_SWITCH);
1533         set_intercept(svm, INTERCEPT_SHUTDOWN);
1534         set_intercept(svm, INTERCEPT_VMRUN);
1535         set_intercept(svm, INTERCEPT_VMMCALL);
1536         set_intercept(svm, INTERCEPT_VMLOAD);
1537         set_intercept(svm, INTERCEPT_VMSAVE);
1538         set_intercept(svm, INTERCEPT_STGI);
1539         set_intercept(svm, INTERCEPT_CLGI);
1540         set_intercept(svm, INTERCEPT_SKINIT);
1541         set_intercept(svm, INTERCEPT_WBINVD);
1542         set_intercept(svm, INTERCEPT_XSETBV);
1543         set_intercept(svm, INTERCEPT_RDPRU);
1544         set_intercept(svm, INTERCEPT_RSM);
1545
1546         if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
1547                 set_intercept(svm, INTERCEPT_MONITOR);
1548                 set_intercept(svm, INTERCEPT_MWAIT);
1549         }
1550
1551         if (!kvm_hlt_in_guest(svm->vcpu.kvm))
1552                 set_intercept(svm, INTERCEPT_HLT);
1553
1554         control->iopm_base_pa = __sme_set(iopm_base);
1555         control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
1556         control->int_ctl = V_INTR_MASKING_MASK;
1557
1558         init_seg(&save->es);
1559         init_seg(&save->ss);
1560         init_seg(&save->ds);
1561         init_seg(&save->fs);
1562         init_seg(&save->gs);
1563
1564         save->cs.selector = 0xf000;
1565         save->cs.base = 0xffff0000;
1566         /* Executable/Readable Code Segment */
1567         save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1568                 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1569         save->cs.limit = 0xffff;
1570
1571         save->gdtr.limit = 0xffff;
1572         save->idtr.limit = 0xffff;
1573
1574         init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1575         init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1576
1577         svm_set_efer(&svm->vcpu, 0);
1578         save->dr6 = 0xffff0ff0;
1579         kvm_set_rflags(&svm->vcpu, 2);
1580         save->rip = 0x0000fff0;
1581         svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
1582
1583         /*
1584          * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
1585          * It also updates the guest-visible cr0 value.
1586          */
1587         svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
1588         kvm_mmu_reset_context(&svm->vcpu);
1589
1590         save->cr4 = X86_CR4_PAE;
1591         /* rdx = ?? */
1592
1593         if (npt_enabled) {
1594                 /* Setup VMCB for Nested Paging */
1595                 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
1596                 clr_intercept(svm, INTERCEPT_INVLPG);
1597                 clr_exception_intercept(svm, PF_VECTOR);
1598                 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1599                 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1600                 save->g_pat = svm->vcpu.arch.pat;
1601                 save->cr3 = 0;
1602                 save->cr4 = 0;
1603         }
1604         svm->asid_generation = 0;
1605
1606         svm->nested.vmcb = 0;
1607         svm->vcpu.arch.hflags = 0;
1608
1609         if (pause_filter_count) {
1610                 control->pause_filter_count = pause_filter_count;
1611                 if (pause_filter_thresh)
1612                         control->pause_filter_thresh = pause_filter_thresh;
1613                 set_intercept(svm, INTERCEPT_PAUSE);
1614         } else {
1615                 clr_intercept(svm, INTERCEPT_PAUSE);
1616         }
1617
1618         if (kvm_vcpu_apicv_active(&svm->vcpu))
1619                 avic_init_vmcb(svm);
1620
1621         /*
1622          * If hardware supports Virtual VMLOAD VMSAVE then enable it
1623          * in VMCB and clear intercepts to avoid #VMEXIT.
1624          */
1625         if (vls) {
1626                 clr_intercept(svm, INTERCEPT_VMLOAD);
1627                 clr_intercept(svm, INTERCEPT_VMSAVE);
1628                 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1629         }
1630
1631         if (vgif) {
1632                 clr_intercept(svm, INTERCEPT_STGI);
1633                 clr_intercept(svm, INTERCEPT_CLGI);
1634                 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1635         }
1636
1637         if (sev_guest(svm->vcpu.kvm)) {
1638                 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
1639                 clr_exception_intercept(svm, UD_VECTOR);
1640         }
1641
1642         mark_all_dirty(svm->vmcb);
1643
1644         enable_gif(svm);
1645
1646 }
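
/*
 * Illustrative sketch (not kernel code): the set_intercept()/clr_intercept()
 * helpers used throughout init_vmcb() conceptually toggle one bit per
 * intercept in the VMCB control area. A minimal userspace model, assuming a
 * hypothetical single 64-bit intercept word:
 *
 *      #include <stdint.h>
 *
 *      struct toy_vmcb_control {
 *              uint64_t intercept;     // one bit per INTERCEPT_* constant
 *      };
 *
 *      static void toy_set_intercept(struct toy_vmcb_control *c, int bit)
 *      {
 *              c->intercept |= 1ULL << bit;
 *      }
 *
 *      static void toy_clr_intercept(struct toy_vmcb_control *c, int bit)
 *      {
 *              c->intercept &= ~(1ULL << bit);
 *      }
 */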
1647
1648 static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1649                                        unsigned int index)
1650 {
1651         u64 *avic_physical_id_table;
1652         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
1653
1654         if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1655                 return NULL;
1656
1657         avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
1658
1659         return &avic_physical_id_table[index];
1660 }
1661
1662 /**
1663  * Note:
1664  * AVIC hardware walks the nested page table to check permissions,
1665  * but does not use the SPA address specified in the leaf page
1666  * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1667  * field of the VMCB. Therefore, we set up the
1668  * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1669  */
1670 static int avic_init_access_page(struct kvm_vcpu *vcpu)
1671 {
1672         struct kvm *kvm = vcpu->kvm;
1673         int ret = 0;
1674
1675         mutex_lock(&kvm->slots_lock);
1676         if (kvm->arch.apic_access_page_done)
1677                 goto out;
1678
1679         ret = __x86_set_memory_region(kvm,
1680                                       APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1681                                       APIC_DEFAULT_PHYS_BASE,
1682                                       PAGE_SIZE);
1683         if (ret)
1684                 goto out;
1685
1686         kvm->arch.apic_access_page_done = true;
1687 out:
1688         mutex_unlock(&kvm->slots_lock);
1689         return ret;
1690 }
1691
1692 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1693 {
1694         int ret;
1695         u64 *entry, new_entry;
1696         int id = vcpu->vcpu_id;
1697         struct vcpu_svm *svm = to_svm(vcpu);
1698
1699         ret = avic_init_access_page(vcpu);
1700         if (ret)
1701                 return ret;
1702
1703         if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1704                 return -EINVAL;
1705
1706         if (!svm->vcpu.arch.apic->regs)
1707                 return -EINVAL;
1708
1709         svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1710
1711         /* Set the AVIC backing page address in the physical APIC ID table */
1712         entry = avic_get_physical_id_entry(vcpu, id);
1713         if (!entry)
1714                 return -EINVAL;
1715
1716         new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1717                               AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1718                               AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
1719         WRITE_ONCE(*entry, new_entry);
1720
1721         svm->avic_physical_id_cache = entry;
1722
1723         return 0;
1724 }
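
/*
 * Illustrative sketch (not kernel code): the physical APIC ID table entry
 * written by avic_init_backing_page() packs the backing page address and a
 * valid bit into a single 64-bit word. The mask values below are assumed
 * placeholders, not the real AVIC_PHYSICAL_ID_ENTRY_* definitions:
 *
 *      #include <stdint.h>
 *
 *      #define TOY_BACKING_PAGE_MASK   0x000ffffffffff000ULL    // assumed
 *      #define TOY_VALID_MASK          (1ULL << 62)             // assumed
 *
 *      static uint64_t toy_make_entry(uint64_t backing_page_pa)
 *      {
 *              return (backing_page_pa & TOY_BACKING_PAGE_MASK) |
 *                     TOY_VALID_MASK;
 *      }
 */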
1725
1726 static void __sev_asid_free(int asid)
1727 {
1728         struct svm_cpu_data *sd;
1729         int cpu, pos;
1730
1731         pos = asid - 1;
1732         clear_bit(pos, sev_asid_bitmap);
1733
1734         for_each_possible_cpu(cpu) {
1735                 sd = per_cpu(svm_data, cpu);
1736                 sd->sev_vmcbs[pos] = NULL;
1737         }
1738 }
1739
1740 static void sev_asid_free(struct kvm *kvm)
1741 {
1742         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1743
1744         __sev_asid_free(sev->asid);
1745 }
1746
1747 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1748 {
1749         struct sev_data_decommission *decommission;
1750         struct sev_data_deactivate *data;
1751
1752         if (!handle)
1753                 return;
1754
1755         data = kzalloc(sizeof(*data), GFP_KERNEL);
1756         if (!data)
1757                 return;
1758
1759         /* deactivate handle */
1760         data->handle = handle;
1761         sev_guest_deactivate(data, NULL);
1762
1763         wbinvd_on_all_cpus();
1764         sev_guest_df_flush(NULL);
1765         kfree(data);
1766
1767         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1768         if (!decommission)
1769                 return;
1770
1771         /* decommission handle */
1772         decommission->handle = handle;
1773         sev_guest_decommission(decommission, NULL);
1774
1775         kfree(decommission);
1776 }
1777
1778 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1779                                     unsigned long ulen, unsigned long *n,
1780                                     int write)
1781 {
1782         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1783         unsigned long npages, npinned, size;
1784         unsigned long locked, lock_limit;
1785         struct page **pages;
1786         unsigned long first, last;
1787
1788         if (ulen == 0 || uaddr + ulen < uaddr)
1789                 return NULL;
1790
1791         /* Calculate number of pages. */
1792         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1793         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1794         npages = (last - first + 1);
1795
1796         locked = sev->pages_locked + npages;
1797         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1798         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1799                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1800                 return NULL;
1801         }
1802
1803         /* Avoid using vmalloc for smaller buffers. */
1804         size = npages * sizeof(struct page *);
1805         if (size > PAGE_SIZE)
1806                 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1807                                   PAGE_KERNEL);
1808         else
1809                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
1810
1811         if (!pages)
1812                 return NULL;
1813
1814         /* Pin the user virtual address. */
1815         npinned = get_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
1816         if (npinned != npages) {
1817                 pr_err("SEV: Failure locking %lu pages.\n", npages);
1818                 goto err;
1819         }
1820
1821         *n = npages;
1822         sev->pages_locked = locked;
1823
1824         return pages;
1825
1826 err:
1827         if (npinned > 0)
1828                 release_pages(pages, npinned);
1829
1830         kvfree(pages);
1831         return NULL;
1832 }
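
/*
 * Illustrative sketch (not kernel code): the page-count math used by
 * sev_pin_memory() above. The number of 4K pages spanned by a user buffer
 * follows from the first and last page frame numbers:
 *
 *      #define TOY_PAGE_SHIFT  12
 *      #define TOY_PAGE_MASK   (~((1UL << TOY_PAGE_SHIFT) - 1))
 *
 *      static unsigned long toy_npages(unsigned long uaddr, unsigned long ulen)
 *      {
 *              unsigned long first = (uaddr & TOY_PAGE_MASK) >> TOY_PAGE_SHIFT;
 *              unsigned long last = ((uaddr + ulen - 1) & TOY_PAGE_MASK) >>
 *                                   TOY_PAGE_SHIFT;
 *
 *              // e.g. a 4K buffer straddling a page boundary spans 2 pages
 *              return last - first + 1;
 *      }
 */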
1833
1834 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1835                              unsigned long npages)
1836 {
1837         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1838
1839         release_pages(pages, npages);
1840         kvfree(pages);
1841         sev->pages_locked -= npages;
1842 }
1843
1844 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1845 {
1846         uint8_t *page_virtual;
1847         unsigned long i;
1848
1849         if (npages == 0 || pages == NULL)
1850                 return;
1851
1852         for (i = 0; i < npages; i++) {
1853                 page_virtual = kmap_atomic(pages[i]);
1854                 clflush_cache_range(page_virtual, PAGE_SIZE);
1855                 kunmap_atomic(page_virtual);
1856         }
1857 }
1858
1859 static void __unregister_enc_region_locked(struct kvm *kvm,
1860                                            struct enc_region *region)
1861 {
1862         /*
1863          * The guest may change the memory encryption attribute from C=0 -> C=1
1864          * or vice versa for this memory range. Let's make sure caches are
1865          * flushed to ensure that guest data gets written into memory with the
1866          * correct C-bit.
1867          */
1868         sev_clflush_pages(region->pages, region->npages);
1869
1870         sev_unpin_memory(kvm, region->pages, region->npages);
1871         list_del(&region->list);
1872         kfree(region);
1873 }
1874
1875 static struct kvm *svm_vm_alloc(void)
1876 {
1877         struct kvm_svm *kvm_svm = __vmalloc(sizeof(struct kvm_svm),
1878                                             GFP_KERNEL_ACCOUNT | __GFP_ZERO,
1879                                             PAGE_KERNEL);
1880         return &kvm_svm->kvm;
1881 }
1882
1883 static void svm_vm_free(struct kvm *kvm)
1884 {
1885         vfree(to_kvm_svm(kvm));
1886 }
1887
1888 static void sev_vm_destroy(struct kvm *kvm)
1889 {
1890         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1891         struct list_head *head = &sev->regions_list;
1892         struct list_head *pos, *q;
1893
1894         if (!sev_guest(kvm))
1895                 return;
1896
1897         mutex_lock(&kvm->lock);
1898
1899         /*
1900          * If userspace was terminated before unregistering the memory regions,
1901          * unpin all the registered memory.
1902          */
1903         if (!list_empty(head)) {
1904                 list_for_each_safe(pos, q, head) {
1905                         __unregister_enc_region_locked(kvm,
1906                                 list_entry(pos, struct enc_region, list));
1907                 }
1908         }
1909
1910         mutex_unlock(&kvm->lock);
1911
1912         sev_unbind_asid(kvm, sev->handle);
1913         sev_asid_free(kvm);
1914 }
1915
1916 static void avic_vm_destroy(struct kvm *kvm)
1917 {
1918         unsigned long flags;
1919         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1920
1921         if (!avic)
1922                 return;
1923
1924         if (kvm_svm->avic_logical_id_table_page)
1925                 __free_page(kvm_svm->avic_logical_id_table_page);
1926         if (kvm_svm->avic_physical_id_table_page)
1927                 __free_page(kvm_svm->avic_physical_id_table_page);
1928
1929         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1930         hash_del(&kvm_svm->hnode);
1931         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1932 }
1933
1934 static void svm_vm_destroy(struct kvm *kvm)
1935 {
1936         avic_vm_destroy(kvm);
1937         sev_vm_destroy(kvm);
1938 }
1939
1940 static int avic_vm_init(struct kvm *kvm)
1941 {
1942         unsigned long flags;
1943         int err = -ENOMEM;
1944         struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
1945         struct kvm_svm *k2;
1946         struct page *p_page;
1947         struct page *l_page;
1948         u32 vm_id;
1949
1950         if (!avic)
1951                 return 0;
1952
1953         /* Allocating physical APIC ID table (4KB) */
1954         p_page = alloc_page(GFP_KERNEL_ACCOUNT);
1955         if (!p_page)
1956                 goto free_avic;
1957
1958         kvm_svm->avic_physical_id_table_page = p_page;
1959         clear_page(page_address(p_page));
1960
1961         /* Allocating logical APIC ID table (4KB) */
1962         l_page = alloc_page(GFP_KERNEL_ACCOUNT);
1963         if (!l_page)
1964                 goto free_avic;
1965
1966         kvm_svm->avic_logical_id_table_page = l_page;
1967         clear_page(page_address(l_page));
1968
1969         spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1970  again:
1971         vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1972         if (vm_id == 0) { /* id is 1-based, zero is not okay */
1973                 next_vm_id_wrapped = 1;
1974                 goto again;
1975         }
1976         /* Is it still in use? Only possible if wrapped at least once */
1977         if (next_vm_id_wrapped) {
1978                 hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
1979                         if (k2->avic_vm_id == vm_id)
1980                                 goto again;
1981                 }
1982         }
1983         kvm_svm->avic_vm_id = vm_id;
1984         hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
1985         spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1986
1987         return 0;
1988
1989 free_avic:
1990         avic_vm_destroy(kvm);
1991         return err;
1992 }
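
/*
 * Illustrative sketch (not kernel code): the vm_id allocation in
 * avic_vm_init() is a wrapping 24-bit counter that skips zero and, once it
 * has wrapped, re-checks for collisions. A userspace model with a
 * hypothetical in_use() predicate standing in for the hash table lookup:
 *
 *      static unsigned int toy_next_id;
 *      static int toy_wrapped;
 *
 *      static unsigned int toy_alloc_vm_id(int (*in_use)(unsigned int))
 *      {
 *              unsigned int id;
 *
 *              for (;;) {
 *                      id = toy_next_id = (toy_next_id + 1) & 0xffffff;
 *                      if (id == 0) {          // IDs are 1-based; 0 is reserved
 *                              toy_wrapped = 1;
 *                              continue;
 *                      }
 *                      if (toy_wrapped && in_use(id))
 *                              continue;
 *                      return id;
 *              }
 *      }
 */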
1993
1994 static inline int
1995 avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
1996 {
1997         int ret = 0;
1998         unsigned long flags;
1999         struct amd_svm_iommu_ir *ir;
2000         struct vcpu_svm *svm = to_svm(vcpu);
2001
2002         if (!kvm_arch_has_assigned_device(vcpu->kvm))
2003                 return 0;
2004
2005         /*
2006          * Here, we go through the per-vcpu ir_list to update all existing
2007          * interrupt remapping table entries targeting this vcpu.
2008          */
2009         spin_lock_irqsave(&svm->ir_list_lock, flags);
2010
2011         if (list_empty(&svm->ir_list))
2012                 goto out;
2013
2014         list_for_each_entry(ir, &svm->ir_list, node) {
2015                 ret = amd_iommu_update_ga(cpu, r, ir->data);
2016                 if (ret)
2017                         break;
2018         }
2019 out:
2020         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
2021         return ret;
2022 }
2023
2024 static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2025 {
2026         u64 entry;
2027         /* ID = 0xff (broadcast), ID > 0xff (reserved) */
2028         int h_physical_id = kvm_cpu_get_apicid(cpu);
2029         struct vcpu_svm *svm = to_svm(vcpu);
2030
2031         if (!kvm_vcpu_apicv_active(vcpu))
2032                 return;
2033
2034         /*
2035          * Since the host physical APIC id is 8 bits,
2036          * we can support host APIC IDs up to 255.
2037          */
2038         if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
2039                 return;
2040
2041         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2042         WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
2043
2044         entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
2045         entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
2046
2047         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2048         if (svm->avic_is_running)
2049                 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2050
2051         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2052         avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
2053                                         svm->avic_is_running);
2054 }
2055
2056 static void avic_vcpu_put(struct kvm_vcpu *vcpu)
2057 {
2058         u64 entry;
2059         struct vcpu_svm *svm = to_svm(vcpu);
2060
2061         if (!kvm_vcpu_apicv_active(vcpu))
2062                 return;
2063
2064         entry = READ_ONCE(*(svm->avic_physical_id_cache));
2065         if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
2066                 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
2067
2068         entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
2069         WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
2070 }
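
/*
 * Illustrative sketch (not kernel code): avic_vcpu_load() above rewrites a
 * single 64-bit table entry, swapping in the host CPU's APIC ID and setting
 * or clearing the is_running bit. The field positions below are assumed
 * placeholders for the AVIC_PHYSICAL_ID_ENTRY_* masks:
 *
 *      #include <stdint.h>
 *
 *      #define TOY_HOST_APIC_ID_MASK   0xffULL         // assumed: bits 0-7
 *      #define TOY_IS_RUNNING_MASK     (1ULL << 62)    // assumed position
 *
 *      static uint64_t toy_entry_on_load(uint64_t entry, uint8_t host_apic_id,
 *                                        int is_running)
 *      {
 *              entry &= ~TOY_HOST_APIC_ID_MASK;
 *              entry |= host_apic_id;
 *              entry &= ~TOY_IS_RUNNING_MASK;
 *              if (is_running)
 *                      entry |= TOY_IS_RUNNING_MASK;
 *              return entry;
 *      }
 */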
2071
2072 /**
2073  * This function is called during VCPU halt/unhalt.
2074  */
2075 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
2076 {
2077         struct vcpu_svm *svm = to_svm(vcpu);
2078
2079         svm->avic_is_running = is_run;
2080         if (is_run)
2081                 avic_vcpu_load(vcpu, vcpu->cpu);
2082         else
2083                 avic_vcpu_put(vcpu);
2084 }
2085
2086 static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
2087 {
2088         struct vcpu_svm *svm = to_svm(vcpu);
2089         u32 dummy;
2090         u32 eax = 1;
2091
2092         vcpu->arch.microcode_version = 0x01000065;
2093         svm->spec_ctrl = 0;
2094         svm->virt_spec_ctrl = 0;
2095
2096         if (!init_event) {
2097                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
2098                                            MSR_IA32_APICBASE_ENABLE;
2099                 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
2100                         svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
2101         }
2102         init_vmcb(svm);
2103
2104         kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
2105         kvm_rdx_write(vcpu, eax);
2106
2107         if (kvm_vcpu_apicv_active(vcpu) && !init_event)
2108                 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
2109 }
2110
2111 static int avic_init_vcpu(struct vcpu_svm *svm)
2112 {
2113         int ret;
2114
2115         if (!kvm_vcpu_apicv_active(&svm->vcpu))
2116                 return 0;
2117
2118         ret = avic_init_backing_page(&svm->vcpu);
2119         if (ret)
2120                 return ret;
2121
2122         INIT_LIST_HEAD(&svm->ir_list);
2123         spin_lock_init(&svm->ir_list_lock);
2124         svm->dfr_reg = APIC_DFR_FLAT;
2125
2126         return ret;
2127 }
2128
2129 static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
2130 {
2131         struct vcpu_svm *svm;
2132         struct page *page;
2133         struct page *msrpm_pages;
2134         struct page *hsave_page;
2135         struct page *nested_msrpm_pages;
2136         int err;
2137
2138         BUILD_BUG_ON_MSG(offsetof(struct vcpu_svm, vcpu) != 0,
2139                 "struct kvm_vcpu must be at offset 0 for arch usercopy region");
2140
2141         svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
2142         if (!svm) {
2143                 err = -ENOMEM;
2144                 goto out;
2145         }
2146
2147         svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
2148                                                      GFP_KERNEL_ACCOUNT);
2149         if (!svm->vcpu.arch.user_fpu) {
2150                 printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
2151                 err = -ENOMEM;
2152                 goto free_partial_svm;
2153         }
2154
2155         svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
2156                                                      GFP_KERNEL_ACCOUNT);
2157         if (!svm->vcpu.arch.guest_fpu) {
2158                 printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
2159                 err = -ENOMEM;
2160                 goto free_user_fpu;
2161         }
2162
2163         err = kvm_vcpu_init(&svm->vcpu, kvm, id);
2164         if (err)
2165                 goto free_svm;
2166
2167         err = -ENOMEM;
2168         page = alloc_page(GFP_KERNEL_ACCOUNT);
2169         if (!page)
2170                 goto uninit;
2171
2172         msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2173         if (!msrpm_pages)
2174                 goto free_page1;
2175
2176         nested_msrpm_pages = alloc_pages(GFP_KERNEL_ACCOUNT, MSRPM_ALLOC_ORDER);
2177         if (!nested_msrpm_pages)
2178                 goto free_page2;
2179
2180         hsave_page = alloc_page(GFP_KERNEL_ACCOUNT);
2181         if (!hsave_page)
2182                 goto free_page3;
2183
2184         err = avic_init_vcpu(svm);
2185         if (err)
2186                 goto free_page4;
2187
2188         /* We initialize this flag to true to make sure that the is_running
2189          * bit is set the first time the vcpu is loaded.
2190          */
2191         svm->avic_is_running = true;
2192
2193         svm->nested.hsave = page_address(hsave_page);
2194
2195         svm->msrpm = page_address(msrpm_pages);
2196         svm_vcpu_init_msrpm(svm->msrpm);
2197
2198         svm->nested.msrpm = page_address(nested_msrpm_pages);
2199         svm_vcpu_init_msrpm(svm->nested.msrpm);
2200
2201         svm->vmcb = page_address(page);
2202         clear_page(svm->vmcb);
2203         svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
2204         svm->asid_generation = 0;
2205         init_vmcb(svm);
2206
2207         svm_init_osvw(&svm->vcpu);
2208
2209         return &svm->vcpu;
2210
2211 free_page4:
2212         __free_page(hsave_page);
2213 free_page3:
2214         __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
2215 free_page2:
2216         __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
2217 free_page1:
2218         __free_page(page);
2219 uninit:
2220         kvm_vcpu_uninit(&svm->vcpu);
2221 free_svm:
2222         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2223 free_user_fpu:
2224         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2225 free_partial_svm:
2226         kmem_cache_free(kvm_vcpu_cache, svm);
2227 out:
2228         return ERR_PTR(err);
2229 }
2230
2231 static void svm_clear_current_vmcb(struct vmcb *vmcb)
2232 {
2233         int i;
2234
2235         for_each_online_cpu(i)
2236                 cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL);
2237 }
2238
2239 static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2240 {
2241         struct vcpu_svm *svm = to_svm(vcpu);
2242
2243         /*
2244          * The vmcb page can be recycled, causing a false negative in
2245          * svm_vcpu_load(). So, ensure that no logical CPU has this
2246          * vmcb page recorded as its current vmcb.
2247          */
2248         svm_clear_current_vmcb(svm->vmcb);
2249
2250         __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
2251         __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
2252         __free_page(virt_to_page(svm->nested.hsave));
2253         __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
2254         kvm_vcpu_uninit(vcpu);
2255         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
2256         kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
2257         kmem_cache_free(kvm_vcpu_cache, svm);
2258 }
2259
2260 static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2261 {
2262         struct vcpu_svm *svm = to_svm(vcpu);
2263         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2264         int i;
2265
2266         if (unlikely(cpu != vcpu->cpu)) {
2267                 svm->asid_generation = 0;
2268                 mark_all_dirty(svm->vmcb);
2269         }
2270
2271 #ifdef CONFIG_X86_64
2272         rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2273 #endif
2274         savesegment(fs, svm->host.fs);
2275         savesegment(gs, svm->host.gs);
2276         svm->host.ldt = kvm_read_ldt();
2277
2278         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2279                 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2280
2281         if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2282                 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2283                 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2284                         __this_cpu_write(current_tsc_ratio, tsc_ratio);
2285                         wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2286                 }
2287         }
2288         /* This assumes that the kernel never uses MSR_TSC_AUX */
2289         if (static_cpu_has(X86_FEATURE_RDTSCP))
2290                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
2291
2292         if (sd->current_vmcb != svm->vmcb) {
2293                 sd->current_vmcb = svm->vmcb;
2294                 indirect_branch_prediction_barrier();
2295         }
2296         avic_vcpu_load(vcpu, cpu);
2297 }
2298
2299 static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2300 {
2301         struct vcpu_svm *svm = to_svm(vcpu);
2302         int i;
2303
2304         avic_vcpu_put(vcpu);
2305
2306         ++vcpu->stat.host_state_reload;
2307         kvm_load_ldt(svm->host.ldt);
2308 #ifdef CONFIG_X86_64
2309         loadsegment(fs, svm->host.fs);
2310         wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
2311         load_gs_index(svm->host.gs);
2312 #else
2313 #ifdef CONFIG_X86_32_LAZY_GS
2314         loadsegment(gs, svm->host.gs);
2315 #endif
2316 #endif
2317         for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
2318                 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
2319 }
2320
2321 static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2322 {
2323         avic_set_running(vcpu, false);
2324 }
2325
2326 static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2327 {
2328         avic_set_running(vcpu, true);
2329 }
2330
2331 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2332 {
2333         struct vcpu_svm *svm = to_svm(vcpu);
2334         unsigned long rflags = svm->vmcb->save.rflags;
2335
2336         if (svm->nmi_singlestep) {
2337                 /* Hide our flags if they were not set by the guest */
2338                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2339                         rflags &= ~X86_EFLAGS_TF;
2340                 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2341                         rflags &= ~X86_EFLAGS_RF;
2342         }
2343         return rflags;
2344 }
2345
2346 static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2347 {
2348         if (to_svm(vcpu)->nmi_singlestep)
2349                 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2350
2351         /*
2352          * Any change of EFLAGS.VM is accompanied by a reload of SS
2353          * (caused by either a task switch or an inter-privilege IRET),
2354          * so we do not need to update the CPL here.
2355          */
2356         to_svm(vcpu)->vmcb->save.rflags = rflags;
2357 }
2358
2359 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2360 {
2361         switch (reg) {
2362         case VCPU_EXREG_PDPTR:
2363                 BUG_ON(!npt_enabled);
2364                 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
2365                 break;
2366         default:
2367                 BUG();
2368         }
2369 }
2370
2371 static void svm_set_vintr(struct vcpu_svm *svm)
2372 {
2373         set_intercept(svm, INTERCEPT_VINTR);
2374 }
2375
2376 static void svm_clear_vintr(struct vcpu_svm *svm)
2377 {
2378         clr_intercept(svm, INTERCEPT_VINTR);
2379 }
2380
2381 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2382 {
2383         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2384
2385         switch (seg) {
2386         case VCPU_SREG_CS: return &save->cs;
2387         case VCPU_SREG_DS: return &save->ds;
2388         case VCPU_SREG_ES: return &save->es;
2389         case VCPU_SREG_FS: return &save->fs;
2390         case VCPU_SREG_GS: return &save->gs;
2391         case VCPU_SREG_SS: return &save->ss;
2392         case VCPU_SREG_TR: return &save->tr;
2393         case VCPU_SREG_LDTR: return &save->ldtr;
2394         }
2395         BUG();
2396         return NULL;
2397 }
2398
2399 static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2400 {
2401         struct vmcb_seg *s = svm_seg(vcpu, seg);
2402
2403         return s->base;
2404 }
2405
2406 static void svm_get_segment(struct kvm_vcpu *vcpu,
2407                             struct kvm_segment *var, int seg)
2408 {
2409         struct vmcb_seg *s = svm_seg(vcpu, seg);
2410
2411         var->base = s->base;
2412         var->limit = s->limit;
2413         var->selector = s->selector;
2414         var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2415         var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2416         var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2417         var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2418         var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2419         var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2420         var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
2421
2422         /*
2423          * AMD CPUs circa 2014 track the G bit for all segments except CS.
2424          * However, the SVM spec states that the G bit is not observed by the
2425          * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2426          * So let's synthesize a legal G bit for all segments, this helps
2427          * running KVM nested. It also helps cross-vendor migration, because
2428          * Intel's vmentry has a check on the 'G' bit.
2429          */
2430         var->g = s->limit > 0xfffff;
2431
2432         /*
2433          * AMD's VMCB does not have an explicit unusable field, so emulate it
2434          * for cross-vendor migration purposes by treating "not present" as unusable.
2435          */
2436         var->unusable = !var->present;
2437
2438         switch (seg) {
2439         case VCPU_SREG_TR:
2440                 /*
2441                  * Work around a bug where the busy flag in the tr selector
2442                  * isn't exposed.
2443                  */
2444                 var->type |= 0x2;
2445                 break;
2446         case VCPU_SREG_DS:
2447         case VCPU_SREG_ES:
2448         case VCPU_SREG_FS:
2449         case VCPU_SREG_GS:
2450                 /*
2451                  * The accessed bit must always be set in the segment
2452                  * descriptor cache; although it can be cleared in the
2453                  * descriptor itself, the cached bit always remains 1. Since
2454                  * Intel has a check on this, set it here to support
2455                  * cross-vendor migration.
2456                  */
2457                 if (!var->unusable)
2458                         var->type |= 0x1;
2459                 break;
2460         case VCPU_SREG_SS:
2461                 /*
2462                  * On AMD CPUs sometimes the DB bit in the segment
2463                  * descriptor is left as 1, although the whole segment has
2464                  * been made unusable. Clear it here to pass an Intel VMX
2465                  * entry check when cross vendor migrating.
2466                  */
2467                 if (var->unusable)
2468                         var->db = 0;
2469                 /* This is symmetric with svm_set_segment() */
2470                 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
2471                 break;
2472         }
2473 }
2474
2475 static int svm_get_cpl(struct kvm_vcpu *vcpu)
2476 {
2477         struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2478
2479         return save->cpl;
2480 }
2481
2482 static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2483 {
2484         struct vcpu_svm *svm = to_svm(vcpu);
2485
2486         dt->size = svm->vmcb->save.idtr.limit;
2487         dt->address = svm->vmcb->save.idtr.base;
2488 }
2489
2490 static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2491 {
2492         struct vcpu_svm *svm = to_svm(vcpu);
2493
2494         svm->vmcb->save.idtr.limit = dt->size;
2495         svm->vmcb->save.idtr.base = dt->address;
2496         mark_dirty(svm->vmcb, VMCB_DT);
2497 }
2498
2499 static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2500 {
2501         struct vcpu_svm *svm = to_svm(vcpu);
2502
2503         dt->size = svm->vmcb->save.gdtr.limit;
2504         dt->address = svm->vmcb->save.gdtr.base;
2505 }
2506
2507 static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
2508 {
2509         struct vcpu_svm *svm = to_svm(vcpu);
2510
2511         svm->vmcb->save.gdtr.limit = dt->size;
2512         svm->vmcb->save.gdtr.base = dt->address;
2513         mark_dirty(svm->vmcb, VMCB_DT);
2514 }
2515
2516 static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2517 {
2518 }
2519
2520 static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2521 {
2522 }
2523
2524 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
2525 {
2526 }
2527
2528 static void update_cr0_intercept(struct vcpu_svm *svm)
2529 {
2530         ulong gcr0 = svm->vcpu.arch.cr0;
2531         u64 *hcr0 = &svm->vmcb->save.cr0;
2532
2533         *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2534                 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
2535
2536         mark_dirty(svm->vmcb, VMCB_CR);
2537
2538         if (gcr0 == *hcr0) {
2539                 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2540                 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2541         } else {
2542                 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2543                 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
2544         }
2545 }
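
/*
 * Illustrative sketch (not kernel code): the idea behind
 * update_cr0_intercept() above. The hardware CR0 is the guest value with the
 * shadowed bits (SVM_CR0_SELECTIVE_MASK) merged in; CR0 intercepts are only
 * needed while the two views disagree:
 *
 *      static int toy_need_cr0_intercept(unsigned long guest_cr0,
 *                                        unsigned long host_cr0,
 *                                        unsigned long selective_mask)
 *      {
 *              unsigned long merged = (host_cr0 & ~selective_mask) |
 *                                     (guest_cr0 & selective_mask);
 *
 *              return merged != guest_cr0;
 *      }
 */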
2546
2547 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2548 {
2549         struct vcpu_svm *svm = to_svm(vcpu);
2550
2551 #ifdef CONFIG_X86_64
2552         if (vcpu->arch.efer & EFER_LME) {
2553                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
2554                         vcpu->arch.efer |= EFER_LMA;
2555                         svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
2556                 }
2557
2558                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
2559                         vcpu->arch.efer &= ~EFER_LMA;
2560                         svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
2561                 }
2562         }
2563 #endif
2564         vcpu->arch.cr0 = cr0;
2565
2566         if (!npt_enabled)
2567                 cr0 |= X86_CR0_PG | X86_CR0_WP;
2568
2569         /*
2570          * Re-enable caching here because the QEMU BIOS does
2571          * not do it; leaving caching disabled results in some
2572          * delay at reboot.
2573          */
2574         if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2575                 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
2576         svm->vmcb->save.cr0 = cr0;
2577         mark_dirty(svm->vmcb, VMCB_CR);
2578         update_cr0_intercept(svm);
2579 }
2580
2581 static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
2582 {
2583         unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
2584         unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2585
2586         if (cr4 & X86_CR4_VMXE)
2587                 return 1;
2588
2589         if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
2590                 svm_flush_tlb(vcpu, true);
2591
2592         vcpu->arch.cr4 = cr4;
2593         if (!npt_enabled)
2594                 cr4 |= X86_CR4_PAE;
2595         cr4 |= host_cr4_mce;
2596         to_svm(vcpu)->vmcb->save.cr4 = cr4;
2597         mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
2598         return 0;
2599 }
2600
2601 static void svm_set_segment(struct kvm_vcpu *vcpu,
2602                             struct kvm_segment *var, int seg)
2603 {
2604         struct vcpu_svm *svm = to_svm(vcpu);
2605         struct vmcb_seg *s = svm_seg(vcpu, seg);
2606
2607         s->base = var->base;
2608         s->limit = var->limit;
2609         s->selector = var->selector;
2610         s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2611         s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2612         s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2613         s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2614         s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2615         s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2616         s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2617         s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
2618
2619         /*
2620          * This is always accurate, except if SYSRET returned to a segment
2621          * with SS.DPL != 3.  Intel does not have this quirk, and always
2622          * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2623          * would entail passing the CPL to userspace and back.
2624          */
2625         if (seg == VCPU_SREG_SS)
2626                 /* This is symmetric with svm_get_segment() */
2627                 svm->vmcb->save.cpl = (var->dpl & 3);
2628
2629         mark_dirty(svm->vmcb, VMCB_SEG);
2630 }
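
/*
 * Illustrative sketch (not kernel code): svm_set_segment() above packs the
 * descriptor flags into the VMCB's compressed attrib encoding, with the type
 * in the low four bits and each single-bit flag at a fixed shift. The shift
 * values below are assumed to mirror the SVM_SELECTOR_*_SHIFT constants:
 *
 *      #include <stdint.h>
 *
 *      static uint16_t toy_pack_attrib(uint8_t type, int s, int dpl, int p,
 *                                      int avl, int l, int db, int g)
 *      {
 *              return (type & 0xf) |
 *                     ((s & 1) << 4) | ((dpl & 3) << 5) | ((p & 1) << 7) |
 *                     ((avl & 1) << 8) | ((l & 1) << 9) |
 *                     ((db & 1) << 10) | ((g & 1) << 11);
 *      }
 */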
2631
2632 static void update_bp_intercept(struct kvm_vcpu *vcpu)
2633 {
2634         struct vcpu_svm *svm = to_svm(vcpu);
2635
2636         clr_exception_intercept(svm, BP_VECTOR);
2637
2638         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
2639                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2640                         set_exception_intercept(svm, BP_VECTOR);
2641         } else
2642                 vcpu->guest_debug = 0;
2643 }
2644
2645 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
2646 {
2647         if (sd->next_asid > sd->max_asid) {
2648                 ++sd->asid_generation;
2649                 sd->next_asid = sd->min_asid;
2650                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
2651         }
2652
2653         svm->asid_generation = sd->asid_generation;
2654         svm->vmcb->control.asid = sd->next_asid++;
2655
2656         mark_dirty(svm->vmcb, VMCB_ASID);
2657 }
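
/*
 * Illustrative sketch (not kernel code): the per-CPU ASID recycling in
 * new_asid() above. When the ASID range is exhausted, the generation counter
 * is bumped and a full TLB flush is requested, which implicitly invalidates
 * every vCPU still carrying the previous generation:
 *
 *      struct toy_cpu_asids {
 *              unsigned int min_asid, max_asid, next_asid;
 *              unsigned int generation;
 *      };
 *
 *      static unsigned int toy_new_asid(struct toy_cpu_asids *sd, int *flush)
 *      {
 *              *flush = 0;
 *              if (sd->next_asid > sd->max_asid) {
 *                      ++sd->generation;       // stale ASIDs become invalid
 *                      sd->next_asid = sd->min_asid;
 *                      *flush = 1;             // cf. TLB_CONTROL_FLUSH_ALL_ASID
 *              }
 *              return sd->next_asid++;
 *      }
 */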
2658
2659 static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2660 {
2661         return to_svm(vcpu)->vmcb->save.dr6;
2662 }
2663
2664 static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2665 {
2666         struct vcpu_svm *svm = to_svm(vcpu);
2667
2668         svm->vmcb->save.dr6 = value;
2669         mark_dirty(svm->vmcb, VMCB_DR);
2670 }
2671
2672 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2673 {
2674         struct vcpu_svm *svm = to_svm(vcpu);
2675
2676         get_debugreg(vcpu->arch.db[0], 0);
2677         get_debugreg(vcpu->arch.db[1], 1);
2678         get_debugreg(vcpu->arch.db[2], 2);
2679         get_debugreg(vcpu->arch.db[3], 3);
2680         vcpu->arch.dr6 = svm_get_dr6(vcpu);
2681         vcpu->arch.dr7 = svm->vmcb->save.dr7;
2682
2683         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2684         set_dr_intercepts(svm);
2685 }
2686
2687 static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2688 {
2689         struct vcpu_svm *svm = to_svm(vcpu);
2690
2691         svm->vmcb->save.dr7 = value;
2692         mark_dirty(svm->vmcb, VMCB_DR);
2693 }
2694
2695 static int pf_interception(struct vcpu_svm *svm)
2696 {
2697         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2698         u64 error_code = svm->vmcb->control.exit_info_1;
2699
2700         return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2701                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2702                         svm->vmcb->control.insn_bytes : NULL,
2703                         svm->vmcb->control.insn_len);
2704 }
2705
2706 static int npf_interception(struct vcpu_svm *svm)
2707 {
2708         u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2709         u64 error_code = svm->vmcb->control.exit_info_1;
2710
2711         trace_kvm_page_fault(fault_address, error_code);
2712         return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
2713                         static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2714                         svm->vmcb->control.insn_bytes : NULL,
2715                         svm->vmcb->control.insn_len);
2716 }
2717
2718 static int db_interception(struct vcpu_svm *svm)
2719 {
2720         struct kvm_run *kvm_run = svm->vcpu.run;
2721         struct kvm_vcpu *vcpu = &svm->vcpu;
2722
2723         if (!(svm->vcpu.guest_debug &
2724               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
2725                 !svm->nmi_singlestep) {
2726                 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2727                 return 1;
2728         }
2729
2730         if (svm->nmi_singlestep) {
2731                 disable_nmi_singlestep(svm);
2732                 /* Make sure we check for pending NMIs upon entry */
2733                 kvm_make_request(KVM_REQ_EVENT, vcpu);
2734         }
2735
2736         if (svm->vcpu.guest_debug &
2737             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
2738                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2739                 kvm_run->debug.arch.pc =
2740                         svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2741                 kvm_run->debug.arch.exception = DB_VECTOR;
2742                 return 0;
2743         }
2744
2745         return 1;
2746 }
2747
2748 static int bp_interception(struct vcpu_svm *svm)
2749 {
2750         struct kvm_run *kvm_run = svm->vcpu.run;
2751
2752         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2753         kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2754         kvm_run->debug.arch.exception = BP_VECTOR;
2755         return 0;
2756 }
2757
2758 static int ud_interception(struct vcpu_svm *svm)
2759 {
2760         return handle_ud(&svm->vcpu);
2761 }
2762
2763 static int ac_interception(struct vcpu_svm *svm)
2764 {
2765         kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2766         return 1;
2767 }
2768
2769 static int gp_interception(struct vcpu_svm *svm)
2770 {
2771         struct kvm_vcpu *vcpu = &svm->vcpu;
2772         u32 error_code = svm->vmcb->control.exit_info_1;
2773
2774         WARN_ON_ONCE(!enable_vmware_backdoor);
2775
2776         /*
2777          * VMware backdoor emulation on #GP interception only handles IN{S},
2778          * OUT{S}, and RDPMC, none of which generate a non-zero error code.
2779          */
2780         if (error_code) {
2781                 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2782                 return 1;
2783         }
2784         return kvm_emulate_instruction(vcpu, EMULTYPE_VMWARE_GP);
2785 }
2786
2787 static bool is_erratum_383(void)
2788 {
2789         int err, i;
2790         u64 value;
2791
2792         if (!erratum_383_found)
2793                 return false;
2794
2795         value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2796         if (err)
2797                 return false;
2798
2799         /* Bit 62 may or may not be set for this MCE */
2800         value &= ~(1ULL << 62);
2801
2802         if (value != 0xb600000000010015ULL)
2803                 return false;
2804
2805         /* Clear MCi_STATUS registers */
2806         for (i = 0; i < 6; ++i)
2807                 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2808
2809         value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2810         if (!err) {
2811                 u32 low, high;
2812
2813                 value &= ~(1ULL << 2);
2814                 low    = lower_32_bits(value);
2815                 high   = upper_32_bits(value);
2816
2817                 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2818         }
2819
2820         /* Flush tlb to evict multi-match entries */
2821         __flush_tlb_all();
2822
2823         return true;
2824 }
2825
2826 static void svm_handle_mce(struct vcpu_svm *svm)
2827 {
2828         if (is_erratum_383()) {
2829                 /*
2830                  * Erratum 383 triggered. Guest state is corrupt so kill the
2831                  * guest.
2832                  */
2833                 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2834
2835                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
2836
2837                 return;
2838         }
2839
2840         /*
2841          * On an #MC intercept the MCE handler is not called automatically in
2842          * the host. So do it by hand here.
2843          */
2844         asm volatile (
2845                 "int $0x12\n");
2846         /* not sure if we ever come back to this point */
2847
2848         return;
2849 }
2850
2851 static int mc_interception(struct vcpu_svm *svm)
2852 {
2853         return 1;
2854 }
2855
2856 static int shutdown_interception(struct vcpu_svm *svm)
2857 {
2858         struct kvm_run *kvm_run = svm->vcpu.run;
2859
2860         /*
2861          * VMCB is undefined after a SHUTDOWN intercept
2862          * so reinitialize it.
2863          */
2864         clear_page(svm->vmcb);
2865         init_vmcb(svm);
2866
2867         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2868         return 0;
2869 }
2870
2871 static int io_interception(struct vcpu_svm *svm)
2872 {
2873         struct kvm_vcpu *vcpu = &svm->vcpu;
2874         u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
2875         int size, in, string;
2876         unsigned port;
2877
2878         ++svm->vcpu.stat.io_exits;
2879         string = (io_info & SVM_IOIO_STR_MASK) != 0;
2880         in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
2881         if (string)
2882                 return kvm_emulate_instruction(vcpu, 0);
2883
2884         port = io_info >> 16;
2885         size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
2886         svm->next_rip = svm->vmcb->control.exit_info_2;
2887
2888         return kvm_fast_pio(&svm->vcpu, size, port, in);
2889 }
2890
2891 static int nmi_interception(struct vcpu_svm *svm)
2892 {
2893         return 1;
2894 }
2895
2896 static int intr_interception(struct vcpu_svm *svm)
2897 {
2898         ++svm->vcpu.stat.irq_exits;
2899         return 1;
2900 }
2901
2902 static int nop_on_interception(struct vcpu_svm *svm)
2903 {
2904         return 1;
2905 }
2906
2907 static int halt_interception(struct vcpu_svm *svm)
2908 {
2909         return kvm_emulate_halt(&svm->vcpu);
2910 }
2911
2912 static int vmmcall_interception(struct vcpu_svm *svm)
2913 {
2914         return kvm_emulate_hypercall(&svm->vcpu);
2915 }
2916
2917 static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2918 {
2919         struct vcpu_svm *svm = to_svm(vcpu);
2920
2921         return svm->nested.nested_cr3;
2922 }
2923
2924 static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2925 {
2926         struct vcpu_svm *svm = to_svm(vcpu);
2927         u64 cr3 = svm->nested.nested_cr3;
2928         u64 pdpte;
2929         int ret;
2930
2931         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
2932                                        offset_in_page(cr3) + index * 8, 8);
2933         if (ret)
2934                 return 0;
2935         return pdpte;
2936 }
2937
2938 static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2939                                    unsigned long root)
2940 {
2941         struct vcpu_svm *svm = to_svm(vcpu);
2942
2943         svm->vmcb->control.nested_cr3 = __sme_set(root);
2944         mark_dirty(svm->vmcb, VMCB_NPT);
2945 }
2946
2947 static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2948                                        struct x86_exception *fault)
2949 {
2950         struct vcpu_svm *svm = to_svm(vcpu);
2951
2952         if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2953                 /*
2954                  * TODO: track the cause of the nested page fault, and
2955                  * correctly fill in the high bits of exit_info_1.
2956                  */
2957                 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2958                 svm->vmcb->control.exit_code_hi = 0;
2959                 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2960                 svm->vmcb->control.exit_info_2 = fault->address;
2961         }
2962
2963         svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2964         svm->vmcb->control.exit_info_1 |= fault->error_code;
2965
2966         /*
2967          * The present bit is always zero for page structure faults on real
2968          * hardware.
2969          */
2970         if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2971                 svm->vmcb->control.exit_info_1 &= ~1;
2972
2973         nested_svm_vmexit(svm);
2974 }
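
/*
 * Illustrative sketch (not kernel code): the exit_info_1 layout used by
 * nested_svm_inject_npf_exit() above, with the page-fault error code in the
 * low 32 bits and the fault-cause flags in the high 32 bits:
 *
 *      #include <stdint.h>
 *
 *      static uint64_t toy_npf_exit_info_1(uint64_t old, uint32_t error_code)
 *      {
 *              uint64_t info = old & ~0xffffffffULL;   // keep the cause bits
 *
 *              info |= error_code;             // low 32 bits: error code
 *              if (info & (2ULL << 32))        // page-structure fault:
 *                      info &= ~1ULL;          // present bit reads as zero
 *              return info;
 *      }
 */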
2975
2976 static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
2977 {
2978         WARN_ON(mmu_is_nested(vcpu));
2979
2980         vcpu->arch.mmu = &vcpu->arch.guest_mmu;
2981         kvm_init_shadow_mmu(vcpu);
2982         vcpu->arch.mmu->set_cr3           = nested_svm_set_tdp_cr3;
2983         vcpu->arch.mmu->get_cr3           = nested_svm_get_tdp_cr3;
2984         vcpu->arch.mmu->get_pdptr         = nested_svm_get_tdp_pdptr;
2985         vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
2986         vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
2987         reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
2988         vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
2989 }
2990
2991 static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2992 {
2993         vcpu->arch.mmu = &vcpu->arch.root_mmu;
2994         vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
2995 }
2996
2997 static int nested_svm_check_permissions(struct vcpu_svm *svm)
2998 {
2999         if (!(svm->vcpu.arch.efer & EFER_SVME) ||
3000             !is_paging(&svm->vcpu)) {
3001                 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3002                 return 1;
3003         }
3004
3005         if (svm->vmcb->save.cpl) {
3006                 kvm_inject_gp(&svm->vcpu, 0);
3007                 return 1;
3008         }
3009
3010         return 0;
3011 }
3012
3013 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
3014                                       bool has_error_code, u32 error_code)
3015 {
3016         int vmexit;
3017
3018         if (!is_guest_mode(&svm->vcpu))
3019                 return 0;
3020
3021         vmexit = nested_svm_intercept(svm);
3022         if (vmexit != NESTED_EXIT_DONE)
3023                 return 0;
3024
3025         svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
3026         svm->vmcb->control.exit_code_hi = 0;
3027         svm->vmcb->control.exit_info_1 = error_code;
3028
3029         /*
3030          * EXITINFO2 is undefined for all exception intercepts other
3031          * than #PF.
3032          */
3033         if (svm->vcpu.arch.exception.nested_apf)
3034                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
3035         else if (svm->vcpu.arch.exception.has_payload)
3036                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
3037         else
3038                 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
3039
3040         svm->nested.exit_required = true;
3041         return vmexit;
3042 }
3043
3044 /* This function returns true if it is safe to enable the irq window */
3045 static inline bool nested_svm_intr(struct vcpu_svm *svm)
3046 {
3047         if (!is_guest_mode(&svm->vcpu))
3048                 return true;
3049
3050         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3051                 return true;
3052
3053         if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
3054                 return false;
3055
3056         /*
3057          * If a vmexit was already requested (by an intercepted exception,
3058          * for instance), do not overwrite it with an "external interrupt"
3059          * vmexit.
3060          */
3061         if (svm->nested.exit_required)
3062                 return false;
3063
3064         svm->vmcb->control.exit_code   = SVM_EXIT_INTR;
3065         svm->vmcb->control.exit_info_1 = 0;
3066         svm->vmcb->control.exit_info_2 = 0;
3067
3068         if (svm->nested.intercept & 1ULL) {
3069                 /*
3070                  * The #vmexit can't be emulated here directly because this
3071                  * code path runs with irqs and preemption disabled. A
3072                  * #vmexit emulation might sleep. Only signal a request for
3073                  * the #vmexit here.
3074                  */
3075                 svm->nested.exit_required = true;
3076                 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
3077                 return false;
3078         }
3079
3080         return true;
3081 }
3082
3083 /* This function returns true if it is safe to enable the nmi window */
3084 static inline bool nested_svm_nmi(struct vcpu_svm *svm)
3085 {
3086         if (!is_guest_mode(&svm->vcpu))
3087                 return true;
3088
3089         if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
3090                 return true;
3091
3092         svm->vmcb->control.exit_code = SVM_EXIT_NMI;
3093         svm->nested.exit_required = true;
3094
3095         return false;
3096 }
3097
3098 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
3099 {
3100         unsigned port, size, iopm_len;
3101         u16 val, mask;
3102         u8 start_bit;
3103         u64 gpa;
3104
3105         if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
3106                 return NESTED_EXIT_HOST;
3107
3108         port = svm->vmcb->control.exit_info_1 >> 16;
3109         size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
3110                 SVM_IOIO_SIZE_SHIFT;
3111         gpa  = svm->nested.vmcb_iopm + (port / 8);
3112         start_bit = port % 8;
3113         iopm_len = (start_bit + size > 8) ? 2 : 1;
3114         mask = (0xf >> (4 - size)) << start_bit;
3115         val = 0;
3116
3117         if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
3118                 return NESTED_EXIT_DONE;
3119
3120         return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3121 }
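
/*
 * Illustrative sketch (not kernel code): the IOPM lookup performed above.
 * Each I/O port owns one bit in the permission bitmap, and an access of
 * `size` bytes tests `size` consecutive bits, which may straddle a byte
 * boundary:
 *
 *      #include <stdint.h>
 *
 *      static int toy_ioio_intercepted(const uint8_t *iopm, uint16_t port,
 *                                      int size)
 *      {
 *              unsigned int start_bit = port % 8;
 *              uint16_t mask = (0xf >> (4 - size)) << start_bit;
 *              uint16_t val = iopm[port / 8];
 *
 *              if (start_bit + size > 8)       // bits spill into the next byte
 *                      val |= (uint16_t)iopm[port / 8 + 1] << 8;
 *
 *              return (val & mask) != 0;
 *      }
 */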
3122
3123 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
3124 {
3125         u32 offset, msr, value;
3126         int write, mask;
3127
3128         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3129                 return NESTED_EXIT_HOST;
3130
3131         msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];
3132         offset = svm_msrpm_offset(msr);
3133         write  = svm->vmcb->control.exit_info_1 & 1;
3134         mask   = 1 << ((2 * (msr & 0xf)) + write);
3135
3136         if (offset == MSR_INVALID)
3137                 return NESTED_EXIT_DONE;
3138
3139         /* Offset is in 32-bit units but we need it in 8-bit (byte) units */
3140         offset *= 4;
3141
3142         if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
3143                 return NESTED_EXIT_DONE;
3144
3145         return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
3146 }
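
/*
 * Illustrative sketch (not kernel code): the MSR permission check performed
 * above. Each MSR owns a pair of adjacent bits (read, then write) inside a
 * 32-bit chunk of the bitmap, so sixteen MSRs share one chunk:
 *
 *      #include <stdint.h>
 *
 *      static int toy_msr_intercepted(uint32_t chunk, uint32_t msr, int write)
 *      {
 *              // 2 bits per MSR; write selects the second bit of the pair
 *              uint32_t mask = 1u << (2 * (msr & 0xf) + write);
 *
 *              return (chunk & mask) != 0;
 *      }
 */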
3147
3148 /* DB exceptions for our internal use must not cause vmexit */
3149 static int nested_svm_intercept_db(struct vcpu_svm *svm)
3150 {
3151         unsigned long dr6;
3152
3153         /* if we're not singlestepping, it's not ours */
3154         if (!svm->nmi_singlestep)
3155                 return NESTED_EXIT_DONE;
3156
3157         /* if it's not a singlestep exception, it's not ours */
3158         if (kvm_get_dr(&svm->vcpu, 6, &dr6))
3159                 return NESTED_EXIT_DONE;
3160         if (!(dr6 & DR6_BS))
3161                 return NESTED_EXIT_DONE;
3162
3163         /* if the guest is singlestepping, it should get the vmexit */
3164         if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
3165                 disable_nmi_singlestep(svm);
3166                 return NESTED_EXIT_DONE;
3167         }
3168
3169         /* it's ours, the nested hypervisor must not see this one */
3170         return NESTED_EXIT_HOST;
3171 }
3172
3173 static int nested_svm_exit_special(struct vcpu_svm *svm)
3174 {
3175         u32 exit_code = svm->vmcb->control.exit_code;
3176
3177         switch (exit_code) {
3178         case SVM_EXIT_INTR:
3179         case SVM_EXIT_NMI:
3180         case SVM_EXIT_EXCP_BASE + MC_VECTOR:
3181                 return NESTED_EXIT_HOST;
3182         case SVM_EXIT_NPF:
3183                 /* For now we always handle NPFs ourselves when NPT is in use */
3184                 if (npt_enabled)
3185                         return NESTED_EXIT_HOST;
3186                 break;
3187         case SVM_EXIT_EXCP_BASE + PF_VECTOR:
3188                 /* When we're shadowing, trap PFs, but not async PF */
3189                 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
3190                         return NESTED_EXIT_HOST;
3191                 break;
3192         default:
3193                 break;
3194         }
3195
3196         return NESTED_EXIT_CONTINUE;
3197 }
3198
3199 /*
3200  * If this function returns NESTED_EXIT_DONE, the #vmexit is handled by the nested hypervisor
3201  */
3202 static int nested_svm_intercept(struct vcpu_svm *svm)
3203 {
3204         u32 exit_code = svm->vmcb->control.exit_code;
3205         int vmexit = NESTED_EXIT_HOST;
3206
3207         switch (exit_code) {
3208         case SVM_EXIT_MSR:
3209                 vmexit = nested_svm_exit_handled_msr(svm);
3210                 break;
3211         case SVM_EXIT_IOIO:
3212                 vmexit = nested_svm_intercept_ioio(svm);
3213                 break;
3214         case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
3215                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
3216                 if (svm->nested.intercept_cr & bit)
3217                         vmexit = NESTED_EXIT_DONE;
3218                 break;
3219         }
3220         case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
3221                 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
3222                 if (svm->nested.intercept_dr & bit)
3223                         vmexit = NESTED_EXIT_DONE;
3224                 break;
3225         }
3226         case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
3227                 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
3228                 if (svm->nested.intercept_exceptions & excp_bits) {
3229                         if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
3230                                 vmexit = nested_svm_intercept_db(svm);
3231                         else
3232                                 vmexit = NESTED_EXIT_DONE;
3233                 }
3234                 /* an async page fault always causes a vmexit */
3235                 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
3236                          svm->vcpu.arch.exception.nested_apf != 0)
3237                         vmexit = NESTED_EXIT_DONE;
3238                 break;
3239         }
3240         case SVM_EXIT_ERR: {
3241                 vmexit = NESTED_EXIT_DONE;
3242                 break;
3243         }
3244         default: {
3245                 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
3246                 if (svm->nested.intercept & exit_bits)
3247                         vmexit = NESTED_EXIT_DONE;
3248         }
3249         }
3250
3251         return vmexit;
3252 }
3253
3254 static int nested_svm_exit_handled(struct vcpu_svm *svm)
3255 {
3256         int vmexit;
3257
3258         vmexit = nested_svm_intercept(svm);
3259
3260         if (vmexit == NESTED_EXIT_DONE)
3261                 nested_svm_vmexit(svm);
3262
3263         return vmexit;
3264 }
3265
3266 static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3267 {
3268         struct vmcb_control_area *dst  = &dst_vmcb->control;
3269         struct vmcb_control_area *from = &from_vmcb->control;
3270
3271         dst->intercept_cr         = from->intercept_cr;
3272         dst->intercept_dr         = from->intercept_dr;
3273         dst->intercept_exceptions = from->intercept_exceptions;
3274         dst->intercept            = from->intercept;
3275         dst->iopm_base_pa         = from->iopm_base_pa;
3276         dst->msrpm_base_pa        = from->msrpm_base_pa;
3277         dst->tsc_offset           = from->tsc_offset;
3278         dst->asid                 = from->asid;
3279         dst->tlb_ctl              = from->tlb_ctl;
3280         dst->int_ctl              = from->int_ctl;
3281         dst->int_vector           = from->int_vector;
3282         dst->int_state            = from->int_state;
3283         dst->exit_code            = from->exit_code;
3284         dst->exit_code_hi         = from->exit_code_hi;
3285         dst->exit_info_1          = from->exit_info_1;
3286         dst->exit_info_2          = from->exit_info_2;
3287         dst->exit_int_info        = from->exit_int_info;
3288         dst->exit_int_info_err    = from->exit_int_info_err;
3289         dst->nested_ctl           = from->nested_ctl;
3290         dst->event_inj            = from->event_inj;
3291         dst->event_inj_err        = from->event_inj_err;
3292         dst->nested_cr3           = from->nested_cr3;
3293         dst->virt_ext             = from->virt_ext;
3294         dst->pause_filter_count   = from->pause_filter_count;
3295         dst->pause_filter_thresh  = from->pause_filter_thresh;
3296 }
3297
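     /*
      * Emulate a #VMEXIT from L2 to L1: the L2 state currently in svm->vmcb
      * is copied back into the guest's nested VMCB, the L1 state stashed in
      * the hsave area at VMRUN time is restored, and guest mode is left.
      */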
3298 static int nested_svm_vmexit(struct vcpu_svm *svm)
3299 {
3300         int rc;
3301         struct vmcb *nested_vmcb;
3302         struct vmcb *hsave = svm->nested.hsave;
3303         struct vmcb *vmcb = svm->vmcb;
3304         struct kvm_host_map map;
3305
3306         trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3307                                        vmcb->control.exit_info_1,
3308                                        vmcb->control.exit_info_2,
3309                                        vmcb->control.exit_int_info,
3310                                        vmcb->control.exit_int_info_err,
3311                                        KVM_ISA_SVM);
3312
3313         rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
3314         if (rc) {
3315                 if (rc == -EINVAL)
3316                         kvm_inject_gp(&svm->vcpu, 0);
3317                 return 1;
3318         }
3319
3320         nested_vmcb = map.hva;
3321
3322         /* Exit Guest-Mode */
3323         leave_guest_mode(&svm->vcpu);
3324         svm->nested.vmcb = 0;
3325
3326         /* Give the current vmcb to the guest */
3327         disable_gif(svm);
3328
3329         nested_vmcb->save.es     = vmcb->save.es;
3330         nested_vmcb->save.cs     = vmcb->save.cs;
3331         nested_vmcb->save.ss     = vmcb->save.ss;
3332         nested_vmcb->save.ds     = vmcb->save.ds;
3333         nested_vmcb->save.gdtr   = vmcb->save.gdtr;
3334         nested_vmcb->save.idtr   = vmcb->save.idtr;
3335         nested_vmcb->save.efer   = svm->vcpu.arch.efer;
3336         nested_vmcb->save.cr0    = kvm_read_cr0(&svm->vcpu);
3337         nested_vmcb->save.cr3    = kvm_read_cr3(&svm->vcpu);
3338         nested_vmcb->save.cr2    = vmcb->save.cr2;
3339         nested_vmcb->save.cr4    = svm->vcpu.arch.cr4;
3340         nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
3341         nested_vmcb->save.rip    = vmcb->save.rip;
3342         nested_vmcb->save.rsp    = vmcb->save.rsp;
3343         nested_vmcb->save.rax    = vmcb->save.rax;
3344         nested_vmcb->save.dr7    = vmcb->save.dr7;
3345         nested_vmcb->save.dr6    = vmcb->save.dr6;
3346         nested_vmcb->save.cpl    = vmcb->save.cpl;
3347
3348         nested_vmcb->control.int_ctl           = vmcb->control.int_ctl;
3349         nested_vmcb->control.int_vector        = vmcb->control.int_vector;
3350         nested_vmcb->control.int_state         = vmcb->control.int_state;
3351         nested_vmcb->control.exit_code         = vmcb->control.exit_code;
3352         nested_vmcb->control.exit_code_hi      = vmcb->control.exit_code_hi;
3353         nested_vmcb->control.exit_info_1       = vmcb->control.exit_info_1;
3354         nested_vmcb->control.exit_info_2       = vmcb->control.exit_info_2;
3355         nested_vmcb->control.exit_int_info     = vmcb->control.exit_int_info;
3356         nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
3357
3358         if (svm->nrips_enabled)
3359                 nested_vmcb->control.next_rip  = vmcb->control.next_rip;
3360
3361         /*
3362          * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3363          * to make sure that we do not lose injected events. So check event_inj
3364          * here and copy it to exit_int_info if it is valid.
3365          * Exit_int_info and event_inj can't both be valid because the case
3366          * below only happens on a VMRUN instruction intercept which has
3367          * no valid exit_int_info set.
3368          */
3369         if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3370                 struct vmcb_control_area *nc = &nested_vmcb->control;
3371
3372                 nc->exit_int_info     = vmcb->control.event_inj;
3373                 nc->exit_int_info_err = vmcb->control.event_inj_err;
3374         }
3375
3376         nested_vmcb->control.tlb_ctl           = 0;
3377         nested_vmcb->control.event_inj         = 0;
3378         nested_vmcb->control.event_inj_err     = 0;
3379
3380         nested_vmcb->control.pause_filter_count =
3381                 svm->vmcb->control.pause_filter_count;
3382         nested_vmcb->control.pause_filter_thresh =
3383                 svm->vmcb->control.pause_filter_thresh;
3384
3385         /* We always set V_INTR_MASKING and remember the old value in hflags */
3386         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3387                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3388
3389         /* Restore the original control entries */
3390         copy_vmcb_control_area(vmcb, hsave);
3391
3392         svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset;
3393         kvm_clear_exception_queue(&svm->vcpu);
3394         kvm_clear_interrupt_queue(&svm->vcpu);
3395
3396         svm->nested.nested_cr3 = 0;
3397
3398         /* Restore selected save entries */
3399         svm->vmcb->save.es = hsave->save.es;
3400         svm->vmcb->save.cs = hsave->save.cs;
3401         svm->vmcb->save.ss = hsave->save.ss;
3402         svm->vmcb->save.ds = hsave->save.ds;
3403         svm->vmcb->save.gdtr = hsave->save.gdtr;
3404         svm->vmcb->save.idtr = hsave->save.idtr;
3405         kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
3406         svm_set_efer(&svm->vcpu, hsave->save.efer);
3407         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3408         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3409         if (npt_enabled) {
3410                 svm->vmcb->save.cr3 = hsave->save.cr3;
3411                 svm->vcpu.arch.cr3 = hsave->save.cr3;
3412         } else {
3413                 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
3414         }
3415         kvm_rax_write(&svm->vcpu, hsave->save.rax);
3416         kvm_rsp_write(&svm->vcpu, hsave->save.rsp);
3417         kvm_rip_write(&svm->vcpu, hsave->save.rip);
3418         svm->vmcb->save.dr7 = 0;
3419         svm->vmcb->save.cpl = 0;
3420         svm->vmcb->control.exit_int_info = 0;
3421
3422         mark_all_dirty(svm->vmcb);
3423
3424         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3425
3426         nested_svm_uninit_mmu_context(&svm->vcpu);
3427         kvm_mmu_reset_context(&svm->vcpu);
3428         kvm_mmu_load(&svm->vcpu);
3429
3430         /*
3431          * Drop what we picked up for L2 via svm_complete_interrupts() so it
3432          * doesn't end up in L1.
3433          */
3434         svm->vcpu.arch.nmi_injected = false;
3435         kvm_clear_exception_queue(&svm->vcpu);
3436         kvm_clear_interrupt_queue(&svm->vcpu);
3437
3438         return 0;
3439 }
3440
3441 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
3442 {
3443         /*
3444          * This function merges the MSR permission bitmaps of KVM and the
3445          * nested VMCB. It is optimized in that it only merges the parts where
3446          * the KVM MSR permission bitmap may contain zero bits.
3447          */
3448         int i;
3449
3450         if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3451                 return true;
3452
3453         for (i = 0; i < MSRPM_OFFSETS; i++) {
3454                 u32 value, p;
3455                 u64 offset;
3456
3457                 if (msrpm_offsets[i] == 0xffffffff)
3458                         break;
3459
3460                 p      = msrpm_offsets[i];
3461                 offset = svm->nested.vmcb_msrpm + (p * 4);
3462
3463                 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
3464                         return false;
3465
3466                 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3467         }
3468
3469         svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
3470
3471         return true;
3472 }
3473
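     /*
      * Minimal consistency checks on a VMCB about to be VMRUN: the VMRUN
      * intercept must be set, the ASID must be non-zero, and nested paging
      * may only be requested when NPT is enabled on the host.
      */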
3474 static bool nested_vmcb_checks(struct vmcb *vmcb)
3475 {
3476         if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3477                 return false;
3478
3479         if (vmcb->control.asid == 0)
3480                 return false;
3481
3482         if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3483             !npt_enabled)
3484                 return false;
3485
3486         return true;
3487 }
3488
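     /*
      * Switch the vcpu to running the nested guest: load the L2 register
      * and control state from the nested VMCB, force V_INTR_MASKING, merge
      * the intercept bits and finally set GIF so events can be taken again.
      */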
3489 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3490                                  struct vmcb *nested_vmcb, struct kvm_host_map *map)
3491 {
3492         if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
3493                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3494         else
3495                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3496
3497         if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
3498                 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3499                 nested_svm_init_mmu_context(&svm->vcpu);
3500         }
3501
3502         /* Load the nested guest state */
3503         svm->vmcb->save.es = nested_vmcb->save.es;
3504         svm->vmcb->save.cs = nested_vmcb->save.cs;
3505         svm->vmcb->save.ss = nested_vmcb->save.ss;
3506         svm->vmcb->save.ds = nested_vmcb->save.ds;
3507         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3508         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
3509         kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
3510         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3511         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3512         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3513         if (npt_enabled) {
3514                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3515                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
3516         } else
3517                 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
3518
3519         /* Guest paging mode is active - reset mmu */
3520         kvm_mmu_reset_context(&svm->vcpu);
3521
3522         svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
3523         kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax);
3524         kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp);
3525         kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip);
3526
3527         /* Copy these here too, in case we never reach vcpu_run where they are updated */
3528         svm->vmcb->save.rax = nested_vmcb->save.rax;
3529         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3530         svm->vmcb->save.rip = nested_vmcb->save.rip;
3531         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3532         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3533         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3534
3535         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
3536         svm->nested.vmcb_iopm  = nested_vmcb->control.iopm_base_pa  & ~0x0fffULL;
3537
3538         /* cache intercepts */
3539         svm->nested.intercept_cr         = nested_vmcb->control.intercept_cr;
3540         svm->nested.intercept_dr         = nested_vmcb->control.intercept_dr;
3541         svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3542         svm->nested.intercept            = nested_vmcb->control.intercept;
3543
3544         svm_flush_tlb(&svm->vcpu, true);
3545         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
3546         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3547                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3548         else
3549                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3550
3551         if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3552                 /* We only want the cr8 intercept bits of the guest */
3553                 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3554                 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
3555         }
3556
3557         /* We don't want to see VMMCALLs from a nested guest */
3558         clr_intercept(svm, INTERCEPT_VMMCALL);
3559
3560         svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset;
3561         svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset;
3562
3563         svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
3564         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3565         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3566         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3567         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3568
3569         svm->vmcb->control.pause_filter_count =
3570                 nested_vmcb->control.pause_filter_count;
3571         svm->vmcb->control.pause_filter_thresh =
3572                 nested_vmcb->control.pause_filter_thresh;
3573
3574         kvm_vcpu_unmap(&svm->vcpu, map, true);
3575
3576         /* Enter Guest-Mode */
3577         enter_guest_mode(&svm->vcpu);
3578
3579         /*
3580          * Merge guest and host intercepts - must be called with vcpu in
3581          * guest-mode to take effect here
3582          */
3583         recalc_intercepts(svm);
3584
3585         svm->nested.vmcb = vmcb_gpa;
3586
3587         enable_gif(svm);
3588
3589         mark_all_dirty(svm->vmcb);
3590 }
3591
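     /*
      * Emulate VMRUN: RAX holds the guest physical address of the nested
      * VMCB.  Host (L1) state is saved to the hsave area so that
      * nested_svm_vmexit() can restore it later, then the vcpu enters SVM
      * guest mode.
      */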
3592 static int nested_svm_vmrun(struct vcpu_svm *svm)
3593 {
3594         int ret;
3595         struct vmcb *nested_vmcb;
3596         struct vmcb *hsave = svm->nested.hsave;
3597         struct vmcb *vmcb = svm->vmcb;
3598         struct kvm_host_map map;
3599         u64 vmcb_gpa;
3600
3601         vmcb_gpa = svm->vmcb->save.rax;
3602
3603         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
3604         if (ret == -EINVAL) {
3605                 kvm_inject_gp(&svm->vcpu, 0);
3606                 return 1;
3607         } else if (ret) {
3608                 return kvm_skip_emulated_instruction(&svm->vcpu);
3609         }
3610
3611         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3612
3613         nested_vmcb = map.hva;
3614
3615         if (!nested_vmcb_checks(nested_vmcb)) {
3616                 nested_vmcb->control.exit_code    = SVM_EXIT_ERR;
3617                 nested_vmcb->control.exit_code_hi = 0;
3618                 nested_vmcb->control.exit_info_1  = 0;
3619                 nested_vmcb->control.exit_info_2  = 0;
3620
3621                 kvm_vcpu_unmap(&svm->vcpu, &map, true);
3622
3623                 return ret;
3624         }
3625
3626         trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3627                                nested_vmcb->save.rip,
3628                                nested_vmcb->control.int_ctl,
3629                                nested_vmcb->control.event_inj,
3630                                nested_vmcb->control.nested_ctl);
3631
3632         trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3633                                     nested_vmcb->control.intercept_cr >> 16,
3634                                     nested_vmcb->control.intercept_exceptions,
3635                                     nested_vmcb->control.intercept);
3636
3637         /* Clear internal status */
3638         kvm_clear_exception_queue(&svm->vcpu);
3639         kvm_clear_interrupt_queue(&svm->vcpu);
3640
3641         /*
3642          * Save the old vmcb, so we don't need to pick what we save, but can
3643          * restore everything when a VMEXIT occurs
3644          */
3645         hsave->save.es     = vmcb->save.es;
3646         hsave->save.cs     = vmcb->save.cs;
3647         hsave->save.ss     = vmcb->save.ss;
3648         hsave->save.ds     = vmcb->save.ds;
3649         hsave->save.gdtr   = vmcb->save.gdtr;
3650         hsave->save.idtr   = vmcb->save.idtr;
3651         hsave->save.efer   = svm->vcpu.arch.efer;
3652         hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
3653         hsave->save.cr4    = svm->vcpu.arch.cr4;
3654         hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3655         hsave->save.rip    = kvm_rip_read(&svm->vcpu);
3656         hsave->save.rsp    = vmcb->save.rsp;
3657         hsave->save.rax    = vmcb->save.rax;
3658         if (npt_enabled)
3659                 hsave->save.cr3    = vmcb->save.cr3;
3660         else
3661                 hsave->save.cr3    = kvm_read_cr3(&svm->vcpu);
3662
3663         copy_vmcb_control_area(hsave, vmcb);
3664
3665         enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map);
3666
3667         if (!nested_svm_vmrun_msrpm(svm)) {
3668                 svm->vmcb->control.exit_code    = SVM_EXIT_ERR;
3669                 svm->vmcb->control.exit_code_hi = 0;
3670                 svm->vmcb->control.exit_info_1  = 0;
3671                 svm->vmcb->control.exit_info_2  = 0;
3672
3673                 nested_svm_vmexit(svm);
3674         }
3675
3676         return ret;
3677 }
3678
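     /*
      * VMLOAD/VMSAVE operate on the "additional" state only: FS/GS/TR/LDTR,
      * KERNEL_GS_BASE, the SYSCALL MSRs (STAR/LSTAR/CSTAR/SFMASK) and the
      * SYSENTER MSRs.  Copy exactly that set between the two VMCBs.
      */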
3679 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
3680 {
3681         to_vmcb->save.fs = from_vmcb->save.fs;
3682         to_vmcb->save.gs = from_vmcb->save.gs;
3683         to_vmcb->save.tr = from_vmcb->save.tr;
3684         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3685         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3686         to_vmcb->save.star = from_vmcb->save.star;
3687         to_vmcb->save.lstar = from_vmcb->save.lstar;
3688         to_vmcb->save.cstar = from_vmcb->save.cstar;
3689         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3690         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3691         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3692         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
3693 }
3694
3695 static int vmload_interception(struct vcpu_svm *svm)
3696 {
3697         struct vmcb *nested_vmcb;
3698         struct kvm_host_map map;
3699         int ret;
3700
3701         if (nested_svm_check_permissions(svm))
3702                 return 1;
3703
3704         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3705         if (ret) {
3706                 if (ret == -EINVAL)
3707                         kvm_inject_gp(&svm->vcpu, 0);
3708                 return 1;
3709         }
3710
3711         nested_vmcb = map.hva;
3712
3713         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3714
3715         nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
3716         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3717
3718         return ret;
3719 }
3720
3721 static int vmsave_interception(struct vcpu_svm *svm)
3722 {
3723         struct vmcb *nested_vmcb;
3724         struct kvm_host_map map;
3725         int ret;
3726
3727         if (nested_svm_check_permissions(svm))
3728                 return 1;
3729
3730         ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map);
3731         if (ret) {
3732                 if (ret == -EINVAL)
3733                         kvm_inject_gp(&svm->vcpu, 0);
3734                 return 1;
3735         }
3736
3737         nested_vmcb = map.hva;
3738
3739         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3740
3741         nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
3742         kvm_vcpu_unmap(&svm->vcpu, &map, true);
3743
3744         return ret;
3745 }
3746
3747 static int vmrun_interception(struct vcpu_svm *svm)
3748 {
3749         if (nested_svm_check_permissions(svm))
3750                 return 1;
3751
3752         return nested_svm_vmrun(svm);
3753 }
3754
3755 static int stgi_interception(struct vcpu_svm *svm)
3756 {
3757         int ret;
3758
3759         if (nested_svm_check_permissions(svm))
3760                 return 1;
3761
3762         /*
3763          * If VGIF is enabled, the STGI intercept is only added to
3764          * detect the opening of the SMI/NMI window; remove it now.
3765          */
3766         if (vgif_enabled(svm))
3767                 clr_intercept(svm, INTERCEPT_STGI);
3768
3769         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3770         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3771
3772         enable_gif(svm);
3773
3774         return ret;
3775 }
3776
3777 static int clgi_interception(struct vcpu_svm *svm)
3778 {
3779         int ret;
3780
3781         if (nested_svm_check_permissions(svm))
3782                 return 1;
3783
3784         ret = kvm_skip_emulated_instruction(&svm->vcpu);
3785
3786         disable_gif(svm);
3787
3788         /* After a CLGI no interrupts should be delivered */
3789         if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3790                 svm_clear_vintr(svm);
3791                 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3792                 mark_dirty(svm->vmcb, VMCB_INTR);
3793         }
3794
3795         return ret;
3796 }
3797
3798 static int invlpga_interception(struct vcpu_svm *svm)
3799 {
3800         struct kvm_vcpu *vcpu = &svm->vcpu;
3801
3802         trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu),
3803                           kvm_rax_read(&svm->vcpu));
3804
3805         /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
3806         kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu));
3807
3808         return kvm_skip_emulated_instruction(&svm->vcpu);
3809 }
3810
3811 static int skinit_interception(struct vcpu_svm *svm)
3812 {
3813         trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu));
3814
3815         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3816         return 1;
3817 }
3818
3819 static int wbinvd_interception(struct vcpu_svm *svm)
3820 {
3821         return kvm_emulate_wbinvd(&svm->vcpu);
3822 }
3823
3824 static int xsetbv_interception(struct vcpu_svm *svm)
3825 {
3826         u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3827         u32 index = kvm_rcx_read(&svm->vcpu);
3828
3829         if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3830                 return kvm_skip_emulated_instruction(&svm->vcpu);
3831         }
3832
3833         return 1;
3834 }
3835
3836 static int rdpru_interception(struct vcpu_svm *svm)
3837 {
3838         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3839         return 1;
3840 }
3841
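     /*
      * Decode a task switch #vmexit: exit_info_1 carries the target TSS
      * selector, exit_info_2 flags the IRET and far-JMP causes, and a valid
      * exit_int_info means the switch happened through a task gate while an
      * event was being delivered.
      */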
3842 static int task_switch_interception(struct vcpu_svm *svm)
3843 {
3844         u16 tss_selector;
3845         int reason;
3846         int int_type = svm->vmcb->control.exit_int_info &
3847                 SVM_EXITINTINFO_TYPE_MASK;
3848         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
3849         uint32_t type =
3850                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3851         uint32_t idt_v =
3852                 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
3853         bool has_error_code = false;
3854         u32 error_code = 0;
3855
3856         tss_selector = (u16)svm->vmcb->control.exit_info_1;
3857
3858         if (svm->vmcb->control.exit_info_2 &
3859             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
3860                 reason = TASK_SWITCH_IRET;
3861         else if (svm->vmcb->control.exit_info_2 &
3862                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3863                 reason = TASK_SWITCH_JMP;
3864         else if (idt_v)
3865                 reason = TASK_SWITCH_GATE;
3866         else
3867                 reason = TASK_SWITCH_CALL;
3868
3869         if (reason == TASK_SWITCH_GATE) {
3870                 switch (type) {
3871                 case SVM_EXITINTINFO_TYPE_NMI:
3872                         svm->vcpu.arch.nmi_injected = false;
3873                         break;
3874                 case SVM_EXITINTINFO_TYPE_EXEPT:
3875                         if (svm->vmcb->control.exit_info_2 &
3876                             (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3877                                 has_error_code = true;
3878                                 error_code =
3879                                         (u32)svm->vmcb->control.exit_info_2;
3880                         }
3881                         kvm_clear_exception_queue(&svm->vcpu);
3882                         break;
3883                 case SVM_EXITINTINFO_TYPE_INTR:
3884                         kvm_clear_interrupt_queue(&svm->vcpu);
3885                         break;
3886                 default:
3887                         break;
3888                 }
3889         }
3890
3891         if (reason != TASK_SWITCH_GATE ||
3892             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3893             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
3894              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
3895                 if (!skip_emulated_instruction(&svm->vcpu))
3896                         return 0;
3897         }
3898
3899         if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3900                 int_vec = -1;
3901
3902         return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
3903                                has_error_code, error_code);
3904 }
3905
3906 static int cpuid_interception(struct vcpu_svm *svm)
3907 {
3908         return kvm_emulate_cpuid(&svm->vcpu);
3909 }
3910
3911 static int iret_interception(struct vcpu_svm *svm)
3912 {
3913         ++svm->vcpu.stat.nmi_window_exits;
3914         clr_intercept(svm, INTERCEPT_IRET);
3915         svm->vcpu.arch.hflags |= HF_IRET_MASK;
3916         svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
3917         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
3918         return 1;
3919 }
3920
3921 static int invlpg_interception(struct vcpu_svm *svm)
3922 {
3923         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3924                 return kvm_emulate_instruction(&svm->vcpu, 0);
3925
3926         kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
3927         return kvm_skip_emulated_instruction(&svm->vcpu);
3928 }
3929
3930 static int emulate_on_interception(struct vcpu_svm *svm)
3931 {
3932         return kvm_emulate_instruction(&svm->vcpu, 0);
3933 }
3934
3935 static int rsm_interception(struct vcpu_svm *svm)
3936 {
3937         return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2);
3938 }
3939
3940 static int rdpmc_interception(struct vcpu_svm *svm)
3941 {
3942         int err;
3943
3944         if (!nrips)
3945                 return emulate_on_interception(svm);
3946
3947         err = kvm_rdpmc(&svm->vcpu);
3948         return kvm_complete_insn_gp(&svm->vcpu, err);
3949 }
3950
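     /*
      * The selective CR0 write intercept only fires when bits outside
      * SVM_CR0_SELECTIVE_MASK change.  Emulate that check for L1 here and,
      * if it triggers, synthesize an SVM_EXIT_CR0_SEL_WRITE #vmexit for it.
      */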
3951 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3952                                             unsigned long val)
3953 {
3954         unsigned long cr0 = svm->vcpu.arch.cr0;
3955         bool ret = false;
3956         u64 intercept;
3957
3958         intercept = svm->nested.intercept;
3959
3960         if (!is_guest_mode(&svm->vcpu) ||
3961             (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3962                 return false;
3963
3964         cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3965         val &= ~SVM_CR0_SELECTIVE_MASK;
3966
3967         if (cr0 ^ val) {
3968                 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3969                 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3970         }
3971
3972         return ret;
3973 }
3974
3975 #define CR_VALID (1ULL << 63)
3976
3977 static int cr_interception(struct vcpu_svm *svm)
3978 {
3979         int reg, cr;
3980         unsigned long val;
3981         int err;
3982
3983         if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3984                 return emulate_on_interception(svm);
3985
3986         if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3987                 return emulate_on_interception(svm);
3988
3989         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
3990         if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3991                 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3992         else
3993                 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
3994
3995         err = 0;
3996         if (cr >= 16) { /* mov to cr */
3997                 cr -= 16;
3998                 val = kvm_register_read(&svm->vcpu, reg);
3999                 switch (cr) {
4000                 case 0:
4001                         if (!check_selective_cr0_intercepted(svm, val))
4002                                 err = kvm_set_cr0(&svm->vcpu, val);
4003                         else
4004                                 return 1;
4005
4006                         break;
4007                 case 3:
4008                         err = kvm_set_cr3(&svm->vcpu, val);
4009                         break;
4010                 case 4:
4011                         err = kvm_set_cr4(&svm->vcpu, val);
4012                         break;
4013                 case 8:
4014                         err = kvm_set_cr8(&svm->vcpu, val);
4015                         break;
4016                 default:
4017                         WARN(1, "unhandled write to CR%d", cr);
4018                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4019                         return 1;
4020                 }
4021         } else { /* mov from cr */
4022                 switch (cr) {
4023                 case 0:
4024                         val = kvm_read_cr0(&svm->vcpu);
4025                         break;
4026                 case 2:
4027                         val = svm->vcpu.arch.cr2;
4028                         break;
4029                 case 3:
4030                         val = kvm_read_cr3(&svm->vcpu);
4031                         break;
4032                 case 4:
4033                         val = kvm_read_cr4(&svm->vcpu);
4034                         break;
4035                 case 8:
4036                         val = kvm_get_cr8(&svm->vcpu);
4037                         break;
4038                 default:
4039                         WARN(1, "unhandled read from CR%d", cr);
4040                         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
4041                         return 1;
4042                 }
4043                 kvm_register_write(&svm->vcpu, reg, val);
4044         }
4045         return kvm_complete_insn_gp(&svm->vcpu, err);
4046 }
4047
4048 static int dr_interception(struct vcpu_svm *svm)
4049 {
4050         int reg, dr;
4051         unsigned long val;
4052
4053         if (svm->vcpu.guest_debug == 0) {
4054                 /*
4055                  * No more DR vmexits; force a reload of the debug registers
4056                  * and reenter on this instruction.  The next vmexit will
4057                  * retrieve the full state of the debug registers.
4058                  */
4059                 clr_dr_intercepts(svm);
4060                 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
4061                 return 1;
4062         }
4063
4064         if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
4065                 return emulate_on_interception(svm);
4066
4067         reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
4068         dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
4069
4070         if (dr >= 16) { /* mov to DRn */
4071                 if (!kvm_require_dr(&svm->vcpu, dr - 16))
4072                         return 1;
4073                 val = kvm_register_read(&svm->vcpu, reg);
4074                 kvm_set_dr(&svm->vcpu, dr - 16, val);
4075         } else {
4076                 if (!kvm_require_dr(&svm->vcpu, dr))
4077                         return 1;
4078                 kvm_get_dr(&svm->vcpu, dr, &val);
4079                 kvm_register_write(&svm->vcpu, reg, val);
4080         }
4081
4082         return kvm_skip_emulated_instruction(&svm->vcpu);
4083 }
4084
4085 static int cr8_write_interception(struct vcpu_svm *svm)
4086 {
4087         struct kvm_run *kvm_run = svm->vcpu.run;
4088         int r;
4089
4090         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
4091         /* instruction emulation calls kvm_set_cr8() */
4092         r = cr_interception(svm);
4093         if (lapic_in_kernel(&svm->vcpu))
4094                 return r;
4095         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
4096                 return r;
4097         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
4098         return 0;
4099 }
4100
4101 static int svm_get_msr_feature(struct kvm_msr_entry *msr)
4102 {
4103         msr->data = 0;
4104
4105         switch (msr->index) {
4106         case MSR_F10H_DECFG:
4107                 if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
4108                         msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
4109                 break;
4110         default:
4111                 return 1;
4112         }
4113
4114         return 0;
4115 }
4116
4117 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4118 {
4119         struct vcpu_svm *svm = to_svm(vcpu);
4120
4121         switch (msr_info->index) {
4122         case MSR_STAR:
4123                 msr_info->data = svm->vmcb->save.star;
4124                 break;
4125 #ifdef CONFIG_X86_64
4126         case MSR_LSTAR:
4127                 msr_info->data = svm->vmcb->save.lstar;
4128                 break;
4129         case MSR_CSTAR:
4130                 msr_info->data = svm->vmcb->save.cstar;
4131                 break;
4132         case MSR_KERNEL_GS_BASE:
4133                 msr_info->data = svm->vmcb->save.kernel_gs_base;
4134                 break;
4135         case MSR_SYSCALL_MASK:
4136                 msr_info->data = svm->vmcb->save.sfmask;
4137                 break;
4138 #endif
4139         case MSR_IA32_SYSENTER_CS:
4140                 msr_info->data = svm->vmcb->save.sysenter_cs;
4141                 break;
4142         case MSR_IA32_SYSENTER_EIP:
4143                 msr_info->data = svm->sysenter_eip;
4144                 break;
4145         case MSR_IA32_SYSENTER_ESP:
4146                 msr_info->data = svm->sysenter_esp;
4147                 break;
4148         case MSR_TSC_AUX:
4149                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4150                         return 1;
4151                 msr_info->data = svm->tsc_aux;
4152                 break;
4153         /*
4154          * Nobody will change the following 5 values in the VMCB so we can
4155          * safely return them on rdmsr. They will always be 0 until LBRV is
4156          * implemented.
4157          */
4158         case MSR_IA32_DEBUGCTLMSR:
4159                 msr_info->data = svm->vmcb->save.dbgctl;
4160                 break;
4161         case MSR_IA32_LASTBRANCHFROMIP:
4162                 msr_info->data = svm->vmcb->save.br_from;
4163                 break;
4164         case MSR_IA32_LASTBRANCHTOIP:
4165                 msr_info->data = svm->vmcb->save.br_to;
4166                 break;
4167         case MSR_IA32_LASTINTFROMIP:
4168                 msr_info->data = svm->vmcb->save.last_excp_from;
4169                 break;
4170         case MSR_IA32_LASTINTTOIP:
4171                 msr_info->data = svm->vmcb->save.last_excp_to;
4172                 break;
4173         case MSR_VM_HSAVE_PA:
4174                 msr_info->data = svm->nested.hsave_msr;
4175                 break;
4176         case MSR_VM_CR:
4177                 msr_info->data = svm->nested.vm_cr_msr;
4178                 break;
4179         case MSR_IA32_SPEC_CTRL:
4180                 if (!msr_info->host_initiated &&
4181                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4182                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4183                         return 1;
4184
4185                 msr_info->data = svm->spec_ctrl;
4186                 break;
4187         case MSR_AMD64_VIRT_SPEC_CTRL:
4188                 if (!msr_info->host_initiated &&
4189                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4190                         return 1;
4191
4192                 msr_info->data = svm->virt_spec_ctrl;
4193                 break;
4194         case MSR_F15H_IC_CFG: {
4195
4196                 int family, model;
4197
4198                 family = guest_cpuid_family(vcpu);
4199                 model  = guest_cpuid_model(vcpu);
4200
4201                 if (family < 0 || model < 0)
4202                         return kvm_get_msr_common(vcpu, msr_info);
4203
4204                 msr_info->data = 0;
4205
4206                 if (family == 0x15 &&
4207                     (model >= 0x2 && model < 0x20))
4208                         msr_info->data = 0x1E;
4209                 }
4210                 break;
4211         case MSR_F10H_DECFG:
4212                 msr_info->data = svm->msr_decfg;
4213                 break;
4214         default:
4215                 return kvm_get_msr_common(vcpu, msr_info);
4216         }
4217         return 0;
4218 }
4219
4220 static int rdmsr_interception(struct vcpu_svm *svm)
4221 {
4222         return kvm_emulate_rdmsr(&svm->vcpu);
4223 }
4224
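     /*
      * Emulated VM_CR MSR: once the guest has set SVM_DIS, both SVM_LOCK
      * and SVM_DIS become read-only, and setting SVM_DIS is refused while
      * EFER.SVME is set.
      */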
4225 static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
4226 {
4227         struct vcpu_svm *svm = to_svm(vcpu);
4228         int svm_dis, chg_mask;
4229
4230         if (data & ~SVM_VM_CR_VALID_MASK)
4231                 return 1;
4232
4233         chg_mask = SVM_VM_CR_VALID_MASK;
4234
4235         if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
4236                 chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
4237
4238         svm->nested.vm_cr_msr &= ~chg_mask;
4239         svm->nested.vm_cr_msr |= (data & chg_mask);
4240
4241         svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
4242
4243         /* check for svm_disable while efer.svme is set */
4244         if (svm_dis && (vcpu->arch.efer & EFER_SVME))
4245                 return 1;
4246
4247         return 0;
4248 }
4249
4250 static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
4251 {
4252         struct vcpu_svm *svm = to_svm(vcpu);
4253
4254         u32 ecx = msr->index;
4255         u64 data = msr->data;
4256         switch (ecx) {
4257         case MSR_IA32_CR_PAT:
4258                 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
4259                         return 1;
4260                 vcpu->arch.pat = data;
4261                 svm->vmcb->save.g_pat = data;
4262                 mark_dirty(svm->vmcb, VMCB_NPT);
4263                 break;
4264         case MSR_IA32_SPEC_CTRL:
4265                 if (!msr->host_initiated &&
4266                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS) &&
4267                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
4268                         return 1;
4269
4270                 /* The STIBP bit doesn't fault even if it's not advertised */
4271                 if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD))
4272                         return 1;
4273
4274                 svm->spec_ctrl = data;
4275
4276                 if (!data)
4277                         break;
4278
4279                 /*
4280                  * For non-nested:
4281                  * When it's written (to non-zero) for the first time, pass
4282                  * it through.
4283                  *
4284                  * For nested:
4285                  * The handling of the MSR bitmap for L2 guests is done in
4286                  * nested_svm_vmrun_msrpm.
4287                  * We update the L1 MSR bit as well since it will end up
4288                  * touching the MSR anyway now.
4289                  */
4290                 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
4291                 break;
4292         case MSR_IA32_PRED_CMD:
4293                 if (!msr->host_initiated &&
4294                     !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBPB))
4295                         return 1;
4296
4297                 if (data & ~PRED_CMD_IBPB)
4298                         return 1;
4299
4300                 if (!data)
4301                         break;
4302
4303                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
4304                 if (is_guest_mode(vcpu))
4305                         break;
4306                 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
4307                 break;
4308         case MSR_AMD64_VIRT_SPEC_CTRL:
4309                 if (!msr->host_initiated &&
4310                     !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
4311                         return 1;
4312
4313                 if (data & ~SPEC_CTRL_SSBD)
4314                         return 1;
4315
4316                 svm->virt_spec_ctrl = data;
4317                 break;
4318         case MSR_STAR:
4319                 svm->vmcb->save.star = data;
4320                 break;
4321 #ifdef CONFIG_X86_64
4322         case MSR_LSTAR:
4323                 svm->vmcb->save.lstar = data;
4324                 break;
4325         case MSR_CSTAR:
4326                 svm->vmcb->save.cstar = data;
4327                 break;
4328         case MSR_KERNEL_GS_BASE:
4329                 svm->vmcb->save.kernel_gs_base = data;
4330                 break;
4331         case MSR_SYSCALL_MASK:
4332                 svm->vmcb->save.sfmask = data;
4333                 break;
4334 #endif
4335         case MSR_IA32_SYSENTER_CS:
4336                 svm->vmcb->save.sysenter_cs = data;
4337                 break;
4338         case MSR_IA32_SYSENTER_EIP:
4339                 svm->sysenter_eip = data;
4340                 svm->vmcb->save.sysenter_eip = data;
4341                 break;
4342         case MSR_IA32_SYSENTER_ESP:
4343                 svm->sysenter_esp = data;
4344                 svm->vmcb->save.sysenter_esp = data;
4345                 break;
4346         case MSR_TSC_AUX:
4347                 if (!boot_cpu_has(X86_FEATURE_RDTSCP))
4348                         return 1;
4349
4350                 /*
4351                  * This is rare, so we update the MSR here instead of using
4352                  * direct_access_msrs.  Doing that would require a rdmsr in
4353                  * svm_vcpu_put.
4354                  */
4355                 svm->tsc_aux = data;
4356                 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
4357                 break;
4358         case MSR_IA32_DEBUGCTLMSR:
4359                 if (!boot_cpu_has(X86_FEATURE_LBRV)) {
4360                         vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
4361                                     __func__, data);
4362                         break;
4363                 }
4364                 if (data & DEBUGCTL_RESERVED_BITS)
4365                         return 1;
4366
4367                 svm->vmcb->save.dbgctl = data;
4368                 mark_dirty(svm->vmcb, VMCB_LBR);
4369                 if (data & (1ULL<<0))
4370                         svm_enable_lbrv(svm);
4371                 else
4372                         svm_disable_lbrv(svm);
4373                 break;
4374         case MSR_VM_HSAVE_PA:
4375                 svm->nested.hsave_msr = data;
4376                 break;
4377         case MSR_VM_CR:
4378                 return svm_set_vm_cr(vcpu, data);
4379         case MSR_VM_IGNNE:
4380                 vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
4381                 break;
4382         case MSR_F10H_DECFG: {
4383                 struct kvm_msr_entry msr_entry;
4384
4385                 msr_entry.index = msr->index;
4386                 if (svm_get_msr_feature(&msr_entry))
4387                         return 1;
4388
4389                 /* Check the supported bits */
4390                 if (data & ~msr_entry.data)
4391                         return 1;
4392
4393                 /* Don't allow the guest to change a bit, #GP */
4394                 if (!msr->host_initiated && (data ^ msr_entry.data))
4395                         return 1;
4396
4397                 svm->msr_decfg = data;
4398                 break;
4399         }
4400         case MSR_IA32_APICBASE:
4401                 if (kvm_vcpu_apicv_active(vcpu))
4402                         avic_update_vapic_bar(to_svm(vcpu), data);
4403                 /* Fall through */
4404         default:
4405                 return kvm_set_msr_common(vcpu, msr);
4406         }
4407         return 0;
4408 }
4409
4410 static int wrmsr_interception(struct vcpu_svm *svm)
4411 {
4412         return kvm_emulate_wrmsr(&svm->vcpu);
4413 }
4414
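     /* On SVM_EXIT_MSR, exit_info_1 is 1 for WRMSR and 0 for RDMSR. */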
4415 static int msr_interception(struct vcpu_svm *svm)
4416 {
4417         if (svm->vmcb->control.exit_info_1)
4418                 return wrmsr_interception(svm);
4419         else
4420                 return rdmsr_interception(svm);
4421 }
4422
4423 static int interrupt_window_interception(struct vcpu_svm *svm)
4424 {
4425         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
4426         svm_clear_vintr(svm);
4427         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
4428         mark_dirty(svm->vmcb, VMCB_INTR);
4429         ++svm->vcpu.stat.irq_window_exits;
4430         return 1;
4431 }
4432
4433 static int pause_interception(struct vcpu_svm *svm)
4434 {
4435         struct kvm_vcpu *vcpu = &svm->vcpu;
4436         bool in_kernel = (svm_get_cpl(vcpu) == 0);
4437
4438         if (pause_filter_thresh)
4439                 grow_ple_window(vcpu);
4440
4441         kvm_vcpu_on_spin(vcpu, in_kernel);
4442         return 1;
4443 }
4444
4445 static int nop_interception(struct vcpu_svm *svm)
4446 {
4447         return kvm_skip_emulated_instruction(&(svm->vcpu));
4448 }
4449
4450 static int monitor_interception(struct vcpu_svm *svm)
4451 {
4452         printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
4453         return nop_interception(svm);
4454 }
4455
4456 static int mwait_interception(struct vcpu_svm *svm)
4457 {
4458         printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
4459         return nop_interception(svm);
4460 }
4461
4462 enum avic_ipi_failure_cause {
4463         AVIC_IPI_FAILURE_INVALID_INT_TYPE,
4464         AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
4465         AVIC_IPI_FAILURE_INVALID_TARGET,
4466         AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
4467 };
4468
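     /*
      * AVIC could not deliver an IPI in hardware.  exit_info_1 holds the
      * ICR value that was written (ICRH in the high 32 bits, ICRL in the
      * low ones) and exit_info_2 holds the failure reason and target index.
      */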
4469 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
4470 {
4471         u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
4472         u32 icrl = svm->vmcb->control.exit_info_1;
4473         u32 id = svm->vmcb->control.exit_info_2 >> 32;
4474         u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
4475         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4476
4477         trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);
4478
4479         switch (id) {
4480         case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
4481                 /*
4482                  * AVIC hardware handles the generation of
4483                  * IPIs when the specified Message Type is Fixed
4484                  * (also known as fixed delivery mode) and
4485                  * the Trigger Mode is edge-triggered. The hardware
4486                  * also supports self and broadcast delivery modes
4487                  * specified via the Destination Shorthand (DSH)
4488                  * field of the ICRL. Logical and physical APIC ID
4489                  * formats are supported. All other IPI types cause
4490                  * a #VMEXIT, which needs to be emulated.
4491                  */
4492                 kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
4493                 kvm_lapic_reg_write(apic, APIC_ICR, icrl);
4494                 break;
4495         case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
4496                 int i;
4497                 struct kvm_vcpu *vcpu;
4498                 struct kvm *kvm = svm->vcpu.kvm;
4499                 struct kvm_lapic *apic = svm->vcpu.arch.apic;
4500
4501                 /*
4502                  * At this point, we expect that the AVIC HW has already
4503                  * set the appropriate IRR bits on the valid target
4504                  * vcpus. So, we just need to kick the appropriate vcpu.
4505                  */
4506                 kvm_for_each_vcpu(i, vcpu, kvm) {
4507                         bool m = kvm_apic_match_dest(vcpu, apic,
4508                                                      icrl & KVM_APIC_SHORT_MASK,
4509                                                      GET_APIC_DEST_FIELD(icrh),
4510                                                      icrl & KVM_APIC_DEST_MASK);
4511
4512                         if (m && !avic_vcpu_is_running(vcpu))
4513                                 kvm_vcpu_wake_up(vcpu);
4514                 }
4515                 break;
4516         }
4517         case AVIC_IPI_FAILURE_INVALID_TARGET:
4518                 WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
4519                           index, svm->vcpu.vcpu_id, icrh, icrl);
4520                 break;
4521         case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
4522                 WARN_ONCE(1, "Invalid backing page\n");
4523                 break;
4524         default:
4525                 pr_err("Unknown IPI interception\n");
4526         }
4527
4528         return 1;
4529 }
4530
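     /*
      * Map a logical APIC ID to its entry in the AVIC logical ID table.  In
      * flat mode the entry index is the bit position (lowest set bit) of
      * the logical ID; in cluster mode the high nibble selects the cluster
      * and the low nibble the APIC within it: index = cluster * 4 + apic.
      */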
4531 static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
4532 {
4533         struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
4534         int index;
4535         u32 *logical_apic_id_table;
4536         int dlid = GET_APIC_LOGICAL_ID(ldr);
4537
4538         if (!dlid)
4539                 return NULL;
4540
4541         if (flat) { /* flat */
4542                 index = ffs(dlid) - 1;
4543                 if (index > 7)
4544                         return NULL;
4545         } else { /* cluster */
4546                 int cluster = (dlid & 0xf0) >> 4;
4547                 int apic = ffs(dlid & 0x0f) - 1;
4548
4549                 if ((apic < 0) || (apic > 7) ||
4550                     (cluster >= 0xf))
4551                         return NULL;
4552                 index = (cluster << 2) + apic;
4553         }
4554
4555         logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
4556
4557         return &logical_apic_id_table[index];
4558 }
4559
4560 static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr)
4561 {
4562         bool flat;
4563         u32 *entry, new_entry;
4564
4565         flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
4566         entry = avic_get_logical_id_entry(vcpu, ldr, flat);
4567         if (!entry)
4568                 return -EINVAL;
4569
4570         new_entry = READ_ONCE(*entry);
4571         new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
4572         new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
4573         new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
4574         WRITE_ONCE(*entry, new_entry);
4575
4576         return 0;
4577 }
4578
4579 static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
4580 {
4581         struct vcpu_svm *svm = to_svm(vcpu);
4582         bool flat = svm->dfr_reg == APIC_DFR_FLAT;
4583         u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);
4584
4585         if (entry)
4586                 clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
4587 }
4588
4589 static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
4590 {
4591         int ret = 0;
4592         struct vcpu_svm *svm = to_svm(vcpu);
4593         u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
4594         u32 id = kvm_xapic_id(vcpu->arch.apic);
4595
4596         if (ldr == svm->ldr_reg)
4597                 return 0;
4598
4599         avic_invalidate_logical_id_entry(vcpu);
4600
4601         if (ldr)
4602                 ret = avic_ldr_write(vcpu, id, ldr);
4603
4604         if (!ret)
4605                 svm->ldr_reg = ldr;
4606
4607         return ret;
4608 }
4609
4610 static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
4611 {
4612         u64 *old, *new;
4613         struct vcpu_svm *svm = to_svm(vcpu);
4614         u32 id = kvm_xapic_id(vcpu->arch.apic);
4615
4616         if (vcpu->vcpu_id == id)
4617                 return 0;
4618
4619         old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
4620         new = avic_get_physical_id_entry(vcpu, id);
4621         if (!new || !old)
4622                 return 1;
4623
4624         /* We need to move the physical_id_entry to the new offset */
4625         *new = *old;
4626         *old = 0ULL;
4627         to_svm(vcpu)->avic_physical_id_cache = new;
4628
4629         /*
4630          * Also update the guest physical APIC ID in the logical
4631          * APIC ID table entry if the LDR has already been set up.
4632          */
4633         if (svm->ldr_reg)
4634                 avic_handle_ldr_update(vcpu);
4635
4636         return 0;
4637 }
4638
4639 static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
4640 {
4641         struct vcpu_svm *svm = to_svm(vcpu);
4642         u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
4643
4644         if (svm->dfr_reg == dfr)
4645                 return;
4646
4647         avic_invalidate_logical_id_entry(vcpu);
4648         svm->dfr_reg = dfr;
4649 }
4650
4651 static int avic_unaccel_trap_write(struct vcpu_svm *svm)
4652 {
4653         struct kvm_lapic *apic = svm->vcpu.arch.apic;
4654         u32 offset = svm->vmcb->control.exit_info_1 &
4655                                 AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4656
4657         switch (offset) {
4658         case APIC_ID:
4659                 if (avic_handle_apic_id_update(&svm->vcpu))
4660                         return 0;
4661                 break;
4662         case APIC_LDR:
4663                 if (avic_handle_ldr_update(&svm->vcpu))
4664                         return 0;
4665                 break;
4666         case APIC_DFR:
4667                 avic_handle_dfr_update(&svm->vcpu);
4668                 break;
4669         default:
4670                 break;
4671         }
4672
4673         kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
4674
4675         return 1;
4676 }
4677
4678 static bool is_avic_unaccelerated_access_trap(u32 offset)
4679 {
4680         bool ret = false;
4681
4682         switch (offset) {
4683         case APIC_ID:
4684         case APIC_EOI:
4685         case APIC_RRR:
4686         case APIC_LDR:
4687         case APIC_DFR:
4688         case APIC_SPIV:
4689         case APIC_ESR:
4690         case APIC_ICR:
4691         case APIC_LVTT:
4692         case APIC_LVTTHMR:
4693         case APIC_LVTPC:
4694         case APIC_LVT0:
4695         case APIC_LVT1:
4696         case APIC_LVTERR:
4697         case APIC_TMICT:
4698         case APIC_TDCR:
4699                 ret = true;
4700                 break;
4701         default:
4702                 break;
4703         }
4704         return ret;
4705 }
4706
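/*
 * Unaccelerated accesses come in two flavours: "trap" exits on the
 * registers listed above, reported after the write has completed, and
 * "fault" exits, reported before the access, which require full
 * instruction emulation.
 */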
4707 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
4708 {
4709         int ret = 0;
4710         u32 offset = svm->vmcb->control.exit_info_1 &
4711                      AVIC_UNACCEL_ACCESS_OFFSET_MASK;
4712         u32 vector = svm->vmcb->control.exit_info_2 &
4713                      AVIC_UNACCEL_ACCESS_VECTOR_MASK;
4714         bool write = (svm->vmcb->control.exit_info_1 >> 32) &
4715                      AVIC_UNACCEL_ACCESS_WRITE_MASK;
4716         bool trap = is_avic_unaccelerated_access_trap(offset);
4717
4718         trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
4719                                             trap, write, vector);
4720         if (trap) {
4721                 /* Handling Trap */
4722                 WARN_ONCE(!write, "svm: Handling trap read.\n");
4723                 ret = avic_unaccel_trap_write(svm);
4724         } else {
4725                 /* Handling Fault */
4726                 ret = kvm_emulate_instruction(&svm->vcpu, 0);
4727         }
4728
4729         return ret;
4730 }
4731
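/*
 * Dispatch table indexed by SVM exit code.  handle_exit() bounds-checks
 * the exit code and reports an internal error to userspace for any code
 * without a handler.
 */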
4732 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4733         [SVM_EXIT_READ_CR0]                     = cr_interception,
4734         [SVM_EXIT_READ_CR3]                     = cr_interception,
4735         [SVM_EXIT_READ_CR4]                     = cr_interception,
4736         [SVM_EXIT_READ_CR8]                     = cr_interception,
4737         [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
4738         [SVM_EXIT_WRITE_CR0]                    = cr_interception,
4739         [SVM_EXIT_WRITE_CR3]                    = cr_interception,
4740         [SVM_EXIT_WRITE_CR4]                    = cr_interception,
4741         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
4742         [SVM_EXIT_READ_DR0]                     = dr_interception,
4743         [SVM_EXIT_READ_DR1]                     = dr_interception,
4744         [SVM_EXIT_READ_DR2]                     = dr_interception,
4745         [SVM_EXIT_READ_DR3]                     = dr_interception,
4746         [SVM_EXIT_READ_DR4]                     = dr_interception,
4747         [SVM_EXIT_READ_DR5]                     = dr_interception,
4748         [SVM_EXIT_READ_DR6]                     = dr_interception,
4749         [SVM_EXIT_READ_DR7]                     = dr_interception,
4750         [SVM_EXIT_WRITE_DR0]                    = dr_interception,
4751         [SVM_EXIT_WRITE_DR1]                    = dr_interception,
4752         [SVM_EXIT_WRITE_DR2]                    = dr_interception,
4753         [SVM_EXIT_WRITE_DR3]                    = dr_interception,
4754         [SVM_EXIT_WRITE_DR4]                    = dr_interception,
4755         [SVM_EXIT_WRITE_DR5]                    = dr_interception,
4756         [SVM_EXIT_WRITE_DR6]                    = dr_interception,
4757         [SVM_EXIT_WRITE_DR7]                    = dr_interception,
4758         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
4759         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
4760         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
4761         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
4762         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
4763         [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
4764         [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
4765         [SVM_EXIT_INTR]                         = intr_interception,
4766         [SVM_EXIT_NMI]                          = nmi_interception,
4767         [SVM_EXIT_SMI]                          = nop_on_interception,
4768         [SVM_EXIT_INIT]                         = nop_on_interception,
4769         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
4770         [SVM_EXIT_RDPMC]                        = rdpmc_interception,
4771         [SVM_EXIT_CPUID]                        = cpuid_interception,
4772         [SVM_EXIT_IRET]                         = iret_interception,
4773         [SVM_EXIT_INVD]                         = emulate_on_interception,
4774         [SVM_EXIT_PAUSE]                        = pause_interception,
4775         [SVM_EXIT_HLT]                          = halt_interception,
4776         [SVM_EXIT_INVLPG]                       = invlpg_interception,
4777         [SVM_EXIT_INVLPGA]                      = invlpga_interception,
4778         [SVM_EXIT_IOIO]                         = io_interception,
4779         [SVM_EXIT_MSR]                          = msr_interception,
4780         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
4781         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
4782         [SVM_EXIT_VMRUN]                        = vmrun_interception,
4783         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
4784         [SVM_EXIT_VMLOAD]                       = vmload_interception,
4785         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
4786         [SVM_EXIT_STGI]                         = stgi_interception,
4787         [SVM_EXIT_CLGI]                         = clgi_interception,
4788         [SVM_EXIT_SKINIT]                       = skinit_interception,
4789         [SVM_EXIT_WBINVD]                       = wbinvd_interception,
4790         [SVM_EXIT_MONITOR]                      = monitor_interception,
4791         [SVM_EXIT_MWAIT]                        = mwait_interception,
4792         [SVM_EXIT_XSETBV]                       = xsetbv_interception,
4793         [SVM_EXIT_RDPRU]                        = rdpru_interception,
4794         [SVM_EXIT_NPF]                          = npf_interception,
4795         [SVM_EXIT_RSM]                          = rsm_interception,
4796         [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
4797         [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
4798 };
4799
4800 static void dump_vmcb(struct kvm_vcpu *vcpu)
4801 {
4802         struct vcpu_svm *svm = to_svm(vcpu);
4803         struct vmcb_control_area *control = &svm->vmcb->control;
4804         struct vmcb_save_area *save = &svm->vmcb->save;
4805
4806         if (!dump_invalid_vmcb) {
4807                 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
4808                 return;
4809         }
4810
4811         pr_err("VMCB Control Area:\n");
4812         pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
4813         pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
4814         pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
4815         pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
4816         pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
4817         pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
4818         pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
4819         pr_err("%-20s%d\n", "pause filter threshold:",
4820                control->pause_filter_thresh);
4821         pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
4822         pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
4823         pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
4824         pr_err("%-20s%d\n", "asid:", control->asid);
4825         pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
4826         pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
4827         pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
4828         pr_err("%-20s%08x\n", "int_state:", control->int_state);
4829         pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
4830         pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
4831         pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
4832         pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
4833         pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
4834         pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
4835         pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
4836         pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
4837         pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
4838         pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
4839         pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
4840         pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
4841         pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
4842         pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
4843         pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
4844         pr_err("VMCB State Save Area:\n");
4845         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4846                "es:",
4847                save->es.selector, save->es.attrib,
4848                save->es.limit, save->es.base);
4849         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4850                "cs:",
4851                save->cs.selector, save->cs.attrib,
4852                save->cs.limit, save->cs.base);
4853         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4854                "ss:",
4855                save->ss.selector, save->ss.attrib,
4856                save->ss.limit, save->ss.base);
4857         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4858                "ds:",
4859                save->ds.selector, save->ds.attrib,
4860                save->ds.limit, save->ds.base);
4861         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4862                "fs:",
4863                save->fs.selector, save->fs.attrib,
4864                save->fs.limit, save->fs.base);
4865         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4866                "gs:",
4867                save->gs.selector, save->gs.attrib,
4868                save->gs.limit, save->gs.base);
4869         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4870                "gdtr:",
4871                save->gdtr.selector, save->gdtr.attrib,
4872                save->gdtr.limit, save->gdtr.base);
4873         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4874                "ldtr:",
4875                save->ldtr.selector, save->ldtr.attrib,
4876                save->ldtr.limit, save->ldtr.base);
4877         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4878                "idtr:",
4879                save->idtr.selector, save->idtr.attrib,
4880                save->idtr.limit, save->idtr.base);
4881         pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
4882                "tr:",
4883                save->tr.selector, save->tr.attrib,
4884                save->tr.limit, save->tr.base);
4885         pr_err("cpl:            %d                efer:         %016llx\n",
4886                 save->cpl, save->efer);
4887         pr_err("%-15s %016llx %-13s %016llx\n",
4888                "cr0:", save->cr0, "cr2:", save->cr2);
4889         pr_err("%-15s %016llx %-13s %016llx\n",
4890                "cr3:", save->cr3, "cr4:", save->cr4);
4891         pr_err("%-15s %016llx %-13s %016llx\n",
4892                "dr6:", save->dr6, "dr7:", save->dr7);
4893         pr_err("%-15s %016llx %-13s %016llx\n",
4894                "rip:", save->rip, "rflags:", save->rflags);
4895         pr_err("%-15s %016llx %-13s %016llx\n",
4896                "rsp:", save->rsp, "rax:", save->rax);
4897         pr_err("%-15s %016llx %-13s %016llx\n",
4898                "star:", save->star, "lstar:", save->lstar);
4899         pr_err("%-15s %016llx %-13s %016llx\n",
4900                "cstar:", save->cstar, "sfmask:", save->sfmask);
4901         pr_err("%-15s %016llx %-13s %016llx\n",
4902                "kernel_gs_base:", save->kernel_gs_base,
4903                "sysenter_cs:", save->sysenter_cs);
4904         pr_err("%-15s %016llx %-13s %016llx\n",
4905                "sysenter_esp:", save->sysenter_esp,
4906                "sysenter_eip:", save->sysenter_eip);
4907         pr_err("%-15s %016llx %-13s %016llx\n",
4908                "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
4909         pr_err("%-15s %016llx %-13s %016llx\n",
4910                "br_from:", save->br_from, "br_to:", save->br_to);
4911         pr_err("%-15s %016llx %-13s %016llx\n",
4912                "excp_from:", save->last_excp_from,
4913                "excp_to:", save->last_excp_to);
4914 }
4915
4916 static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
4917 {
4918         struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
4919
4920         *info1 = control->exit_info_1;
4921         *info2 = control->exit_info_2;
4922 }
4923
4924 static int handle_exit(struct kvm_vcpu *vcpu)
4925 {
4926         struct vcpu_svm *svm = to_svm(vcpu);
4927         struct kvm_run *kvm_run = vcpu->run;
4928         u32 exit_code = svm->vmcb->control.exit_code;
4929
4930         trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
4931
4932         if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
4933                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
4934         if (npt_enabled)
4935                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
4936
4937         if (unlikely(svm->nested.exit_required)) {
4938                 nested_svm_vmexit(svm);
4939                 svm->nested.exit_required = false;
4940
4941                 return 1;
4942         }
4943
4944         if (is_guest_mode(vcpu)) {
4945                 int vmexit;
4946
4947                 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
4948                                         svm->vmcb->control.exit_info_1,
4949                                         svm->vmcb->control.exit_info_2,
4950                                         svm->vmcb->control.exit_int_info,
4951                                         svm->vmcb->control.exit_int_info_err,
4952                                         KVM_ISA_SVM);
4953
4954                 vmexit = nested_svm_exit_special(svm);
4955
4956                 if (vmexit == NESTED_EXIT_CONTINUE)
4957                         vmexit = nested_svm_exit_handled(svm);
4958
4959                 if (vmexit == NESTED_EXIT_DONE)
4960                         return 1;
4961         }
4962
4963         svm_complete_interrupts(svm);
4964
4965         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
4966                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
4967                 kvm_run->fail_entry.hardware_entry_failure_reason
4968                         = svm->vmcb->control.exit_code;
4969                 dump_vmcb(vcpu);
4970                 return 0;
4971         }
4972
4973         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
4974             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
4975             exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
4976             exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
4977                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
4978                        "exit_code 0x%x\n",
4979                        __func__, svm->vmcb->control.exit_int_info,
4980                        exit_code);
4981
4982         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
4983             || !svm_exit_handlers[exit_code]) {
4984                 vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
4985                 dump_vmcb(vcpu);
4986                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4987                 vcpu->run->internal.suberror =
4988                         KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
4989                 vcpu->run->internal.ndata = 1;
4990                 vcpu->run->internal.data[0] = exit_code;
4991                 return 0;
4992         }
4993
4994         return svm_exit_handlers[exit_code](svm);
4995 }
4996
4997 static void reload_tss(struct kvm_vcpu *vcpu)
4998 {
4999         int cpu = raw_smp_processor_id();
5000
5001         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5002         sd->tss_desc->type = 9; /* available 32/64-bit TSS */
5003         load_TR_desc();
5004 }
5005
5006 static void pre_sev_run(struct vcpu_svm *svm, int cpu)
5007 {
5008         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5009         int asid = sev_get_asid(svm->vcpu.kvm);
5010
5011         /* Assign the ASID allocated to this SEV guest */
5012         svm->vmcb->control.asid = asid;
5013
5014         /*
5015          * Flush the guest TLB when:
5016          *
5017          * 1) a different VMCB for the same ASID is to be run on the same host CPU, or
5018          * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
5019          */
5020         if (sd->sev_vmcbs[asid] == svm->vmcb &&
5021             svm->last_cpu == cpu)
5022                 return;
5023
5024         svm->last_cpu = cpu;
5025         sd->sev_vmcbs[asid] = svm->vmcb;
5026         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5027         mark_dirty(svm->vmcb, VMCB_ASID);
5028 }
5029
5030 static void pre_svm_run(struct vcpu_svm *svm)
5031 {
5032         int cpu = raw_smp_processor_id();
5033
5034         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
5035
5036         if (sev_guest(svm->vcpu.kvm))
5037                 return pre_sev_run(svm, cpu);
5038
5039         /* FIXME: handle wraparound of asid_generation */
5040         if (svm->asid_generation != sd->asid_generation)
5041                 new_asid(svm, sd);
5042 }
5043
5044 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
5045 {
5046         struct vcpu_svm *svm = to_svm(vcpu);
5047
5048         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
5049         vcpu->arch.hflags |= HF_NMI_MASK;
5050         set_intercept(svm, INTERCEPT_IRET);
5051         ++vcpu->stat.nmi_injections;
5052 }
5053
5054 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
5055 {
5056         struct vmcb_control_area *control;
5057
5058         /* The following fields are ignored when AVIC is enabled */
5059         control = &svm->vmcb->control;
5060         control->int_vector = irq;
5061         control->int_ctl &= ~V_INTR_PRIO_MASK;
5062         control->int_ctl |= V_IRQ_MASK |
5063                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
5064         mark_dirty(svm->vmcb, VMCB_INTR);
5065 }
5066
5067 static void svm_set_irq(struct kvm_vcpu *vcpu)
5068 {
5069         struct vcpu_svm *svm = to_svm(vcpu);
5070
5071         BUG_ON(!(gif_set(svm)));
5072
5073         trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
5074         ++vcpu->stat.irq_injections;
5075
5076         svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
5077                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
5078 }
5079
5080 static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
5081 {
5082         return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
5083 }
5084
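/*
 * Intercept CR8 writes only while the highest pending interrupt (irr) is
 * blocked by the current TPR; the resulting exit tells KVM when the guest
 * lowers its TPR far enough for the interrupt to be injected.
 */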
5085 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
5086 {
5087         struct vcpu_svm *svm = to_svm(vcpu);
5088
5089         if (svm_nested_virtualize_tpr(vcpu) ||
5090             kvm_vcpu_apicv_active(vcpu))
5091                 return;
5092
5093         clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5094
5095         if (irr == -1)
5096                 return;
5097
5098         if (tpr >= irr)
5099                 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
5100 }
5101
5102 static void svm_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
5103 {
5104         return;
5105 }
5106
5107 static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
5108 {
5109         return avic && irqchip_split(vcpu->kvm);
5110 }
5111
5112 static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
5113 {
5114 }
5115
5116 static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
5117 {
5118 }
5119
5120 /* Note: Currently only used by Hyper-V. */
5121 static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
5122 {
5123         struct vcpu_svm *svm = to_svm(vcpu);
5124         struct vmcb *vmcb = svm->vmcb;
5125
5126         if (kvm_vcpu_apicv_active(vcpu))
5127                 vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
5128         else
5129                 vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
5130         mark_dirty(vmcb, VMCB_AVIC);
5131 }
5132
5133 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
5134 {
5135         return;
5136 }
5137
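/*
 * Post an interrupt via AVIC: set the vector in the vIRR on the backing
 * page, then ring the AVIC doorbell MSR if the target vCPU is running on
 * another physical CPU, or wake the vCPU up so the new vIRR bit is
 * noticed on its next VMRUN.
 */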
5138 static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
5139 {
5140         kvm_lapic_set_irr(vec, vcpu->arch.apic);
5141         smp_mb__after_atomic();
5142
5143         if (avic_vcpu_is_running(vcpu)) {
5144                 int cpuid = vcpu->cpu;
5145
5146                 if (cpuid != get_cpu())
5147                         wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
5148                 put_cpu();
5149         } else
5150                 kvm_vcpu_wake_up(vcpu);
5151 }
5152
5153 static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
5154 {
5155         return false;
5156 }
5157
5158 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5159 {
5160         unsigned long flags;
5161         struct amd_svm_iommu_ir *cur;
5162
5163         spin_lock_irqsave(&svm->ir_list_lock, flags);
5164         list_for_each_entry(cur, &svm->ir_list, node) {
5165                 if (cur->data != pi->ir_data)
5166                         continue;
5167                 list_del(&cur->node);
5168                 kfree(cur);
5169                 break;
5170         }
5171         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5172 }
5173
5174 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
5175 {
5176         int ret = 0;
5177         unsigned long flags;
5178         struct amd_svm_iommu_ir *ir;
5179
5180         /**
5181          * In some cases, the existing irte is updated and re-set,
5182          * so we need to check here if it's already been added
5183          * to the ir_list.
5184          */
5185         if (pi->ir_data && (pi->prev_ga_tag != 0)) {
5186                 struct kvm *kvm = svm->vcpu.kvm;
5187                 u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
5188                 struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
5189                 struct vcpu_svm *prev_svm;
5190
5191                 if (!prev_vcpu) {
5192                         ret = -EINVAL;
5193                         goto out;
5194                 }
5195
5196                 prev_svm = to_svm(prev_vcpu);
5197                 svm_ir_list_del(prev_svm, pi);
5198         }
5199
5200         /**
5201          * Allocate a new amd_svm_iommu_ir entry, which will be
5202          * added to the per-vcpu ir_list.
5203          */
5204         ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL_ACCOUNT);
5205         if (!ir) {
5206                 ret = -ENOMEM;
5207                 goto out;
5208         }
5209         ir->data = pi->ir_data;
5210
5211         spin_lock_irqsave(&svm->ir_list_lock, flags);
5212         list_add(&ir->node, &svm->ir_list);
5213         spin_unlock_irqrestore(&svm->ir_list_lock, flags);
5214 out:
5215         return ret;
5216 }
5217
5218 /**
5219  * Note:
5220  * The HW cannot support posting multicast/broadcast
5221  * interrupts to a vCPU. So, we still use legacy interrupt
5222  * remapping for these kinds of interrupts.
5223  *
5224  * For lowest-priority interrupts, we only support
5225  * those with single CPU as the destination, e.g. user
5226  * configures the interrupts via /proc/irq or uses
5227  * irqbalance to make the interrupts single-CPU.
5228  */
5229 static int
5230 get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
5231                  struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
5232 {
5233         struct kvm_lapic_irq irq;
5234         struct kvm_vcpu *vcpu = NULL;
5235
5236         kvm_set_msi_irq(kvm, e, &irq);
5237
5238         if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
5239             !kvm_irq_is_postable(&irq)) {
5240                 pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
5241                          __func__, irq.vector);
5242                 return -1;
5243         }
5244
5245         pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
5246                  irq.vector);
5247         *svm = to_svm(vcpu);
5248         vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
5249         vcpu_info->vector = irq.vector;
5250
5251         return 0;
5252 }
5253
5254 /*
5255  * svm_update_pi_irte - set IRTE for Posted-Interrupts
5256  *
5257  * @kvm: kvm
5258  * @host_irq: host irq of the interrupt
5259  * @guest_irq: gsi of the interrupt
5260  * @set: set or unset PI
5261  * returns 0 on success, < 0 on failure
5262  */
5263 static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
5264                               uint32_t guest_irq, bool set)
5265 {
5266         struct kvm_kernel_irq_routing_entry *e;
5267         struct kvm_irq_routing_table *irq_rt;
5268         int idx, ret = -EINVAL;
5269
5270         if (!kvm_arch_has_assigned_device(kvm) ||
5271             !irq_remapping_cap(IRQ_POSTING_CAP))
5272                 return 0;
5273
5274         pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
5275                  __func__, host_irq, guest_irq, set);
5276
5277         idx = srcu_read_lock(&kvm->irq_srcu);
5278         irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
5279         WARN_ON(guest_irq >= irq_rt->nr_rt_entries);
5280
5281         hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
5282                 struct vcpu_data vcpu_info;
5283                 struct vcpu_svm *svm = NULL;
5284
5285                 if (e->type != KVM_IRQ_ROUTING_MSI)
5286                         continue;
5287
5288                 /**
5289                  * Here, we fall back to legacy mode in the following cases:
5290                  * 1. The interrupt cannot be targeted to a specific vcpu.
5291                  * 2. The posted interrupt is being unset.
5292                  * 3. APIC virtualization is disabled for the vcpu.
5293                  * 4. The IRQ has an incompatible delivery mode (SMI, INIT, etc).
5294                  */
5295                 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
5296                     kvm_vcpu_apicv_active(&svm->vcpu)) {
5297                         struct amd_iommu_pi_data pi;
5298
5299                         /* Try to enable guest_mode in IRTE */
5300                         pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
5301                                             AVIC_HPA_MASK);
5302                         pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
5303                                                      svm->vcpu.vcpu_id);
5304                         pi.is_guest_mode = true;
5305                         pi.vcpu_data = &vcpu_info;
5306                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5307
5308                         /**
5309                          * Here, we have successfully set up vcpu affinity in
5310                          * IOMMU guest mode. Now, we need to store the posted
5311                          * interrupt information in a per-vcpu ir_list so that
5312                          * we can reference it directly when we update vcpu
5313                          * scheduling information in the IOMMU irte.
5314                          */
5315                         if (!ret && pi.is_guest_mode)
5316                                 svm_ir_list_add(svm, &pi);
5317                 } else {
5318                         /* Use legacy mode in IRTE */
5319                         struct amd_iommu_pi_data pi;
5320
5321                         /**
5322                          * Here, pi is used to:
5323                          * - Tell IOMMU to use legacy mode for this interrupt.
5324                          * - Retrieve ga_tag of prior interrupt remapping data.
5325                          */
5326                         pi.is_guest_mode = false;
5327                         ret = irq_set_vcpu_affinity(host_irq, &pi);
5328
5329                         /**
5330                          * Check if the posted interrupt was previously
5331                          * set up in guest_mode by checking if the ga_tag
5332                          * was cached. If so, we need to clean up the per-vcpu
5333                          * ir_list.
5334                          */
5335                         if (!ret && pi.prev_ga_tag) {
5336                                 int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
5337                                 struct kvm_vcpu *vcpu;
5338
5339                                 vcpu = kvm_get_vcpu_by_id(kvm, id);
5340                                 if (vcpu)
5341                                         svm_ir_list_del(to_svm(vcpu), &pi);
5342                         }
5343                 }
5344
5345                 if (!ret && svm) {
5346                         trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
5347                                                  e->gsi, vcpu_info.vector,
5348                                                  vcpu_info.pi_desc_addr, set);
5349                 }
5350
5351                 if (ret < 0) {
5352                         pr_err("%s: failed to update PI IRTE\n", __func__);
5353                         goto out;
5354                 }
5355         }
5356
5357         ret = 0;
5358 out:
5359         srcu_read_unlock(&kvm->irq_srcu, idx);
5360         return ret;
5361 }
5362
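/*
 * An NMI may be injected only outside an interrupt shadow, while NMIs are
 * not masked (HF_NMI_MASK clear), GIF is set, and a nested guest is not
 * intercepting NMIs.
 */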
5363 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
5364 {
5365         struct vcpu_svm *svm = to_svm(vcpu);
5366         struct vmcb *vmcb = svm->vmcb;
5367         int ret;
5368         ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5369               !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5370         ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5371
5372         return ret;
5373 }
5374
5375 static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5376 {
5377         struct vcpu_svm *svm = to_svm(vcpu);
5378
5379         return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5380 }
5381
5382 static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5383 {
5384         struct vcpu_svm *svm = to_svm(vcpu);
5385
5386         if (masked) {
5387                 svm->vcpu.arch.hflags |= HF_NMI_MASK;
5388                 set_intercept(svm, INTERCEPT_IRET);
5389         } else {
5390                 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
5391                 clr_intercept(svm, INTERCEPT_IRET);
5392         }
5393 }
5394
5395 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5396 {
5397         struct vcpu_svm *svm = to_svm(vcpu);
5398         struct vmcb *vmcb = svm->vmcb;
5399         int ret;
5400
5401         if (!gif_set(svm) ||
5402              (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5403                 return 0;
5404
5405         ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
5406
5407         if (is_guest_mode(vcpu))
5408                 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5409
5410         return ret;
5411 }
5412
5413 static void enable_irq_window(struct kvm_vcpu *vcpu)
5414 {
5415         struct vcpu_svm *svm = to_svm(vcpu);
5416
5417         if (kvm_vcpu_apicv_active(vcpu))
5418                 return;
5419
5420         /*
5421          * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5422          * 1, because that's a separate STGI/VMRUN intercept.  The next time we
5423          * get that intercept, this function will be called again though and
5424          * we'll get the vintr intercept. However, if the vGIF feature is
5425          * enabled, the STGI interception will not occur. Enable the irq
5426          * window under the assumption that the hardware will set the GIF.
5427          */
5428         if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
5429                 svm_set_vintr(svm);
5430                 svm_inject_irq(svm, 0x0);
5431         }
5432 }
5433
5434 static void enable_nmi_window(struct kvm_vcpu *vcpu)
5435 {
5436         struct vcpu_svm *svm = to_svm(vcpu);
5437
5438         if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5439             == HF_NMI_MASK)
5440                 return; /* IRET will cause a vm exit */
5441
5442         if (!gif_set(svm)) {
5443                 if (vgif_enabled(svm))
5444                         set_intercept(svm, INTERCEPT_STGI);
5445                 return; /* STGI will cause a vm exit */
5446         }
5447
5448         if (svm->nested.exit_required)
5449                 return; /* we're not going to run the guest yet */
5450
5451         /*
5452          * Something prevents the NMI from being injected. Single step over the
5453          * possible problem (IRET or exception injection or interrupt shadow).
5454          */
5455         svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
5456         svm->nmi_singlestep = true;
5457         svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
5458 }
5459
5460 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5461 {
5462         return 0;
5463 }
5464
5465 static int svm_set_identity_map_addr(struct kvm *kvm, u64 ident_addr)
5466 {
5467         return 0;
5468 }
5469
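/*
 * With FLUSHBYASID, ask hardware to flush just this guest's ASID on the
 * next VMRUN; otherwise force allocation of a fresh ASID, which implicitly
 * drops all stale guest translations.
 */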
5470 static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
5471 {
5472         struct vcpu_svm *svm = to_svm(vcpu);
5473
5474         if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5475                 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5476         else
5477                 svm->asid_generation--;
5478 }
5479
5480 static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
5481 {
5482         struct vcpu_svm *svm = to_svm(vcpu);
5483
5484         invlpga(gva, svm->vmcb->control.asid);
5485 }
5486
5487 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5488 {
5489 }
5490
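/*
 * While the CR8 write intercept is clear the guest updates V_TPR directly;
 * mirror that value back into the emulated local APIC's TPR.
 */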
5491 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5492 {
5493         struct vcpu_svm *svm = to_svm(vcpu);
5494
5495         if (svm_nested_virtualize_tpr(vcpu))
5496                 return;
5497
5498         if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
5499                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
5500                 kvm_set_cr8(vcpu, cr8);
5501         }
5502 }
5503
5504 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5505 {
5506         struct vcpu_svm *svm = to_svm(vcpu);
5507         u64 cr8;
5508
5509         if (svm_nested_virtualize_tpr(vcpu) ||
5510             kvm_vcpu_apicv_active(vcpu))
5511                 return;
5512
5513         cr8 = kvm_get_cr8(vcpu);
5514         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5515         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5516 }
5517
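/*
 * Re-queue the event that was being delivered when the VMEXIT occurred
 * (EXITINTINFO) so it is injected again on the next VMRUN, and unmask NMIs
 * once the guest has made progress past its IRET.
 */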
5518 static void svm_complete_interrupts(struct vcpu_svm *svm)
5519 {
5520         u8 vector;
5521         int type;
5522         u32 exitintinfo = svm->vmcb->control.exit_int_info;
5523         unsigned int3_injected = svm->int3_injected;
5524
5525         svm->int3_injected = 0;
5526
5527         /*
5528          * If we've made progress since setting HF_IRET_MASK, we've
5529          * executed an IRET and can allow NMI injection.
5530          */
5531         if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5532             && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
5533                 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
5534                 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5535         }
5536
5537         svm->vcpu.arch.nmi_injected = false;
5538         kvm_clear_exception_queue(&svm->vcpu);
5539         kvm_clear_interrupt_queue(&svm->vcpu);
5540
5541         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5542                 return;
5543
5544         kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5545
5546         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5547         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5548
5549         switch (type) {
5550         case SVM_EXITINTINFO_TYPE_NMI:
5551                 svm->vcpu.arch.nmi_injected = true;
5552                 break;
5553         case SVM_EXITINTINFO_TYPE_EXEPT:
5554                 /*
5555                  * In case of software exceptions, do not reinject the vector,
5556                  * but re-execute the instruction instead. Rewind RIP first
5557                  * if we emulated INT3 before.
5558                  */
5559                 if (kvm_exception_is_soft(vector)) {
5560                         if (vector == BP_VECTOR && int3_injected &&
5561                             kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5562                                 kvm_rip_write(&svm->vcpu,
5563                                               kvm_rip_read(&svm->vcpu) -
5564                                               int3_injected);
5565                         break;
5566                 }
5567                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5568                         u32 err = svm->vmcb->control.exit_int_info_err;
5569                         kvm_requeue_exception_e(&svm->vcpu, vector, err);
5570
5571                 } else
5572                         kvm_requeue_exception(&svm->vcpu, vector);
5573                 break;
5574         case SVM_EXITINTINFO_TYPE_INTR:
5575                 kvm_queue_interrupt(&svm->vcpu, vector, false);
5576                 break;
5577         default:
5578                 break;
5579         }
5580 }
5581
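/*
 * Abort a pending event injection by converting event_inj back into exit
 * int info; svm_complete_interrupts() then re-queues the event as if the
 * VMRUN had been attempted and failed to deliver it.
 */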
5582 static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5583 {
5584         struct vcpu_svm *svm = to_svm(vcpu);
5585         struct vmcb_control_area *control = &svm->vmcb->control;
5586
5587         control->exit_int_info = control->event_inj;
5588         control->exit_int_info_err = control->event_inj_err;
5589         control->event_inj = 0;
5590         svm_complete_interrupts(svm);
5591 }
5592
5593 static void svm_vcpu_run(struct kvm_vcpu *vcpu)
5594 {
5595         struct vcpu_svm *svm = to_svm(vcpu);
5596
5597         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5598         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5599         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5600
5601         /*
5602          * A vmexit emulation is required before the vcpu can be executed
5603          * again.
5604          */
5605         if (unlikely(svm->nested.exit_required))
5606                 return;
5607
5608         /*
5609          * Disable singlestep if we're injecting an interrupt/exception.
5610          * We don't want our modified rflags to be pushed on the stack where
5611          * we might not be able to easily reset them if we disabled NMI
5612          * singlestep later.
5613          */
5614         if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5615                 /*
5616                  * Event injection happens before external interrupts cause a
5617                  * vmexit and interrupts are disabled here, so smp_send_reschedule
5618                  * is enough to force an immediate vmexit.
5619                  */
5620                 disable_nmi_singlestep(svm);
5621                 smp_send_reschedule(vcpu->cpu);
5622         }
5623
5624         pre_svm_run(svm);
5625
5626         sync_lapic_to_cr8(vcpu);
5627
5628         svm->vmcb->save.cr2 = vcpu->arch.cr2;
5629
5630         clgi();
5631         kvm_load_guest_xcr0(vcpu);
5632
5633         if (lapic_in_kernel(vcpu) &&
5634                 vcpu->arch.apic->lapic_timer.timer_advance_ns)
5635                 kvm_wait_lapic_expire(vcpu);
5636
5637         /*
5638          * If this vCPU has touched SPEC_CTRL, restore the guest's value if
5639          * it's non-zero. Since vmentry is serialising on affected CPUs, there
5640          * is no need to worry about the conditional branch over the wrmsr
5641          * being speculatively taken.
5642          */
5643         x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
5644
5645         local_irq_enable();
5646
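        /*
         * Hand-rolled world switch: load the remaining guest GPRs, then
         * VMLOAD/VMRUN/VMSAVE with RAX holding the VMCB physical address.
         * Afterwards the guest GPRs are spilled back and clobbered host
         * registers are zeroed to limit speculative reuse of guest values.
         */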
5647         asm volatile (
5648                 "push %%" _ASM_BP "; \n\t"
5649                 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5650                 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5651                 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5652                 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5653                 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5654                 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
5655 #ifdef CONFIG_X86_64
5656                 "mov %c[r8](%[svm]),  %%r8  \n\t"
5657                 "mov %c[r9](%[svm]),  %%r9  \n\t"
5658                 "mov %c[r10](%[svm]), %%r10 \n\t"
5659                 "mov %c[r11](%[svm]), %%r11 \n\t"
5660                 "mov %c[r12](%[svm]), %%r12 \n\t"
5661                 "mov %c[r13](%[svm]), %%r13 \n\t"
5662                 "mov %c[r14](%[svm]), %%r14 \n\t"
5663                 "mov %c[r15](%[svm]), %%r15 \n\t"
5664 #endif
5665
5666                 /* Enter guest mode */
5667                 "push %%" _ASM_AX " \n\t"
5668                 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
5669                 __ex("vmload %%" _ASM_AX) "\n\t"
5670                 __ex("vmrun %%" _ASM_AX) "\n\t"
5671                 __ex("vmsave %%" _ASM_AX) "\n\t"
5672                 "pop %%" _ASM_AX " \n\t"
5673
5674                 /* Save guest registers, load host registers */
5675                 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5676                 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5677                 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5678                 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5679                 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5680                 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
5681 #ifdef CONFIG_X86_64
5682                 "mov %%r8,  %c[r8](%[svm]) \n\t"
5683                 "mov %%r9,  %c[r9](%[svm]) \n\t"
5684                 "mov %%r10, %c[r10](%[svm]) \n\t"
5685                 "mov %%r11, %c[r11](%[svm]) \n\t"
5686                 "mov %%r12, %c[r12](%[svm]) \n\t"
5687                 "mov %%r13, %c[r13](%[svm]) \n\t"
5688                 "mov %%r14, %c[r14](%[svm]) \n\t"
5689                 "mov %%r15, %c[r15](%[svm]) \n\t"
5690                 /*
5691                  * Clear host registers marked as clobbered to prevent
5692                  * speculative use.
5693                  */
5694                 "xor %%r8d, %%r8d \n\t"
5695                 "xor %%r9d, %%r9d \n\t"
5696                 "xor %%r10d, %%r10d \n\t"
5697                 "xor %%r11d, %%r11d \n\t"
5698                 "xor %%r12d, %%r12d \n\t"
5699                 "xor %%r13d, %%r13d \n\t"
5700                 "xor %%r14d, %%r14d \n\t"
5701                 "xor %%r15d, %%r15d \n\t"
5702 #endif
5703                 "xor %%ebx, %%ebx \n\t"
5704                 "xor %%ecx, %%ecx \n\t"
5705                 "xor %%edx, %%edx \n\t"
5706                 "xor %%esi, %%esi \n\t"
5707                 "xor %%edi, %%edi \n\t"
5708                 "pop %%" _ASM_BP
5709                 :
5710                 : [svm]"a"(svm),
5711                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
5712                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5713                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5714                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5715                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5716                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5717                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
5718 #ifdef CONFIG_X86_64
5719                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5720                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5721                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5722                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5723                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5724                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5725                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5726                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
5727 #endif
5728                 : "cc", "memory"
5729 #ifdef CONFIG_X86_64
5730                 , "rbx", "rcx", "rdx", "rsi", "rdi"
5731                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
5732 #else
5733                 , "ebx", "ecx", "edx", "esi", "edi"
5734 #endif
5735                 );
5736
5737         /* Eliminate branch target predictions from guest mode */
5738         vmexit_fill_RSB();
5739
5740 #ifdef CONFIG_X86_64
5741         wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5742 #else
5743         loadsegment(fs, svm->host.fs);
5744 #ifndef CONFIG_X86_32_LAZY_GS
5745         loadsegment(gs, svm->host.gs);
5746 #endif
5747 #endif
5748
5749         /*
5750          * We do not use IBRS in the kernel. If this vCPU has used the
5751          * SPEC_CTRL MSR it may have left it on; save the value and
5752          * turn it off. This is much more efficient than blindly adding
5753          * it to the atomic save/restore list, especially as the former
5754          * (saving guest MSRs on vmexit) doesn't even exist in KVM.
5755          *
5756          * For non-nested case:
5757          * If the L01 MSR bitmap does not intercept the MSR, then we need to
5758          * save it.
5759          *
5760          * For nested case:
5761          * If the L02 MSR bitmap does not intercept the MSR, then we need to
5762          * save it.
5763          */
5764         if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
5765                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
5766
5767         reload_tss(vcpu);
5768
5769         local_irq_disable();
5770
5771         x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
5772
5773         vcpu->arch.cr2 = svm->vmcb->save.cr2;
5774         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5775         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5776         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5777
5778         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5779                 kvm_before_interrupt(&svm->vcpu);
5780
5781         kvm_put_guest_xcr0(vcpu);
5782         stgi();
5783
5784         /* Any pending NMI will happen here */
5785
5786         if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5787                 kvm_after_interrupt(&svm->vcpu);
5788
5789         sync_cr8_to_lapic(vcpu);
5790
5791         svm->next_rip = 0;
5792
5793         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5794
5795         /* If the exit was due to a #PF, check for an async PF. */
5796         if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
5797                 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
5798
5799         if (npt_enabled) {
5800                 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5801                 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5802         }
5803
5804         /*
5805          * We need to handle MC intercepts here, before the vcpu has a chance
5806          * to change the physical cpu.
5807          */
5808         if (unlikely(svm->vmcb->control.exit_code ==
5809                      SVM_EXIT_EXCP_BASE + MC_VECTOR))
5810                 svm_handle_mce(svm);
5811
5812         mark_all_clean(svm->vmcb);
5813 }
5814 STACK_FRAME_NON_STANDARD(svm_vcpu_run);
5815
5816 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5817 {
5818         struct vcpu_svm *svm = to_svm(vcpu);
5819
5820         svm->vmcb->save.cr3 = __sme_set(root);
5821         mark_dirty(svm->vmcb, VMCB_CR);
5822 }
5823
5824 static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5825 {
5826         struct vcpu_svm *svm = to_svm(vcpu);
5827
5828         svm->vmcb->control.nested_cr3 = __sme_set(root);
5829         mark_dirty(svm->vmcb, VMCB_NPT);
5830
5831         /* Also sync guest cr3 here in case we live migrate */
5832         svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
5833         mark_dirty(svm->vmcb, VMCB_CR);
5834 }
5835
5836 static int is_disabled(void)
5837 {
5838         u64 vm_cr;
5839
5840         rdmsrl(MSR_VM_CR, vm_cr);
5841         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5842                 return 1;
5843
5844         return 0;
5845 }
5846
5847 static void
5848 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5849 {
5850         /*
5851          * Patch in the VMMCALL instruction:
5852          */
5853         hypercall[0] = 0x0f;
5854         hypercall[1] = 0x01;
5855         hypercall[2] = 0xd9;
5856 }
5857
5858 static int __init svm_check_processor_compat(void)
5859 {
5860         return 0;
5861 }
5862
5863 static bool svm_cpu_has_accelerated_tpr(void)
5864 {
5865         return false;
5866 }
5867
5868 static bool svm_has_emulated_msr(int index)
5869 {
5870         switch (index) {
5871         case MSR_IA32_MCG_EXT_CTL:
5872         case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
5873                 return false;
5874         default:
5875                 break;
5876         }
5877
5878         return true;
5879 }
5880
5881 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5882 {
5883         return 0;
5884 }
5885
5886 static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5887 {
5888         struct vcpu_svm *svm = to_svm(vcpu);
5889
5890         /* Update nrips enabled cache */
5891         svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
5892
5893         if (!kvm_vcpu_apicv_active(vcpu))
5894                 return;
5895
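        /* AVIC does not virtualize x2APIC mode, so hide it from the guest. */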
5896         guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
5897 }
5898
5899 #define F(x) bit(X86_FEATURE_##x)
5900
5901 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5902 {
5903         switch (func) {
5904         case 0x1:
5905                 if (avic)
5906                         entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5907                 break;
5908         case 0x80000001:
5909                 if (nested)
5910                         entry->ecx |= (1 << 2); /* Set SVM bit */
5911                 break;
5912         case 0x80000008:
5913                 if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
5914                      boot_cpu_has(X86_FEATURE_AMD_SSBD))
5915                         entry->ebx |= F(VIRT_SSBD);
5916                 break;
5917         case 0x8000000A:
5918                 entry->eax = 1; /* SVM revision 1 */
5919                 entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5920                                    ASID emulation to nested SVM */
5921                 entry->ecx = 0; /* Reserved */
5922                 entry->edx = 0; /* By default, do not support any
5923                                    additional features */
5924
5925                 /* Support next_rip if host supports it */
5926                 if (boot_cpu_has(X86_FEATURE_NRIPS))
5927                         entry->edx |= F(NRIPS);
5928
5929                 /* Support NPT for the guest if enabled */
5930                 if (npt_enabled)
5931                         entry->edx |= F(NPT);
5932
5933                 break;
5934         case 0x8000001F:
5935                 /* Support memory encryption cpuid if host supports it */
5936                 if (boot_cpu_has(X86_FEATURE_SEV))
5937                         cpuid(0x8000001f, &entry->eax, &entry->ebx,
5938                                 &entry->ecx, &entry->edx);
5939
5940         }
5941 }
5942
5943 static int svm_get_lpage_level(void)
5944 {
5945         return PT_PDPE_LEVEL;
5946 }
5947
5948 static bool svm_rdtscp_supported(void)
5949 {
5950         return boot_cpu_has(X86_FEATURE_RDTSCP);
5951 }
5952
5953 static bool svm_invpcid_supported(void)
5954 {
5955         return false;
5956 }
5957
5958 static bool svm_mpx_supported(void)
5959 {
5960         return false;
5961 }
5962
5963 static bool svm_xsaves_supported(void)
5964 {
5965         return false;
5966 }
5967
5968 static bool svm_umip_emulated(void)
5969 {
5970         return false;
5971 }
5972
5973 static bool svm_pt_supported(void)
5974 {
5975         return false;
5976 }
5977
5978 static bool svm_has_wbinvd_exit(void)
5979 {
5980         return true;
5981 }
5982
5983 #define PRE_EX(exit)  { .exit_code = (exit), \
5984                         .stage = X86_ICPT_PRE_EXCEPT, }
5985 #define POST_EX(exit) { .exit_code = (exit), \
5986                         .stage = X86_ICPT_POST_EXCEPT, }
5987 #define POST_MEM(exit) { .exit_code = (exit), \
5988                         .stage = X86_ICPT_POST_MEMACCESS, }
5989
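/*
 * Map emulator intercept ids to the SVM exit code and the instruction
 * pipeline stage at which the corresponding SVM intercept is checked.
 */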
5990 static const struct __x86_intercept {
5991         u32 exit_code;
5992         enum x86_intercept_stage stage;
5993 } x86_intercept_map[] = {
5994         [x86_intercept_cr_read]         = POST_EX(SVM_EXIT_READ_CR0),
5995         [x86_intercept_cr_write]        = POST_EX(SVM_EXIT_WRITE_CR0),
5996         [x86_intercept_clts]            = POST_EX(SVM_EXIT_WRITE_CR0),
5997         [x86_intercept_lmsw]            = POST_EX(SVM_EXIT_WRITE_CR0),
5998         [x86_intercept_smsw]            = POST_EX(SVM_EXIT_READ_CR0),
5999         [x86_intercept_dr_read]         = POST_EX(SVM_EXIT_READ_DR0),
6000         [x86_intercept_dr_write]        = POST_EX(SVM_EXIT_WRITE_DR0),
6001         [x86_intercept_sldt]            = POST_EX(SVM_EXIT_LDTR_READ),
6002         [x86_intercept_str]             = POST_EX(SVM_EXIT_TR_READ),
6003         [x86_intercept_lldt]            = POST_EX(SVM_EXIT_LDTR_WRITE),
6004         [x86_intercept_ltr]             = POST_EX(SVM_EXIT_TR_WRITE),
6005         [x86_intercept_sgdt]            = POST_EX(SVM_EXIT_GDTR_READ),
6006         [x86_intercept_sidt]            = POST_EX(SVM_EXIT_IDTR_READ),
6007         [x86_intercept_lgdt]            = POST_EX(SVM_EXIT_GDTR_WRITE),
6008         [x86_intercept_lidt]            = POST_EX(SVM_EXIT_IDTR_WRITE),
6009         [x86_intercept_vmrun]           = POST_EX(SVM_EXIT_VMRUN),
6010         [x86_intercept_vmmcall]         = POST_EX(SVM_EXIT_VMMCALL),
6011         [x86_intercept_vmload]          = POST_EX(SVM_EXIT_VMLOAD),
6012         [x86_intercept_vmsave]          = POST_EX(SVM_EXIT_VMSAVE),
6013         [x86_intercept_stgi]            = POST_EX(SVM_EXIT_STGI),
6014         [x86_intercept_clgi]            = POST_EX(SVM_EXIT_CLGI),
6015         [x86_intercept_skinit]          = POST_EX(SVM_EXIT_SKINIT),
6016         [x86_intercept_invlpga]         = POST_EX(SVM_EXIT_INVLPGA),
6017         [x86_intercept_rdtscp]          = POST_EX(SVM_EXIT_RDTSCP),
6018         [x86_intercept_monitor]         = POST_MEM(SVM_EXIT_MONITOR),
6019         [x86_intercept_mwait]           = POST_EX(SVM_EXIT_MWAIT),
6020         [x86_intercept_invlpg]          = POST_EX(SVM_EXIT_INVLPG),
6021         [x86_intercept_invd]            = POST_EX(SVM_EXIT_INVD),
6022         [x86_intercept_wbinvd]          = POST_EX(SVM_EXIT_WBINVD),
6023         [x86_intercept_wrmsr]           = POST_EX(SVM_EXIT_MSR),
6024         [x86_intercept_rdtsc]           = POST_EX(SVM_EXIT_RDTSC),
6025         [x86_intercept_rdmsr]           = POST_EX(SVM_EXIT_MSR),
6026         [x86_intercept_rdpmc]           = POST_EX(SVM_EXIT_RDPMC),
6027         [x86_intercept_cpuid]           = PRE_EX(SVM_EXIT_CPUID),
6028         [x86_intercept_rsm]             = PRE_EX(SVM_EXIT_RSM),
6029         [x86_intercept_pause]           = PRE_EX(SVM_EXIT_PAUSE),
6030         [x86_intercept_pushf]           = PRE_EX(SVM_EXIT_PUSHF),
6031         [x86_intercept_popf]            = PRE_EX(SVM_EXIT_POPF),
6032         [x86_intercept_intn]            = PRE_EX(SVM_EXIT_SWINT),
6033         [x86_intercept_iret]            = PRE_EX(SVM_EXIT_IRET),
6034         [x86_intercept_icebp]           = PRE_EX(SVM_EXIT_ICEBP),
6035         [x86_intercept_hlt]             = POST_EX(SVM_EXIT_HLT),
6036         [x86_intercept_in]              = POST_EX(SVM_EXIT_IOIO),
6037         [x86_intercept_ins]             = POST_EX(SVM_EXIT_IOIO),
6038         [x86_intercept_out]             = POST_EX(SVM_EXIT_IOIO),
6039         [x86_intercept_outs]            = POST_EX(SVM_EXIT_IOIO),
6040         [x86_intercept_xsetbv]          = PRE_EX(SVM_EXIT_XSETBV),
6041 };
6042
6043 #undef PRE_EX
6044 #undef POST_EX
6045 #undef POST_MEM
6046
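/*
 * For reference, a table entry such as
 *	[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID)
 * expanded to { .exit_code = SVM_EXIT_CPUID, .stage = X86_ICPT_PRE_EXCEPT },
 * i.e. CPUID is matched against vmcb12 at the pre-exception stage of
 * emulation, before the instruction is executed.
 */
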
6047 static int svm_check_intercept(struct kvm_vcpu *vcpu,
6048                                struct x86_instruction_info *info,
6049                                enum x86_intercept_stage stage)
6050 {
6051         struct vcpu_svm *svm = to_svm(vcpu);
6052         int vmexit, ret = X86EMUL_CONTINUE;
6053         struct __x86_intercept icpt_info;
6054         struct vmcb *vmcb = svm->vmcb;
6055
6056         if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
6057                 goto out;
6058
6059         icpt_info = x86_intercept_map[info->intercept];
6060
6061         if (stage != icpt_info.stage)
6062                 goto out;
6063
6064         switch (icpt_info.exit_code) {
6065         case SVM_EXIT_READ_CR0:
6066                 if (info->intercept == x86_intercept_cr_read)
6067                         icpt_info.exit_code += info->modrm_reg;
6068                 break;
6069         case SVM_EXIT_WRITE_CR0: {
6070                 unsigned long cr0, val;
6071                 u64 intercept;
6072
6073                 if (info->intercept == x86_intercept_cr_write)
6074                         icpt_info.exit_code += info->modrm_reg;
6075
6076                 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
6077                     info->intercept == x86_intercept_clts)
6078                         break;
6079
6080                 intercept = svm->nested.intercept;
6081
6082                 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
6083                         break;
6084
6085                 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
6086                 val = info->src_val  & ~SVM_CR0_SELECTIVE_MASK;
6087
6088                 if (info->intercept == x86_intercept_lmsw) {
6089                         cr0 &= 0xfUL;
6090                         val &= 0xfUL;
6091                         /* lmsw can't clear PE - catch this here */
6092                         if (cr0 & X86_CR0_PE)
6093                                 val |= X86_CR0_PE;
6094                 }
6095
6096                 if (cr0 ^ val)
6097                         icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
6098
6099                 break;
6100         }
6101         case SVM_EXIT_READ_DR0:
6102         case SVM_EXIT_WRITE_DR0:
6103                 icpt_info.exit_code += info->modrm_reg;
6104                 break;
6105         case SVM_EXIT_MSR:
6106                 if (info->intercept == x86_intercept_wrmsr)
6107                         vmcb->control.exit_info_1 = 1;
6108                 else
6109                         vmcb->control.exit_info_1 = 0;
6110                 break;
6111         case SVM_EXIT_PAUSE:
6112                 /*
6113                  * We get this exit code for NOP as well, but PAUSE is
6114                  * encoded as REP NOP, so check for the REP prefix here.
6115                  */
6116                 if (info->rep_prefix != REPE_PREFIX)
6117                         goto out;
6118                 break;
6119         case SVM_EXIT_IOIO: {
6120                 u64 exit_info;
6121                 u32 bytes;
6122
6123                 if (info->intercept == x86_intercept_in ||
6124                     info->intercept == x86_intercept_ins) {
6125                         exit_info = ((info->src_val & 0xffff) << 16) |
6126                                 SVM_IOIO_TYPE_MASK;
6127                         bytes = info->dst_bytes;
6128                 } else {
6129                         exit_info = (info->dst_val & 0xffff) << 16;
6130                         bytes = info->src_bytes;
6131                 }
6132
6133                 if (info->intercept == x86_intercept_outs ||
6134                     info->intercept == x86_intercept_ins)
6135                         exit_info |= SVM_IOIO_STR_MASK;
6136
6137                 if (info->rep_prefix)
6138                         exit_info |= SVM_IOIO_REP_MASK;
6139
6140                 bytes = min(bytes, 4u);
6141
6142                 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
6143
6144                 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
6145
6146                 vmcb->control.exit_info_1 = exit_info;
6147                 vmcb->control.exit_info_2 = info->next_rip;
6148
6149                 break;
6150         }
6151         default:
6152                 break;
6153         }
6154
6155         /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
6156         if (static_cpu_has(X86_FEATURE_NRIPS))
6157                 vmcb->control.next_rip  = info->next_rip;
6158         vmcb->control.exit_code = icpt_info.exit_code;
6159         vmexit = nested_svm_exit_handled(svm);
6160
6161         ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
6162                                            : X86EMUL_CONTINUE;
6163
6164 out:
6165         return ret;
6166 }
6167
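/*
 * Illustrative sketch (not part of this file): the instruction emulator
 * is expected to consult ->check_intercept() at each emulation stage so
 * that a nested hypervisor observes the intercepts it programmed into
 * vmcb12.  Roughly (hypothetical caller, names assumed):
 *
 *	rc = kvm_x86_ops->check_intercept(vcpu, &info, X86_ICPT_PRE_EXCEPT);
 *	if (rc == X86EMUL_INTERCEPTED)
 *		return rc;	// a nested #VMEXIT was set up, stop emulating
 *	// ... otherwise continue, repeating for X86_ICPT_POST_EXCEPT and
 *	// X86_ICPT_POST_MEMACCESS as applicable.
 */
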
6168 static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
6169 {
6171 }
6172
6173 static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
6174 {
6175         if (pause_filter_thresh)
6176                 shrink_ple_window(vcpu);
6177 }
6178
6179 static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
6180 {
6181         if (avic_handle_apic_id_update(vcpu) != 0)
6182                 return;
6183         avic_handle_dfr_update(vcpu);
6184         avic_handle_ldr_update(vcpu);
6185 }
6186
6187 static void svm_setup_mce(struct kvm_vcpu *vcpu)
6188 {
6189         /* [63:9] are reserved. */
6190         vcpu->arch.mcg_cap &= 0x1ff;
6191 }
6192
6193 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
6194 {
6195         struct vcpu_svm *svm = to_svm(vcpu);
6196
6197         /* Per APM Vol.2 15.22.2 "Response to SMI" */
6198         if (!gif_set(svm))
6199                 return 0;
6200
6201         if (is_guest_mode(&svm->vcpu) &&
6202             svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
6203                 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
6204                 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
6205                 svm->nested.exit_required = true;
6206                 return 0;
6207         }
6208
6209         return 1;
6210 }
6211
6212 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
6213 {
6214         struct vcpu_svm *svm = to_svm(vcpu);
6215         int ret;
6216
6217         if (is_guest_mode(vcpu)) {
6218                 /* FED8h - SVM Guest */
6219                 put_smstate(u64, smstate, 0x7ed8, 1);
6220                 /* FEE0h - SVM Guest VMCB Physical Address */
6221                 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
6222
6223                 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
6224                 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
6225                 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
6226
6227                 ret = nested_svm_vmexit(svm);
6228                 if (ret)
6229                         return ret;
6230         }
6231         return 0;
6232 }
6233
6234 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
6235 {
6236         struct vcpu_svm *svm = to_svm(vcpu);
6237         struct vmcb *nested_vmcb;
6238         struct kvm_host_map map;
6239         u64 guest;
6240         u64 vmcb;
6241
6242         guest = GET_SMSTATE(u64, smstate, 0x7ed8);
6243         vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
6244
6245         if (guest) {
6246                 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL)
6247                         return 1;
6248                 nested_vmcb = map.hva;
6249                 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map);
6250         }
6251         return 0;
6252 }
6253
6254 static int enable_smi_window(struct kvm_vcpu *vcpu)
6255 {
6256         struct vcpu_svm *svm = to_svm(vcpu);
6257
6258         if (!gif_set(svm)) {
6259                 if (vgif_enabled(svm))
6260                         set_intercept(svm, INTERCEPT_STGI);
6261                 /* STGI will cause a vm exit */
6262                 return 1;
6263         }
6264         return 0;
6265 }
6266
6267 static int sev_asid_new(void)
6268 {
6269         int pos;
6270
6271         /*
6272          * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
6273          */
6274         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
6275         if (pos >= max_sev_asid)
6276                 return -EBUSY;
6277
6278         set_bit(pos, sev_asid_bitmap);
6279         return pos + 1;
6280 }
6281
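/*
 * Worked example (assuming min_sev_asid == 1 and max_sev_asid == 16):
 * find_next_zero_bit() scans bits [min_sev_asid - 1, max_sev_asid) of
 * sev_asid_bitmap, so bit 0 corresponds to ASID 1 and bit 15 to ASID 16.
 * The first allocation returns pos == 0 and hands out ASID 1 (pos + 1);
 * once all bits are set, pos == max_sev_asid and -EBUSY is returned.
 */
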
6282 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6283 {
6284         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6285         int asid, ret;
6286
6287         ret = -EBUSY;
6288         if (unlikely(sev->active))
6289                 return ret;
6290
6291         asid = sev_asid_new();
6292         if (asid < 0)
6293                 return ret;
6294
6295         ret = sev_platform_init(&argp->error);
6296         if (ret)
6297                 goto e_free;
6298
6299         sev->active = true;
6300         sev->asid = asid;
6301         INIT_LIST_HEAD(&sev->regions_list);
6302
6303         return 0;
6304
6305 e_free:
6306         __sev_asid_free(asid);
6307         return ret;
6308 }
6309
6310 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
6311 {
6312         struct sev_data_activate *data;
6313         int asid = sev_get_asid(kvm);
6314         int ret;
6315
6316         wbinvd_on_all_cpus();
6317
6318         ret = sev_guest_df_flush(error);
6319         if (ret)
6320                 return ret;
6321
6322         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6323         if (!data)
6324                 return -ENOMEM;
6325
6326         /* activate ASID on the given handle */
6327         data->handle = handle;
6328         data->asid   = asid;
6329         ret = sev_guest_activate(data, error);
6330         kfree(data);
6331
6332         return ret;
6333 }
6334
6335 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
6336 {
6337         struct fd f;
6338         int ret;
6339
6340         f = fdget(fd);
6341         if (!f.file)
6342                 return -EBADF;
6343
6344         ret = sev_issue_cmd_external_user(f.file, id, data, error);
6345
6346         fdput(f);
6347         return ret;
6348 }
6349
6350 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
6351 {
6352         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6353
6354         return __sev_issue_cmd(sev->fd, id, data, error);
6355 }
6356
6357 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
6358 {
6359         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6360         struct sev_data_launch_start *start;
6361         struct kvm_sev_launch_start params;
6362         void *dh_blob, *session_blob;
6363         int *error = &argp->error;
6364         int ret;
6365
6366         if (!sev_guest(kvm))
6367                 return -ENOTTY;
6368
6369         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6370                 return -EFAULT;
6371
6372         start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
6373         if (!start)
6374                 return -ENOMEM;
6375
6376         dh_blob = NULL;
6377         if (params.dh_uaddr) {
6378                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
6379                 if (IS_ERR(dh_blob)) {
6380                         ret = PTR_ERR(dh_blob);
6381                         goto e_free;
6382                 }
6383
6384                 start->dh_cert_address = __sme_set(__pa(dh_blob));
6385                 start->dh_cert_len = params.dh_len;
6386         }
6387
6388         session_blob = NULL;
6389         if (params.session_uaddr) {
6390                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
6391                 if (IS_ERR(session_blob)) {
6392                         ret = PTR_ERR(session_blob);
6393                         goto e_free_dh;
6394                 }
6395
6396                 start->session_address = __sme_set(__pa(session_blob));
6397                 start->session_len = params.session_len;
6398         }
6399
6400         start->handle = params.handle;
6401         start->policy = params.policy;
6402
6403         /* create memory encryption context */
6404         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
6405         if (ret)
6406                 goto e_free_session;
6407
6408         /* Bind ASID to this guest */
6409         ret = sev_bind_asid(kvm, start->handle, error);
6410         if (ret)
6411                 goto e_free_session;
6412
6413         /* return handle to userspace */
6414         params.handle = start->handle;
6415         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
6416                 sev_unbind_asid(kvm, start->handle);
6417                 ret = -EFAULT;
6418                 goto e_free_session;
6419         }
6420
6421         sev->handle = start->handle;
6422         sev->fd = argp->sev_fd;
6423
6424 e_free_session:
6425         kfree(session_blob);
6426 e_free_dh:
6427         kfree(dh_blob);
6428 e_free:
6429         kfree(start);
6430         return ret;
6431 }
6432
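/*
 * A minimal userspace sketch of LAUNCH_START (illustrative only; the
 * optional DH-cert and session blobs are elided, as is error handling):
 *
 *	struct kvm_sev_launch_start start = { .policy = 0 };
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_LAUNCH_START,
 *		.data   = (__u64)(uintptr_t)&start,
 *		.sev_fd = sev_fd,	// fd of /dev/sev
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *	// on success, start.handle holds the FW guest handle that the
 *	// code above copied back to userspace
 */
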
6433 static unsigned long get_num_contig_pages(unsigned long idx,
6434                                 struct page **inpages, unsigned long npages)
6435 {
6436         unsigned long paddr, next_paddr;
6437         unsigned long i = idx + 1, pages = 1;
6438
6439         /* find the number of contiguous pages starting from idx */
6440         paddr = __sme_page_pa(inpages[idx]);
6441         while (i < npages) {
6442                 next_paddr = __sme_page_pa(inpages[i++]);
6443                 if ((paddr + PAGE_SIZE) == next_paddr) {
6444                         pages++;
6445                         paddr = next_paddr;
6446                         continue;
6447                 }
6448                 break;
6449         }
6450
6451         return pages;
6452 }
6453
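/*
 * Example: for pinned pages with physical addresses
 * { 0x1000, 0x2000, 0x3000, 0x8000 }, get_num_contig_pages(0, ...)
 * returns 3 (one contiguous run covering 0x1000-0x3fff) while
 * get_num_contig_pages(3, ...) returns 1.
 */
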
6454 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6455 {
6456         unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
6457         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6458         struct kvm_sev_launch_update_data params;
6459         struct sev_data_launch_update_data *data;
6460         struct page **inpages;
6461         int ret;
6462
6463         if (!sev_guest(kvm))
6464                 return -ENOTTY;
6465
6466         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6467                 return -EFAULT;
6468
6469         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6470         if (!data)
6471                 return -ENOMEM;
6472
6473         vaddr = params.uaddr;
6474         size = params.len;
6475         vaddr_end = vaddr + size;
6476
6477         /* Lock the user memory. */
6478         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6479         if (!inpages) {
6480                 ret = -ENOMEM;
6481                 goto e_free;
6482         }
6483
6484         /*
6485          * The LAUNCH_UPDATE command will perform in-place encryption of the
6486          * memory content (i.e., it will write the same memory region with C=1).
6487          * It's possible that the cache may contain the data with C=0, i.e.,
6488          * unencrypted, so invalidate it first.
6489          */
6490         sev_clflush_pages(inpages, npages);
6491
6492         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6493                 int offset, len;
6494
6495                 /*
6496                  * If the user buffer is not page-aligned, calculate the offset
6497                  * within the page.
6498                  */
6499                 offset = vaddr & (PAGE_SIZE - 1);
6500
6501                 /* Calculate the number of pages that can be encrypted in one go. */
6502                 pages = get_num_contig_pages(i, inpages, npages);
6503
6504                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6505
6506                 data->handle = sev->handle;
6507                 data->len = len;
6508                 data->address = __sme_page_pa(inpages[i]) + offset;
6509                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6510                 if (ret)
6511                         goto e_unpin;
6512
6513                 size -= len;
6514                 next_vaddr = vaddr + len;
6515         }
6516
6517 e_unpin:
6518         /* The memory content was updated; mark the pages dirty and accessed. */
6519         for (i = 0; i < npages; i++) {
6520                 set_page_dirty_lock(inpages[i]);
6521                 mark_page_accessed(inpages[i]);
6522         }
6523         /* unlock the user pages */
6524         sev_unpin_memory(kvm, inpages, npages);
6525 e_free:
6526         kfree(data);
6527         return ret;
6528 }
6529
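/*
 * Loop example: with a pinned range whose first two pages are physically
 * contiguous and params.uaddr == base + 0x800, the first iteration
 * issues LAUNCH_UPDATE_DATA for len = min(2 * PAGE_SIZE - 0x800, size)
 * bytes starting at __sme_page_pa(inpages[0]) + 0x800; the next
 * iteration then resumes at inpages[2].
 */
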
6530 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6531 {
6532         void __user *measure = (void __user *)(uintptr_t)argp->data;
6533         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6534         struct sev_data_launch_measure *data;
6535         struct kvm_sev_launch_measure params;
6536         void __user *p = NULL;
6537         void *blob = NULL;
6538         int ret;
6539
6540         if (!sev_guest(kvm))
6541                 return -ENOTTY;
6542
6543         if (copy_from_user(&params, measure, sizeof(params)))
6544                 return -EFAULT;
6545
6546         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6547         if (!data)
6548                 return -ENOMEM;
6549
6550         /* User wants to query the blob length */
6551         if (!params.len)
6552                 goto cmd;
6553
6554         p = (void __user *)(uintptr_t)params.uaddr;
6555         if (p) {
6556                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6557                         ret = -EINVAL;
6558                         goto e_free;
6559                 }
6560
6561                 ret = -ENOMEM;
6562                 blob = kmalloc(params.len, GFP_KERNEL);
6563                 if (!blob)
6564                         goto e_free;
6565
6566                 data->address = __psp_pa(blob);
6567                 data->len = params.len;
6568         }
6569
6570 cmd:
6571         data->handle = sev->handle;
6572         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6573
6574         /*
6575          * If userspace only queried the blob length, the FW has stored it in data->len.
6576          */
6577         if (!params.len)
6578                 goto done;
6579
6580         if (ret)
6581                 goto e_free_blob;
6582
6583         if (blob) {
6584                 if (copy_to_user(p, blob, params.len))
6585                         ret = -EFAULT;
6586         }
6587
6588 done:
6589         params.len = data->len;
6590         if (copy_to_user(measure, &params, sizeof(params)))
6591                 ret = -EFAULT;
6592 e_free_blob:
6593         kfree(blob);
6594 e_free:
6595         kfree(data);
6596         return ret;
6597 }
6598
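/*
 * The measurement is typically fetched with a two-call pattern.  A
 * userspace sketch (sev_ioctl() is a hypothetical wrapper around the
 * KVM_MEMORY_ENCRYPT_OP ioctl; error handling elided):
 *
 *	struct kvm_sev_launch_measure m = { .len = 0 };
 *	// 1st call: len == 0, the FW reports the required length
 *	sev_ioctl(vm_fd, KVM_SEV_LAUNCH_MEASURE, &m);
 *	m.uaddr = (__u64)(uintptr_t)malloc(m.len);
 *	// 2nd call: the FW writes the measurement into the buffer
 *	sev_ioctl(vm_fd, KVM_SEV_LAUNCH_MEASURE, &m);
 */
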
6599 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6600 {
6601         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6602         struct sev_data_launch_finish *data;
6603         int ret;
6604
6605         if (!sev_guest(kvm))
6606                 return -ENOTTY;
6607
6608         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6609         if (!data)
6610                 return -ENOMEM;
6611
6612         data->handle = sev->handle;
6613         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6614
6615         kfree(data);
6616         return ret;
6617 }
6618
6619 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6620 {
6621         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6622         struct kvm_sev_guest_status params;
6623         struct sev_data_guest_status *data;
6624         int ret;
6625
6626         if (!sev_guest(kvm))
6627                 return -ENOTTY;
6628
6629         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6630         if (!data)
6631                 return -ENOMEM;
6632
6633         data->handle = sev->handle;
6634         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6635         if (ret)
6636                 goto e_free;
6637
6638         params.policy = data->policy;
6639         params.state = data->state;
6640         params.handle = data->handle;
6641
6642         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6643                 ret = -EFAULT;
6644 e_free:
6645         kfree(data);
6646         return ret;
6647 }
6648
6649 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6650                                unsigned long dst, int size,
6651                                int *error, bool enc)
6652 {
6653         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6654         struct sev_data_dbg *data;
6655         int ret;
6656
6657         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6658         if (!data)
6659                 return -ENOMEM;
6660
6661         data->handle = sev->handle;
6662         data->dst_addr = dst;
6663         data->src_addr = src;
6664         data->len = size;
6665
6666         ret = sev_issue_cmd(kvm,
6667                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6668                             data, error);
6669         kfree(data);
6670         return ret;
6671 }
6672
6673 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6674                              unsigned long dst_paddr, int sz, int *err)
6675 {
6676         int offset;
6677
6678         /*
6679          * It's safe to read more than was asked for; the caller must ensure
6680          * that the destination has enough space.
6681          */
6682         src_paddr = round_down(src_paddr, 16);
6683         offset = src_paddr & 15;
6684         sz = round_up(sz + offset, 16);
6685
6686         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6687 }
6688
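/*
 * Alignment example: a 20-byte read at src_paddr 0x1009 becomes
 * src_paddr = 0x1000 (rounded down to 16), offset = 9 and
 * sz = round_up(20 + 9, 16) = 32, so the FW request covers all the
 * 16-byte blocks that contain the requested range.
 */
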
6689 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6690                                   unsigned long __user dst_uaddr,
6691                                   unsigned long dst_paddr,
6692                                   int size, int *err)
6693 {
6694         struct page *tpage = NULL;
6695         int ret, offset;
6696
6697         /* If the inputs are not 16-byte aligned, use an intermediate buffer */
6698         if (!IS_ALIGNED(dst_paddr, 16) ||
6699             !IS_ALIGNED(paddr,     16) ||
6700             !IS_ALIGNED(size,      16)) {
6701                 tpage = alloc_page(GFP_KERNEL);
6702                 if (!tpage)
6703                         return -ENOMEM;
6704
6705                 dst_paddr = __sme_page_pa(tpage);
6706         }
6707
6708         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6709         if (ret)
6710                 goto e_free;
6711
6712         if (tpage) {
6713                 offset = paddr & 15;
6714                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6715                                  page_address(tpage) + offset, size))
6716                         ret = -EFAULT;
6717         }
6718
6719 e_free:
6720         if (tpage)
6721                 __free_page(tpage);
6722
6723         return ret;
6724 }
6725
6726 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6727                                   unsigned long __user vaddr,
6728                                   unsigned long dst_paddr,
6729                                   unsigned long __user dst_vaddr,
6730                                   int size, int *error)
6731 {
6732         struct page *src_tpage = NULL;
6733         struct page *dst_tpage = NULL;
6734         int ret, len = size;
6735
6736         /* If the source buffer is not 16-byte aligned, use an intermediate buffer */
6737         if (!IS_ALIGNED(vaddr, 16)) {
6738                 src_tpage = alloc_page(GFP_KERNEL);
6739                 if (!src_tpage)
6740                         return -ENOMEM;
6741
6742                 if (copy_from_user(page_address(src_tpage),
6743                                 (void __user *)(uintptr_t)vaddr, size)) {
6744                         __free_page(src_tpage);
6745                         return -EFAULT;
6746                 }
6747
6748                 paddr = __sme_page_pa(src_tpage);
6749         }
6750
6751         /*
6752          *  If the destination buffer or length is not 16-byte aligned, do a read-modify-write:
6753          *   - decrypt destination in an intermediate buffer
6754          *   - copy the source buffer in an intermediate buffer
6755          *   - use the intermediate buffer as source buffer
6756          */
6757         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6758                 int dst_offset;
6759
6760                 dst_tpage = alloc_page(GFP_KERNEL);
6761                 if (!dst_tpage) {
6762                         ret = -ENOMEM;
6763                         goto e_free;
6764                 }
6765
6766                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6767                                         __sme_page_pa(dst_tpage), size, error);
6768                 if (ret)
6769                         goto e_free;
6770
6771                 /*
6772                  *  If the source is a kernel buffer (already copied above), use
6773                  *  memcpy(); otherwise copy directly from userspace.
6774                  */
6775                 dst_offset = dst_paddr & 15;
6776
6777                 if (src_tpage)
6778                         memcpy(page_address(dst_tpage) + dst_offset,
6779                                page_address(src_tpage), size);
6780                 else {
6781                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
6782                                            (void __user *)(uintptr_t)vaddr, size)) {
6783                                 ret = -EFAULT;
6784                                 goto e_free;
6785                         }
6786                 }
6787
6788                 paddr = __sme_page_pa(dst_tpage);
6789                 dst_paddr = round_down(dst_paddr, 16);
6790                 len = round_up(size, 16);
6791         }
6792
6793         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6794
6795 e_free:
6796         if (src_tpage)
6797                 __free_page(src_tpage);
6798         if (dst_tpage)
6799                 __free_page(dst_tpage);
6800         return ret;
6801 }
6802
6803 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6804 {
6805         unsigned long vaddr, vaddr_end, next_vaddr;
6806         unsigned long dst_vaddr;
6807         struct page **src_p, **dst_p;
6808         struct kvm_sev_dbg debug;
6809         unsigned long n;
6810         unsigned int size;
6811         int ret;
6812
6813         if (!sev_guest(kvm))
6814                 return -ENOTTY;
6815
6816         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6817                 return -EFAULT;
6818
6819         if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
6820                 return -EINVAL;
6821         if (!debug.dst_uaddr)
6822                 return -EINVAL;
6823
6824         vaddr = debug.src_uaddr;
6825         size = debug.len;
6826         vaddr_end = vaddr + size;
6827         dst_vaddr = debug.dst_uaddr;
6828
6829         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6830                 int len, s_off, d_off;
6831
6832                 /* lock userspace source and destination page */
6833                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6834                 if (!src_p)
6835                         return -EFAULT;
6836
6837                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6838                 if (!dst_p) {
6839                         sev_unpin_memory(kvm, src_p, n);
6840                         return -EFAULT;
6841                 }
6842
6843                 /*
6844                  * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6845                  * memory content (i.e., it will write the same memory region with C=1).
6846                  * It's possible that the cache may contain the data with C=0, i.e.,
6847                  * unencrypted, so invalidate it first.
6848                  */
6849                 sev_clflush_pages(src_p, 1);
6850                 sev_clflush_pages(dst_p, 1);
6851
6852                 /*
6853                  * Since user buffer may not be page aligned, calculate the
6854                  * offset within the page.
6855                  */
6856                 s_off = vaddr & ~PAGE_MASK;
6857                 d_off = dst_vaddr & ~PAGE_MASK;
6858                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6859
6860                 if (dec)
6861                         ret = __sev_dbg_decrypt_user(kvm,
6862                                                      __sme_page_pa(src_p[0]) + s_off,
6863                                                      dst_vaddr,
6864                                                      __sme_page_pa(dst_p[0]) + d_off,
6865                                                      len, &argp->error);
6866                 else
6867                         ret = __sev_dbg_encrypt_user(kvm,
6868                                                      __sme_page_pa(src_p[0]) + s_off,
6869                                                      vaddr,
6870                                                      __sme_page_pa(dst_p[0]) + d_off,
6871                                                      dst_vaddr,
6872                                                      len, &argp->error);
6873
6874                 sev_unpin_memory(kvm, src_p, n);
6875                 sev_unpin_memory(kvm, dst_p, n);
6876
6877                 if (ret)
6878                         goto err;
6879
6880                 next_vaddr = vaddr + len;
6881                 dst_vaddr = dst_vaddr + len;
6882                 size -= len;
6883         }
6884 err:
6885         return ret;
6886 }
6887
6888 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6889 {
6890         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6891         struct sev_data_launch_secret *data;
6892         struct kvm_sev_launch_secret params;
6893         struct page **pages;
6894         void *blob, *hdr;
6895         unsigned long n;
6896         int ret, offset;
6897
6898         if (!sev_guest(kvm))
6899                 return -ENOTTY;
6900
6901         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6902                 return -EFAULT;
6903
6904         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6905         if (!pages)
6906                 return -ENOMEM;
6907
6908         /*
6909          * The secret must be copied into a contiguous memory region, so verify
6910          * that the userspace memory pages are contiguous before issuing the command.
6911          */
6912         if (get_num_contig_pages(0, pages, n) != n) {
6913                 ret = -EINVAL;
6914                 goto e_unpin_memory;
6915         }
6916
6917         ret = -ENOMEM;
6918         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
6919         if (!data)
6920                 goto e_unpin_memory;
6921
6922         offset = params.guest_uaddr & (PAGE_SIZE - 1);
6923         data->guest_address = __sme_page_pa(pages[0]) + offset;
6924         data->guest_len = params.guest_len;
6925
6926         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6927         if (IS_ERR(blob)) {
6928                 ret = PTR_ERR(blob);
6929                 goto e_free;
6930         }
6931
6932         data->trans_address = __psp_pa(blob);
6933         data->trans_len = params.trans_len;
6934
6935         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6936         if (IS_ERR(hdr)) {
6937                 ret = PTR_ERR(hdr);
6938                 goto e_free_blob;
6939         }
6940         data->hdr_address = __psp_pa(hdr);
6941         data->hdr_len = params.hdr_len;
6942
6943         data->handle = sev->handle;
6944         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6945
6946         kfree(hdr);
6947
6948 e_free_blob:
6949         kfree(blob);
6950 e_free:
6951         kfree(data);
6952 e_unpin_memory:
6953         sev_unpin_memory(kvm, pages, n);
6954         return ret;
6955 }
6956
6957 static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
6958 {
6959         struct kvm_sev_cmd sev_cmd;
6960         int r;
6961
6962         if (!svm_sev_enabled())
6963                 return -ENOTTY;
6964
6965         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
6966                 return -EFAULT;
6967
6968         mutex_lock(&kvm->lock);
6969
6970         switch (sev_cmd.id) {
6971         case KVM_SEV_INIT:
6972                 r = sev_guest_init(kvm, &sev_cmd);
6973                 break;
6974         case KVM_SEV_LAUNCH_START:
6975                 r = sev_launch_start(kvm, &sev_cmd);
6976                 break;
6977         case KVM_SEV_LAUNCH_UPDATE_DATA:
6978                 r = sev_launch_update_data(kvm, &sev_cmd);
6979                 break;
6980         case KVM_SEV_LAUNCH_MEASURE:
6981                 r = sev_launch_measure(kvm, &sev_cmd);
6982                 break;
6983         case KVM_SEV_LAUNCH_FINISH:
6984                 r = sev_launch_finish(kvm, &sev_cmd);
6985                 break;
6986         case KVM_SEV_GUEST_STATUS:
6987                 r = sev_guest_status(kvm, &sev_cmd);
6988                 break;
6989         case KVM_SEV_DBG_DECRYPT:
6990                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
6991                 break;
6992         case KVM_SEV_DBG_ENCRYPT:
6993                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
6994                 break;
6995         case KVM_SEV_LAUNCH_SECRET:
6996                 r = sev_launch_secret(kvm, &sev_cmd);
6997                 break;
6998         default:
6999                 r = -EINVAL;
7000                 goto out;
7001         }
7002
7003         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
7004                 r = -EFAULT;
7005
7006 out:
7007         mutex_unlock(&kvm->lock);
7008         return r;
7009 }
7010
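/*
 * Userspace reaches this dispatcher through the KVM_MEMORY_ENCRYPT_OP
 * ioctl on the VM fd.  A minimal sketch of a caller (illustrative only;
 * error handling elided):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,	// fd of /dev/sev
 *	};
 *	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd))
 *		fprintf(stderr, "SEV: fw error %u\n", cmd.error);
 *
 * Commands that take parameters (e.g. KVM_SEV_LAUNCH_START) point
 * cmd.data at a command-specific struct; cmd.error is written back with
 * the firmware error code via the copy_to_user() above.
 */
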
7011 static int svm_register_enc_region(struct kvm *kvm,
7012                                    struct kvm_enc_region *range)
7013 {
7014         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7015         struct enc_region *region;
7016         int ret = 0;
7017
7018         if (!sev_guest(kvm))
7019                 return -ENOTTY;
7020
7021         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
7022                 return -EINVAL;
7023
7024         region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
7025         if (!region)
7026                 return -ENOMEM;
7027
7028         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
7029         if (!region->pages) {
7030                 ret = -ENOMEM;
7031                 goto e_free;
7032         }
7033
7034         /*
7035          * The guest may change the memory encryption attribute from C=0 -> C=1
7036          * or vice versa for this memory range.  Make sure the caches are
7037          * flushed so that guest data is written into memory with the
7038          * correct C-bit.
7039          */
7040         sev_clflush_pages(region->pages, region->npages);
7041
7042         region->uaddr = range->addr;
7043         region->size = range->size;
7044
7045         mutex_lock(&kvm->lock);
7046         list_add_tail(&region->list, &sev->regions_list);
7047         mutex_unlock(&kvm->lock);
7048
7049         return ret;
7050
7051 e_free:
7052         kfree(region);
7053         return ret;
7054 }
7055
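/*
 * Userspace registers guest RAM with this hook via the
 * KVM_MEMORY_ENCRYPT_REG_REGION ioctl (sketch; error handling elided):
 *
 *	struct kvm_enc_region region = {
 *		.addr = (__u64)(uintptr_t)guest_ram,
 *		.size = guest_ram_size,
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region);
 *
 * The matching KVM_MEMORY_ENCRYPT_UNREG_REGION ioctl reaches
 * svm_unregister_enc_region() below.
 */
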
7056 static struct enc_region *
7057 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
7058 {
7059         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
7060         struct list_head *head = &sev->regions_list;
7061         struct enc_region *i;
7062
7063         list_for_each_entry(i, head, list) {
7064                 if (i->uaddr == range->addr &&
7065                     i->size == range->size)
7066                         return i;
7067         }
7068
7069         return NULL;
7070 }
7071
7073 static int svm_unregister_enc_region(struct kvm *kvm,
7074                                      struct kvm_enc_region *range)
7075 {
7076         struct enc_region *region;
7077         int ret;
7078
7079         mutex_lock(&kvm->lock);
7080
7081         if (!sev_guest(kvm)) {
7082                 ret = -ENOTTY;
7083                 goto failed;
7084         }
7085
7086         region = find_enc_region(kvm, range);
7087         if (!region) {
7088                 ret = -EINVAL;
7089                 goto failed;
7090         }
7091
7092         __unregister_enc_region_locked(kvm, region);
7093
7094         mutex_unlock(&kvm->lock);
7095         return 0;
7096
7097 failed:
7098         mutex_unlock(&kvm->lock);
7099         return ret;
7100 }
7101
7102 static bool svm_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
7103 {
7104         unsigned long cr4 = kvm_read_cr4(vcpu);
7105         bool smep = cr4 & X86_CR4_SMEP;
7106         bool smap = cr4 & X86_CR4_SMAP;
7107         bool is_user = svm_get_cpl(vcpu) == 3;
7108
7109         /*
7110          * Detect and work around Erratum 1096 (Fam_17h_00_0Fh).
7111          *
7112          * Erratum:
7113          * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1,
7114          * it is possible that the CPU microcode implementing DecodeAssist will
7115          * fail to read the bytes of the instruction which caused the #NPF.
7116          * In this case, the GuestIntrBytes field of the VMCB on a VMEXIT will
7117          * incorrectly return 0 instead of the correct guest instruction bytes.
7118          *
7119          * This happens because reading the instruction bytes uses a special
7120          * microcode opcode which attempts to read data with CPL=0 privileges.
7121          * The microcode reads CS:RIP and, if it hits an SMAP fault, gives up
7122          * and returns no instruction bytes.
7123          *
7124          * Detection:
7125          * We reach here when the CPU supports DecodeAssist, raised #NPF and
7126          * returned 0 in the GuestIntrBytes field of the VMCB.
7127          * First, the erratum can only be triggered when vCPU CR4.SMAP=1.
7128          * Second, if vCPU CR4.SMEP=1, the erratum can only be triggered when
7129          * vCPU CPL==3 (because otherwise the guest would have triggered an
7130          * SMEP fault instead of #NPF).
7131          * Otherwise (vCPU CR4.SMEP=0), the erratum can be triggered at any
7132          * vCPU CPL.  As most guests that enable SMAP also enable SMEP, use
7133          * the above logic to minimize false positives while still preserving
7134          * semantic correctness in all cases.
7135          *
7136          * Workaround:
7137          * To determine what instruction the guest was executing, the
7138          * hypervisor has to decode the instruction at the instruction pointer.
7139          *
7140          * In a non-SEV guest, the hypervisor can read guest memory to decode
7141          * the instruction when insn_len is zero, so return true to indicate
7142          * that decoding is possible.
7143          *
7144          * In an SEV guest, however, guest memory is encrypted with a
7145          * guest-specific key and the hypervisor cannot decode the instruction
7146          * at the instruction pointer, so the erratum cannot be worked around.
7147          * Print an error and request that the guest be killed.
7148          */
7149         if (smap && (!smep || is_user)) {
7150                 if (!sev_guest(vcpu->kvm))
7151                         return true;
7152
7153                 pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
7154                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
7155         }
7156
7157         return false;
7158 }
7159
7160 static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
7161 {
7162         struct vcpu_svm *svm = to_svm(vcpu);
7163
7164         /*
7165          * TODO: The last condition below latches INIT signals on the vCPU
7166          * when the vCPU is in guest mode and vmcb12 defines an intercept
7167          * on INIT.  To properly emulate the INIT intercept, SVM should
7168          * implement kvm_x86_ops->check_nested_events() and call
7169          * nested_svm_vmexit() there if an INIT signal is pending.
7170          */
7171         return !gif_set(svm) ||
7172                    (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
7173 }
7174
7175 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
7176         .cpu_has_kvm_support = has_svm,
7177         .disabled_by_bios = is_disabled,
7178         .hardware_setup = svm_hardware_setup,
7179         .hardware_unsetup = svm_hardware_unsetup,
7180         .check_processor_compatibility = svm_check_processor_compat,
7181         .hardware_enable = svm_hardware_enable,
7182         .hardware_disable = svm_hardware_disable,
7183         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
7184         .has_emulated_msr = svm_has_emulated_msr,
7185
7186         .vcpu_create = svm_create_vcpu,
7187         .vcpu_free = svm_free_vcpu,
7188         .vcpu_reset = svm_vcpu_reset,
7189
7190         .vm_alloc = svm_vm_alloc,
7191         .vm_free = svm_vm_free,
7192         .vm_init = avic_vm_init,
7193         .vm_destroy = svm_vm_destroy,
7194
7195         .prepare_guest_switch = svm_prepare_guest_switch,
7196         .vcpu_load = svm_vcpu_load,
7197         .vcpu_put = svm_vcpu_put,
7198         .vcpu_blocking = svm_vcpu_blocking,
7199         .vcpu_unblocking = svm_vcpu_unblocking,
7200
7201         .update_bp_intercept = update_bp_intercept,
7202         .get_msr_feature = svm_get_msr_feature,
7203         .get_msr = svm_get_msr,
7204         .set_msr = svm_set_msr,
7205         .get_segment_base = svm_get_segment_base,
7206         .get_segment = svm_get_segment,
7207         .set_segment = svm_set_segment,
7208         .get_cpl = svm_get_cpl,
7209         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
7210         .decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
7211         .decache_cr3 = svm_decache_cr3,
7212         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
7213         .set_cr0 = svm_set_cr0,
7214         .set_cr3 = svm_set_cr3,
7215         .set_cr4 = svm_set_cr4,
7216         .set_efer = svm_set_efer,
7217         .get_idt = svm_get_idt,
7218         .set_idt = svm_set_idt,
7219         .get_gdt = svm_get_gdt,
7220         .set_gdt = svm_set_gdt,
7221         .get_dr6 = svm_get_dr6,
7222         .set_dr6 = svm_set_dr6,
7223         .set_dr7 = svm_set_dr7,
7224         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
7225         .cache_reg = svm_cache_reg,
7226         .get_rflags = svm_get_rflags,
7227         .set_rflags = svm_set_rflags,
7228
7229         .tlb_flush = svm_flush_tlb,
7230         .tlb_flush_gva = svm_flush_tlb_gva,
7231
7232         .run = svm_vcpu_run,
7233         .handle_exit = handle_exit,
7234         .skip_emulated_instruction = skip_emulated_instruction,
7235         .set_interrupt_shadow = svm_set_interrupt_shadow,
7236         .get_interrupt_shadow = svm_get_interrupt_shadow,
7237         .patch_hypercall = svm_patch_hypercall,
7238         .set_irq = svm_set_irq,
7239         .set_nmi = svm_inject_nmi,
7240         .queue_exception = svm_queue_exception,
7241         .cancel_injection = svm_cancel_injection,
7242         .interrupt_allowed = svm_interrupt_allowed,
7243         .nmi_allowed = svm_nmi_allowed,
7244         .get_nmi_mask = svm_get_nmi_mask,
7245         .set_nmi_mask = svm_set_nmi_mask,
7246         .enable_nmi_window = enable_nmi_window,
7247         .enable_irq_window = enable_irq_window,
7248         .update_cr8_intercept = update_cr8_intercept,
7249         .set_virtual_apic_mode = svm_set_virtual_apic_mode,
7250         .get_enable_apicv = svm_get_enable_apicv,
7251         .refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
7252         .load_eoi_exitmap = svm_load_eoi_exitmap,
7253         .hwapic_irr_update = svm_hwapic_irr_update,
7254         .hwapic_isr_update = svm_hwapic_isr_update,
7255         .sync_pir_to_irr = kvm_lapic_find_highest_irr,
7256         .apicv_post_state_restore = avic_post_state_restore,
7257
7258         .set_tss_addr = svm_set_tss_addr,
7259         .set_identity_map_addr = svm_set_identity_map_addr,
7260         .get_tdp_level = get_npt_level,
7261         .get_mt_mask = svm_get_mt_mask,
7262
7263         .get_exit_info = svm_get_exit_info,
7264
7265         .get_lpage_level = svm_get_lpage_level,
7266
7267         .cpuid_update = svm_cpuid_update,
7268
7269         .rdtscp_supported = svm_rdtscp_supported,
7270         .invpcid_supported = svm_invpcid_supported,
7271         .mpx_supported = svm_mpx_supported,
7272         .xsaves_supported = svm_xsaves_supported,
7273         .umip_emulated = svm_umip_emulated,
7274         .pt_supported = svm_pt_supported,
7275
7276         .set_supported_cpuid = svm_set_supported_cpuid,
7277
7278         .has_wbinvd_exit = svm_has_wbinvd_exit,
7279
7280         .read_l1_tsc_offset = svm_read_l1_tsc_offset,
7281         .write_l1_tsc_offset = svm_write_l1_tsc_offset,
7282
7283         .set_tdp_cr3 = set_tdp_cr3,
7284
7285         .check_intercept = svm_check_intercept,
7286         .handle_exit_irqoff = svm_handle_exit_irqoff,
7287
7288         .request_immediate_exit = __kvm_request_immediate_exit,
7289
7290         .sched_in = svm_sched_in,
7291
7292         .pmu_ops = &amd_pmu_ops,
7293         .deliver_posted_interrupt = svm_deliver_avic_intr,
7294         .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
7295         .update_pi_irte = svm_update_pi_irte,
7296         .setup_mce = svm_setup_mce,
7297
7298         .smi_allowed = svm_smi_allowed,
7299         .pre_enter_smm = svm_pre_enter_smm,
7300         .pre_leave_smm = svm_pre_leave_smm,
7301         .enable_smi_window = enable_smi_window,
7302
7303         .mem_enc_op = svm_mem_enc_op,
7304         .mem_enc_reg_region = svm_register_enc_region,
7305         .mem_enc_unreg_region = svm_unregister_enc_region,
7306
7307         .nested_enable_evmcs = NULL,
7308         .nested_get_evmcs_version = NULL,
7309
7310         .need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
7311
7312         .apic_init_signal_blocked = svm_apic_init_signal_blocked,
7313 };
7314
7315 static int __init svm_init(void)
7316 {
7317         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
7318                         __alignof__(struct vcpu_svm), THIS_MODULE);
7319 }
7320
7321 static void __exit svm_exit(void)
7322 {
7323         kvm_exit();
7324 }
7325
7326 module_init(svm_init)
7327 module_exit(svm_exit)