1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * derived from drivers/kvm/kvm_main.c
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright (C) 2008 Qumranet, Inc.
9  * Copyright IBM Corporation, 2008
10  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11  *
12  * Authors:
13  *   Avi Kivity   <avi@qumranet.com>
14  *   Yaniv Kamay  <yaniv@qumranet.com>
15  *   Amit Shah    <amit.shah@qumranet.com>
16  *   Ben-Ami Yassour <benami@il.ibm.com>
17  */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/kvm_host.h>
21 #include "irq.h"
22 #include "ioapic.h"
23 #include "mmu.h"
24 #include "i8254.h"
25 #include "tss.h"
26 #include "kvm_cache_regs.h"
27 #include "kvm_emulate.h"
28 #include "mmu/page_track.h"
29 #include "x86.h"
30 #include "cpuid.h"
31 #include "pmu.h"
32 #include "hyperv.h"
33 #include "lapic.h"
34 #include "xen.h"
35 #include "smm.h"
36
37 #include <linux/clocksource.h>
38 #include <linux/interrupt.h>
39 #include <linux/kvm.h>
40 #include <linux/fs.h>
41 #include <linux/vmalloc.h>
42 #include <linux/export.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mman.h>
45 #include <linux/highmem.h>
46 #include <linux/iommu.h>
47 #include <linux/cpufreq.h>
48 #include <linux/user-return-notifier.h>
49 #include <linux/srcu.h>
50 #include <linux/slab.h>
51 #include <linux/perf_event.h>
52 #include <linux/uaccess.h>
53 #include <linux/hash.h>
54 #include <linux/pci.h>
55 #include <linux/timekeeper_internal.h>
56 #include <linux/pvclock_gtod.h>
57 #include <linux/kvm_irqfd.h>
58 #include <linux/irqbypass.h>
59 #include <linux/sched/stat.h>
60 #include <linux/sched/isolation.h>
61 #include <linux/mem_encrypt.h>
62 #include <linux/entry-kvm.h>
63 #include <linux/suspend.h>
64 #include <linux/smp.h>
65
66 #include <trace/events/ipi.h>
67 #include <trace/events/kvm.h>
68
69 #include <asm/debugreg.h>
70 #include <asm/msr.h>
71 #include <asm/desc.h>
72 #include <asm/mce.h>
73 #include <asm/pkru.h>
74 #include <linux/kernel_stat.h>
75 #include <asm/fpu/api.h>
76 #include <asm/fpu/xcr.h>
77 #include <asm/fpu/xstate.h>
78 #include <asm/pvclock.h>
79 #include <asm/div64.h>
80 #include <asm/irq_remapping.h>
81 #include <asm/mshyperv.h>
82 #include <asm/hypervisor.h>
83 #include <asm/tlbflush.h>
84 #include <asm/intel_pt.h>
85 #include <asm/emulate_prefix.h>
86 #include <asm/sgx.h>
87 #include <clocksource/hyperv_timer.h>
88
89 #define CREATE_TRACE_POINTS
90 #include "trace.h"
91
92 #define MAX_IO_MSRS 256
93 #define KVM_MAX_MCE_BANKS 32
94
95 struct kvm_caps kvm_caps __read_mostly = {
96         .supported_mce_cap = MCG_CTL_P | MCG_SER_P,
97 };
98 EXPORT_SYMBOL_GPL(kvm_caps);
99
100 #define  ERR_PTR_USR(e)  ((void __user *)ERR_PTR(e))
101
102 #define emul_to_vcpu(ctxt) \
103         ((struct kvm_vcpu *)(ctxt)->vcpu)
104
105 /* EFER defaults:
106  * - enable SYSCALL by default because it is emulated by KVM
107  * - enable LME and LMA by default on 64-bit KVM
108  */
109 #ifdef CONFIG_X86_64
110 static
111 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
112 #else
113 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
114 #endif
115
116 static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
117
118 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
119
120 #define KVM_CAP_PMU_VALID_MASK KVM_PMU_CAP_DISABLE
121
122 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
123                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
124
125 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
126 static void process_nmi(struct kvm_vcpu *vcpu);
127 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
128 static void store_regs(struct kvm_vcpu *vcpu);
129 static int sync_regs(struct kvm_vcpu *vcpu);
130 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
131
132 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
133 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
134
135 static DEFINE_MUTEX(vendor_module_lock);
136 struct kvm_x86_ops kvm_x86_ops __read_mostly;
137
138 #define KVM_X86_OP(func)                                             \
139         DEFINE_STATIC_CALL_NULL(kvm_x86_##func,                      \
140                                 *(((struct kvm_x86_ops *)0)->func));
141 #define KVM_X86_OP_OPTIONAL KVM_X86_OP
142 #define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
143 #include <asm/kvm-x86-ops.h>
144 EXPORT_STATIC_CALL_GPL(kvm_x86_get_cs_db_l_bits);
145 EXPORT_STATIC_CALL_GPL(kvm_x86_cache_reg);
146
147 static bool __read_mostly ignore_msrs = false;
148 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
149
150 bool __read_mostly report_ignored_msrs = true;
151 module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
152 EXPORT_SYMBOL_GPL(report_ignored_msrs);
153
154 unsigned int min_timer_period_us = 200;
155 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
156
157 static bool __read_mostly kvmclock_periodic_sync = true;
158 module_param(kvmclock_periodic_sync, bool, S_IRUGO);
159
160 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
161 static u32 __read_mostly tsc_tolerance_ppm = 250;
162 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
163
164 /*
165  * lapic timer advance (tscdeadline mode only) in nanoseconds.  '-1' enables
166  * adaptive tuning starting from default advancement of 1000ns.  '0' disables
167  * advancement entirely.  Any other value is used as-is and disables adaptive
168  * tuning, i.e. allows privileged userspace to set an exact advancement time.
169  */
170 static int __read_mostly lapic_timer_advance_ns = -1;
171 module_param(lapic_timer_advance_ns, int, S_IRUGO | S_IWUSR);
172
173 static bool __read_mostly vector_hashing = true;
174 module_param(vector_hashing, bool, S_IRUGO);
175
176 bool __read_mostly enable_vmware_backdoor = false;
177 module_param(enable_vmware_backdoor, bool, S_IRUGO);
178 EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
179
180 /*
181  * Flags to manipulate forced emulation behavior (any non-zero value will
182  * enable forced emulation).
183  */
184 #define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
185 static int __read_mostly force_emulation_prefix;
186 module_param(force_emulation_prefix, int, 0644);
187
188 int __read_mostly pi_inject_timer = -1;
189 module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
190
191 /* Enable/disable PMU virtualization */
192 bool __read_mostly enable_pmu = true;
193 EXPORT_SYMBOL_GPL(enable_pmu);
194 module_param(enable_pmu, bool, 0444);
195
196 bool __read_mostly eager_page_split = true;
197 module_param(eager_page_split, bool, 0644);
198
199 /* Enable/disable SMT_RSB bug mitigation */
200 static bool __read_mostly mitigate_smt_rsb;
201 module_param(mitigate_smt_rsb, bool, 0444);
202
203 /*
204  * Restoring the host value for MSRs that are only consumed when running in
205  * usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
206  * returns to userspace, i.e. the kernel can run with the guest's value.
207  */
208 #define KVM_MAX_NR_USER_RETURN_MSRS 16
209
210 struct kvm_user_return_msrs {
211         struct user_return_notifier urn;
212         bool registered;
213         struct kvm_user_return_msr_values {
214                 u64 host;
215                 u64 curr;
216         } values[KVM_MAX_NR_USER_RETURN_MSRS];
217 };
218
219 u32 __read_mostly kvm_nr_uret_msrs;
220 EXPORT_SYMBOL_GPL(kvm_nr_uret_msrs);
221 static u32 __read_mostly kvm_uret_msrs_list[KVM_MAX_NR_USER_RETURN_MSRS];
222 static struct kvm_user_return_msrs __percpu *user_return_msrs;
223
224 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
225                                 | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
226                                 | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
227                                 | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
228
229 u64 __read_mostly host_efer;
230 EXPORT_SYMBOL_GPL(host_efer);
231
232 bool __read_mostly allow_smaller_maxphyaddr = false;
233 EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr);
234
235 bool __read_mostly enable_apicv = true;
236 EXPORT_SYMBOL_GPL(enable_apicv);
237
238 u64 __read_mostly host_xss;
239 EXPORT_SYMBOL_GPL(host_xss);
240
241 u64 __read_mostly host_arch_capabilities;
242 EXPORT_SYMBOL_GPL(host_arch_capabilities);
243
244 const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
245         KVM_GENERIC_VM_STATS(),
246         STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
247         STATS_DESC_COUNTER(VM, mmu_pte_write),
248         STATS_DESC_COUNTER(VM, mmu_pde_zapped),
249         STATS_DESC_COUNTER(VM, mmu_flooded),
250         STATS_DESC_COUNTER(VM, mmu_recycled),
251         STATS_DESC_COUNTER(VM, mmu_cache_miss),
252         STATS_DESC_ICOUNTER(VM, mmu_unsync),
253         STATS_DESC_ICOUNTER(VM, pages_4k),
254         STATS_DESC_ICOUNTER(VM, pages_2m),
255         STATS_DESC_ICOUNTER(VM, pages_1g),
256         STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
257         STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
258         STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
259 };
260
261 const struct kvm_stats_header kvm_vm_stats_header = {
262         .name_size = KVM_STATS_NAME_SIZE,
263         .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
264         .id_offset = sizeof(struct kvm_stats_header),
265         .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
266         .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
267                        sizeof(kvm_vm_stats_desc),
268 };
269
270 const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
271         KVM_GENERIC_VCPU_STATS(),
272         STATS_DESC_COUNTER(VCPU, pf_taken),
273         STATS_DESC_COUNTER(VCPU, pf_fixed),
274         STATS_DESC_COUNTER(VCPU, pf_emulate),
275         STATS_DESC_COUNTER(VCPU, pf_spurious),
276         STATS_DESC_COUNTER(VCPU, pf_fast),
277         STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
278         STATS_DESC_COUNTER(VCPU, pf_guest),
279         STATS_DESC_COUNTER(VCPU, tlb_flush),
280         STATS_DESC_COUNTER(VCPU, invlpg),
281         STATS_DESC_COUNTER(VCPU, exits),
282         STATS_DESC_COUNTER(VCPU, io_exits),
283         STATS_DESC_COUNTER(VCPU, mmio_exits),
284         STATS_DESC_COUNTER(VCPU, signal_exits),
285         STATS_DESC_COUNTER(VCPU, irq_window_exits),
286         STATS_DESC_COUNTER(VCPU, nmi_window_exits),
287         STATS_DESC_COUNTER(VCPU, l1d_flush),
288         STATS_DESC_COUNTER(VCPU, halt_exits),
289         STATS_DESC_COUNTER(VCPU, request_irq_exits),
290         STATS_DESC_COUNTER(VCPU, irq_exits),
291         STATS_DESC_COUNTER(VCPU, host_state_reload),
292         STATS_DESC_COUNTER(VCPU, fpu_reload),
293         STATS_DESC_COUNTER(VCPU, insn_emulation),
294         STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
295         STATS_DESC_COUNTER(VCPU, hypercalls),
296         STATS_DESC_COUNTER(VCPU, irq_injections),
297         STATS_DESC_COUNTER(VCPU, nmi_injections),
298         STATS_DESC_COUNTER(VCPU, req_event),
299         STATS_DESC_COUNTER(VCPU, nested_run),
300         STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
301         STATS_DESC_COUNTER(VCPU, directed_yield_successful),
302         STATS_DESC_COUNTER(VCPU, preemption_reported),
303         STATS_DESC_COUNTER(VCPU, preemption_other),
304         STATS_DESC_IBOOLEAN(VCPU, guest_mode),
305         STATS_DESC_COUNTER(VCPU, notify_window_exits),
306 };
307
308 const struct kvm_stats_header kvm_vcpu_stats_header = {
309         .name_size = KVM_STATS_NAME_SIZE,
310         .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
311         .id_offset = sizeof(struct kvm_stats_header),
312         .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
313         .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
314                        sizeof(kvm_vcpu_stats_desc),
315 };
316
317 u64 __read_mostly host_xcr0;
318
319 static struct kmem_cache *x86_emulator_cache;
320
321 /*
322  * Called when the previous get/set of an MSR hit an invalid MSR.
323  * Return true if this failed MSR access should be ignored/silenced.
324  */
325 static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
326 {
327         const char *op = write ? "wrmsr" : "rdmsr";
328
329         if (ignore_msrs) {
330                 if (report_ignored_msrs)
331                         kvm_pr_unimpl("ignored %s: 0x%x data 0x%llx\n",
332                                       op, msr, data);
333                 /* Mask the error */
334                 return true;
335         } else {
336                 kvm_debug_ratelimited("unhandled %s: 0x%x data 0x%llx\n",
337                                       op, msr, data);
338                 return false;
339         }
340 }
341
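/*
 * Create the kmem cache for emulation contexts; the region from the 'src'
 * field to the end of the struct is whitelisted for usercopy.
 */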
342 static struct kmem_cache *kvm_alloc_emulator_cache(void)
343 {
344         unsigned int useroffset = offsetof(struct x86_emulate_ctxt, src);
345         unsigned int size = sizeof(struct x86_emulate_ctxt);
346
347         return kmem_cache_create_usercopy("x86_emulator", size,
348                                           __alignof__(struct x86_emulate_ctxt),
349                                           SLAB_ACCOUNT, useroffset,
350                                           size - useroffset, NULL);
351 }
352
353 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
354
355 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
356 {
357         int i;
358         for (i = 0; i < ASYNC_PF_PER_VCPU; i++)
359                 vcpu->arch.apf.gfns[i] = ~0;
360 }
361
362 static void kvm_on_user_return(struct user_return_notifier *urn)
363 {
364         unsigned slot;
365         struct kvm_user_return_msrs *msrs
366                 = container_of(urn, struct kvm_user_return_msrs, urn);
367         struct kvm_user_return_msr_values *values;
368         unsigned long flags;
369
370         /*
371          * Disable IRQs at this point, since the following code could be
372          * interrupted and executed through kvm_arch_hardware_disable().
373          */
374         local_irq_save(flags);
375         if (msrs->registered) {
376                 msrs->registered = false;
377                 user_return_notifier_unregister(urn);
378         }
379         local_irq_restore(flags);
380         for (slot = 0; slot < kvm_nr_uret_msrs; ++slot) {
381                 values = &msrs->values[slot];
382                 if (values->host != values->curr) {
383                         wrmsrl(kvm_uret_msrs_list[slot], values->host);
384                         values->curr = values->host;
385                 }
386         }
387 }
388
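/*
 * Probe whether the MSR can be saved/restored by reading it and writing the
 * value back, with preemption disabled so the RDMSR/WRMSR pair hits the same
 * CPU.  Returns 0 if both accesses succeed.
 */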
389 static int kvm_probe_user_return_msr(u32 msr)
390 {
391         u64 val;
392         int ret;
393
394         preempt_disable();
395         ret = rdmsrl_safe(msr, &val);
396         if (ret)
397                 goto out;
398         ret = wrmsrl_safe(msr, val);
399 out:
400         preempt_enable();
401         return ret;
402 }
403
404 int kvm_add_user_return_msr(u32 msr)
405 {
406         BUG_ON(kvm_nr_uret_msrs >= KVM_MAX_NR_USER_RETURN_MSRS);
407
408         if (kvm_probe_user_return_msr(msr))
409                 return -1;
410
411         kvm_uret_msrs_list[kvm_nr_uret_msrs] = msr;
412         return kvm_nr_uret_msrs++;
413 }
414 EXPORT_SYMBOL_GPL(kvm_add_user_return_msr);
415
416 int kvm_find_user_return_msr(u32 msr)
417 {
418         int i;
419
420         for (i = 0; i < kvm_nr_uret_msrs; ++i) {
421                 if (kvm_uret_msrs_list[i] == msr)
422                         return i;
423         }
424         return -1;
425 }
426 EXPORT_SYMBOL_GPL(kvm_find_user_return_msr);
427
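/*
 * Snapshot the host values of all registered user-return MSRs on the current
 * CPU; "curr" starts out equal to the host value, i.e. nothing to restore.
 */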
428 static void kvm_user_return_msr_cpu_online(void)
429 {
430         unsigned int cpu = smp_processor_id();
431         struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
432         u64 value;
433         int i;
434
435         for (i = 0; i < kvm_nr_uret_msrs; ++i) {
436                 rdmsrl_safe(kvm_uret_msrs_list[i], &value);
437                 msrs->values[i].host = value;
438                 msrs->values[i].curr = value;
439         }
440 }
441
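/*
 * Load the guest's value (for the bits in @mask) into a user-return MSR slot,
 * skipping the WRMSR if the value is unchanged and registering the
 * user-return notifier on first use so the host value is restored later.
 */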
442 int kvm_set_user_return_msr(unsigned slot, u64 value, u64 mask)
443 {
444         unsigned int cpu = smp_processor_id();
445         struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
446         int err;
447
448         value = (value & mask) | (msrs->values[slot].host & ~mask);
449         if (value == msrs->values[slot].curr)
450                 return 0;
451         err = wrmsrl_safe(kvm_uret_msrs_list[slot], value);
452         if (err)
453                 return 1;
454
455         msrs->values[slot].curr = value;
456         if (!msrs->registered) {
457                 msrs->urn.on_user_return = kvm_on_user_return;
458                 user_return_notifier_register(&msrs->urn);
459                 msrs->registered = true;
460         }
461         return 0;
462 }
463 EXPORT_SYMBOL_GPL(kvm_set_user_return_msr);
464
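/* Restore the host MSR values and unregister the notifier on this CPU. */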
465 static void drop_user_return_notifiers(void)
466 {
467         unsigned int cpu = smp_processor_id();
468         struct kvm_user_return_msrs *msrs = per_cpu_ptr(user_return_msrs, cpu);
469
470         if (msrs->registered)
471                 kvm_on_user_return(&msrs->urn);
472 }
473
474 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
475 {
476         return vcpu->arch.apic_base;
477 }
478
479 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu)
480 {
481         return kvm_apic_mode(kvm_get_apic_base(vcpu));
482 }
483 EXPORT_SYMBOL_GPL(kvm_get_apic_mode);
484
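/*
 * Validate and set the APIC base MSR: reject reserved bits and, for guest
 * writes, the invalid x2APIC->xAPIC and disabled->x2APIC mode transitions.
 */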
485 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
486 {
487         enum lapic_mode old_mode = kvm_get_apic_mode(vcpu);
488         enum lapic_mode new_mode = kvm_apic_mode(msr_info->data);
489         u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff |
490                 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE);
491
492         if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID)
493                 return 1;
494         if (!msr_info->host_initiated) {
495                 if (old_mode == LAPIC_MODE_X2APIC && new_mode == LAPIC_MODE_XAPIC)
496                         return 1;
497                 if (old_mode == LAPIC_MODE_DISABLED && new_mode == LAPIC_MODE_X2APIC)
498                         return 1;
499         }
500
501         kvm_lapic_set_base(vcpu, msr_info->data);
502         kvm_recalculate_apic_map(vcpu->kvm);
503         return 0;
504 }
505
506 /*
507  * Handle a fault on a hardware virtualization (VMX or SVM) instruction.
508  *
509  * Hardware virtualization extension instructions may fault if a reboot turns
510  * off virtualization while processes are running.  Usually after catching the
511  * fault we just panic; during reboot instead the instruction is ignored.
512  */
513 noinstr void kvm_spurious_fault(void)
514 {
515         /* Fault while not rebooting.  We want the trace. */
516         BUG_ON(!kvm_rebooting);
517 }
518 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
519
520 #define EXCPT_BENIGN            0
521 #define EXCPT_CONTRIBUTORY      1
522 #define EXCPT_PF                2
523
524 static int exception_class(int vector)
525 {
526         switch (vector) {
527         case PF_VECTOR:
528                 return EXCPT_PF;
529         case DE_VECTOR:
530         case TS_VECTOR:
531         case NP_VECTOR:
532         case SS_VECTOR:
533         case GP_VECTOR:
534                 return EXCPT_CONTRIBUTORY;
535         default:
536                 break;
537         }
538         return EXCPT_BENIGN;
539 }
540
541 #define EXCPT_FAULT             0
542 #define EXCPT_TRAP              1
543 #define EXCPT_ABORT             2
544 #define EXCPT_INTERRUPT         3
545 #define EXCPT_DB                4
546
547 static int exception_type(int vector)
548 {
549         unsigned int mask;
550
551         if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
552                 return EXCPT_INTERRUPT;
553
554         mask = 1 << vector;
555
556         /*
557          * #DBs can be trap-like or fault-like, the caller must check other CPU
558          * state, e.g. DR6, to determine whether a #DB is a trap or fault.
559          */
560         if (mask & (1 << DB_VECTOR))
561                 return EXCPT_DB;
562
563         if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
564                 return EXCPT_TRAP;
565
566         if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
567                 return EXCPT_ABORT;
568
569         /* Reserved exceptions will result in fault */
570         return EXCPT_FAULT;
571 }
572
573 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
574                                    struct kvm_queued_exception *ex)
575 {
576         if (!ex->has_payload)
577                 return;
578
579         switch (ex->vector) {
580         case DB_VECTOR:
581                 /*
582                  * "Certain debug exceptions may clear bits 0-3.  The
583                  * remaining contents of the DR6 register are never
584                  * cleared by the processor".
585                  */
586                 vcpu->arch.dr6 &= ~DR_TRAP_BITS;
587                 /*
588                  * In order to reflect the #DB exception payload in guest
589                  * dr6, three components need to be considered: the active-low
590                  * bits, the FIXED_1 bits, and the active-high bits (e.g. DR6_BD,
591                  * DR6_BS and DR6_BT).
592                  * DR6_ACTIVE_LOW contains the FIXED_1 and active-low bits.
593                  * In the target guest dr6:
594                  * FIXED_1 bits should always be set.
595                  * Active-low bits should be cleared if set in the payload.
596                  * Active-high bits should be set if set in the payload.
597                  *
598                  * Note, the payload is compatible with the pending debug
599                  * exceptions/exit qualification under VMX: active-low bits
600                  * are active-high in the payload, so they need to be flipped
601                  * for DR6.
602                  */
603                 vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
604                 vcpu->arch.dr6 |= ex->payload;
605                 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
606
607                 /*
608                  * The #DB payload is defined as compatible with the 'pending
609                  * debug exceptions' field under VMX, not DR6. While bit 12 is
610                  * defined in the 'pending debug exceptions' field (enabled
611                  * breakpoint), it is reserved and must be zero in DR6.
612                  */
613                 vcpu->arch.dr6 &= ~BIT(12);
614                 break;
615         case PF_VECTOR:
616                 vcpu->arch.cr2 = ex->payload;
617                 break;
618         }
619
620         ex->has_payload = false;
621         ex->payload = 0;
622 }
623 EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
624
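/*
 * Queue a pending exception that is destined to be delivered as a VM-Exit to
 * L1 instead of being injected into L2.
 */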
625 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
626                                        bool has_error_code, u32 error_code,
627                                        bool has_payload, unsigned long payload)
628 {
629         struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
630
631         ex->vector = vector;
632         ex->injected = false;
633         ex->pending = true;
634         ex->has_error_code = has_error_code;
635         ex->error_code = error_code;
636         ex->has_payload = has_payload;
637         ex->payload = payload;
638 }
639
640 /* Forcibly leave nested mode, e.g. on vCPU reset */
641 static void kvm_leave_nested(struct kvm_vcpu *vcpu)
642 {
643         kvm_x86_ops.nested_ops->leave_nested(vcpu);
644 }
645
646 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
647                 unsigned nr, bool has_error, u32 error_code,
648                 bool has_payload, unsigned long payload, bool reinject)
649 {
650         u32 prev_nr;
651         int class1, class2;
652
653         kvm_make_request(KVM_REQ_EVENT, vcpu);
654
655         /*
656          * If the exception is destined for L2 and isn't being reinjected,
657          * morph it to a VM-Exit if L1 wants to intercept the exception.  A
658          * previously injected exception is not checked because it was checked
659          * when it was original queued, and re-checking is incorrect if _L1_
660          * injected the exception, in which case it's exempt from interception.
661          */
662         if (!reinject && is_guest_mode(vcpu) &&
663             kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
664                 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
665                                            has_payload, payload);
666                 return;
667         }
668
669         if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
670         queue:
671                 if (reinject) {
672                         /*
673                          * On VM-Entry, an exception can be pending if and only
674                          * if event injection was blocked by nested_run_pending.
675                          * In that case, however, vcpu_enter_guest() requests an
676                          * immediate exit, and the guest shouldn't proceed far
677                          * enough to need reinjection.
678                          */
679                         WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
680                         vcpu->arch.exception.injected = true;
681                         if (WARN_ON_ONCE(has_payload)) {
682                                 /*
683                                  * A reinjected event has already
684                                  * delivered its payload.
685                                  */
686                                 has_payload = false;
687                                 payload = 0;
688                         }
689                 } else {
690                         vcpu->arch.exception.pending = true;
691                         vcpu->arch.exception.injected = false;
692                 }
693                 vcpu->arch.exception.has_error_code = has_error;
694                 vcpu->arch.exception.vector = nr;
695                 vcpu->arch.exception.error_code = error_code;
696                 vcpu->arch.exception.has_payload = has_payload;
697                 vcpu->arch.exception.payload = payload;
698                 if (!is_guest_mode(vcpu))
699                         kvm_deliver_exception_payload(vcpu,
700                                                       &vcpu->arch.exception);
701                 return;
702         }
703
704         /* An exception is already queued; check for escalation to #DF or shutdown. */
705         prev_nr = vcpu->arch.exception.vector;
706         if (prev_nr == DF_VECTOR) {
707                 /* triple fault -> shutdown */
708                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
709                 return;
710         }
711         class1 = exception_class(prev_nr);
712         class2 = exception_class(nr);
713         if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
714             (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
715                 /*
716                  * Synthesize #DF.  Clear the previously injected or pending
717                  * exception so as not to incorrectly trigger shutdown.
718                  */
719                 vcpu->arch.exception.injected = false;
720                 vcpu->arch.exception.pending = false;
721
722                 kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
723         } else {
724                 /* Replace the previous exception with the new one in the
725                    hope that re-executing the instruction will regenerate
726                    the lost exception. */
727                 goto queue;
728         }
729 }
730
731 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
732 {
733         kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
734 }
735 EXPORT_SYMBOL_GPL(kvm_queue_exception);
736
737 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
738 {
739         kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
740 }
741 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
742
743 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
744                            unsigned long payload)
745 {
746         kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
747 }
748 EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
749
750 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
751                                     u32 error_code, unsigned long payload)
752 {
753         kvm_multiple_exception(vcpu, nr, true, error_code,
754                                true, payload, false);
755 }
756
757 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
758 {
759         if (err)
760                 kvm_inject_gp(vcpu, 0);
761         else
762                 return kvm_skip_emulated_instruction(vcpu);
763
764         return 1;
765 }
766 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
767
768 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
769 {
770         if (err) {
771                 kvm_inject_gp(vcpu, 0);
772                 return 1;
773         }
774
775         return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
776                                        EMULTYPE_COMPLETE_USER_EXIT);
777 }
778
779 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
780 {
781         ++vcpu->stat.pf_guest;
782
783         /*
784          * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
785          * whether or not L1 wants to intercept "regular" #PF.
786          */
787         if (is_guest_mode(vcpu) && fault->async_page_fault)
788                 kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
789                                            true, fault->error_code,
790                                            true, fault->address);
791         else
792                 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
793                                         fault->address);
794 }
795
796 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
797                                     struct x86_exception *fault)
798 {
799         struct kvm_mmu *fault_mmu;
800         WARN_ON_ONCE(fault->vector != PF_VECTOR);
801
802         fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
803                                                vcpu->arch.walk_mmu;
804
805         /*
806          * Invalidate the TLB entry for the faulting address, if it exists,
807          * else the access will fault indefinitely (the invalidation also emulates hardware behavior).
808          */
809         if ((fault->error_code & PFERR_PRESENT_MASK) &&
810             !(fault->error_code & PFERR_RSVD_MASK))
811                 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address,
812                                         KVM_MMU_ROOT_CURRENT);
813
814         fault_mmu->inject_page_fault(vcpu, fault);
815 }
816 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
817
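/* Queue an NMI for the vCPU and request NMI processing on the next entry. */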
818 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
819 {
820         atomic_inc(&vcpu->arch.nmi_queued);
821         kvm_make_request(KVM_REQ_NMI, vcpu);
822 }
823
824 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
825 {
826         kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
827 }
828 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
829
830 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
831 {
832         kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
833 }
834 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
835
836 /*
837  * Check whether cpl <= required_cpl; if so, return true.  Otherwise queue
838  * a #GP and return false.
839  */
840 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
841 {
842         if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl)
843                 return true;
844         kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
845         return false;
846 }
847
848 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr)
849 {
850         if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE))
851                 return true;
852
853         kvm_queue_exception(vcpu, UD_VECTOR);
854         return false;
855 }
856 EXPORT_SYMBOL_GPL(kvm_require_dr);
857
858 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
859 {
860         return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2);
861 }
862
863 /*
864  * Load the PAE PDPTRs.  Return 1 if they are all valid, 0 otherwise.
865  */
866 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
867 {
868         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
869         gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
870         gpa_t real_gpa;
871         int i;
872         int ret;
873         u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
874
875         /*
876          * If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
877          * to an L1 GPA.
878          */
879         real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
880                                      PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
881         if (real_gpa == INVALID_GPA)
882                 return 0;
883
884         /* Note the offset, PDPTRs are 32-byte aligned when using PAE paging. */
885         ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte,
886                                        cr3 & GENMASK(11, 5), sizeof(pdpte));
887         if (ret < 0)
888                 return 0;
889
890         for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
891                 if ((pdpte[i] & PT_PRESENT_MASK) &&
892                     (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
893                         return 0;
894                 }
895         }
896
897         /*
898          * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
899          * Shadow page roots need to be reconstructed instead.
900          */
901         if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
902                 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT);
903
904         memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
905         kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
906         kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
907         vcpu->arch.pdptrs_from_userspace = false;
908
909         return 1;
910 }
911 EXPORT_SYMBOL_GPL(load_pdptrs);
912
913 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
914 {
915 #ifdef CONFIG_X86_64
916         if (cr0 & 0xffffffff00000000UL)
917                 return false;
918 #endif
919
920         if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
921                 return false;
922
923         if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
924                 return false;
925
926         return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0);
927 }
928
929 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0)
930 {
931         /*
932          * CR0.WP is incorporated into the MMU role, but only for non-nested,
933          * indirect shadow MMUs.  If paging is disabled, no updates are needed
934          * as there are no permission bits to emulate.  If TDP is enabled, the
935          * MMU's metadata needs to be updated, e.g. so that emulating guest
936          * translations does the right thing, but there's no need to unload the
937          * root as CR0.WP doesn't affect SPTEs.
938          */
939         if ((cr0 ^ old_cr0) == X86_CR0_WP) {
940                 if (!(cr0 & X86_CR0_PG))
941                         return;
942
943                 if (tdp_enabled) {
944                         kvm_init_mmu(vcpu);
945                         return;
946                 }
947         }
948
949         if ((cr0 ^ old_cr0) & X86_CR0_PG) {
950                 kvm_clear_async_pf_completion_queue(vcpu);
951                 kvm_async_pf_hash_reset(vcpu);
952
953                 /*
954                  * Clearing CR0.PG is defined to flush the TLB from the guest's
955                  * perspective.
956                  */
957                 if (!(cr0 & X86_CR0_PG))
958                         kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
959         }
960
961         if ((cr0 ^ old_cr0) & KVM_MMU_CR0_ROLE_BITS)
962                 kvm_mmu_reset_context(vcpu);
963
964         if (((cr0 ^ old_cr0) & X86_CR0_CD) &&
965             kvm_arch_has_noncoherent_dma(vcpu->kvm) &&
966             !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
967                 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL);
968 }
969 EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
970
971 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
972 {
973         unsigned long old_cr0 = kvm_read_cr0(vcpu);
974
975         if (!kvm_is_valid_cr0(vcpu, cr0))
976                 return 1;
977
978         cr0 |= X86_CR0_ET;
979
980         /* Writes to the CR0 reserved bits are ignored, even on Intel. */
981         cr0 &= ~CR0_RESERVED_BITS;
982
983 #ifdef CONFIG_X86_64
984         if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&
985             (cr0 & X86_CR0_PG)) {
986                 int cs_db, cs_l;
987
988                 if (!is_pae(vcpu))
989                         return 1;
990                 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
991                 if (cs_l)
992                         return 1;
993         }
994 #endif
995         if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
996             is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
997             !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
998                 return 1;
999
1000         if (!(cr0 & X86_CR0_PG) &&
1001             (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)))
1002                 return 1;
1003
1004         static_call(kvm_x86_set_cr0)(vcpu, cr0);
1005
1006         kvm_post_set_cr0(vcpu, old_cr0, cr0);
1007
1008         return 0;
1009 }
1010 EXPORT_SYMBOL_GPL(kvm_set_cr0);
1011
1012 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1013 {
1014         (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
1015 }
1016 EXPORT_SYMBOL_GPL(kvm_lmsw);
1017
1018 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
1019 {
1020         if (vcpu->arch.guest_state_protected)
1021                 return;
1022
1023         if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1024
1025                 if (vcpu->arch.xcr0 != host_xcr0)
1026                         xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
1027
1028                 if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
1029                     vcpu->arch.ia32_xss != host_xss)
1030                         wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
1031         }
1032
1033         if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1034             vcpu->arch.pkru != vcpu->arch.host_pkru &&
1035             ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1036              kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE)))
1037                 write_pkru(vcpu->arch.pkru);
1038 }
1039 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
1040
1041 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
1042 {
1043         if (vcpu->arch.guest_state_protected)
1044                 return;
1045
1046         if (cpu_feature_enabled(X86_FEATURE_PKU) &&
1047             ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) ||
1048              kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) {
1049                 vcpu->arch.pkru = rdpkru();
1050                 if (vcpu->arch.pkru != vcpu->arch.host_pkru)
1051                         write_pkru(vcpu->arch.host_pkru);
1052         }
1053
1054         if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) {
1055
1056                 if (vcpu->arch.xcr0 != host_xcr0)
1057                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
1058
1059                 if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
1060                     vcpu->arch.ia32_xss != host_xss)
1061                         wrmsrl(MSR_IA32_XSS, host_xss);
1062         }
1063
1064 }
1065 EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
1066
1067 #ifdef CONFIG_X86_64
1068 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
1069 {
1070         return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
1071 }
1072 #endif
1073
1074 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1075 {
1076         u64 xcr0 = xcr;
1077         u64 old_xcr0 = vcpu->arch.xcr0;
1078         u64 valid_bits;
1079
1080         /* Only XCR_XFEATURE_ENABLED_MASK (i.e. xcr0) is supported for now. */
1081         if (index != XCR_XFEATURE_ENABLED_MASK)
1082                 return 1;
1083         if (!(xcr0 & XFEATURE_MASK_FP))
1084                 return 1;
1085         if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
1086                 return 1;
1087
1088         /*
1089          * Do not allow the guest to set bits that we do not support
1090          * saving.  However, xcr0 bit 0 is always set, even if the
1091          * emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
1092          */
1093         valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
1094         if (xcr0 & ~valid_bits)
1095                 return 1;
1096
1097         if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
1098             (!(xcr0 & XFEATURE_MASK_BNDCSR)))
1099                 return 1;
1100
1101         if (xcr0 & XFEATURE_MASK_AVX512) {
1102                 if (!(xcr0 & XFEATURE_MASK_YMM))
1103                         return 1;
1104                 if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
1105                         return 1;
1106         }
1107
1108         if ((xcr0 & XFEATURE_MASK_XTILE) &&
1109             ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
1110                 return 1;
1111
1112         vcpu->arch.xcr0 = xcr0;
1113
1114         if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
1115                 kvm_update_cpuid_runtime(vcpu);
1116         return 0;
1117 }
1118
1119 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
1120 {
1121         /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
1122         if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
1123             __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
1124                 kvm_inject_gp(vcpu, 0);
1125                 return 1;
1126         }
1127
1128         return kvm_skip_emulated_instruction(vcpu);
1129 }
1130 EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
1131
1132 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1133 {
1134         if (cr4 & cr4_reserved_bits)
1135                 return false;
1136
1137         if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
1138                 return false;
1139
1140         return true;
1141 }
1142 EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
1143
1144 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1145 {
1146         return __kvm_is_valid_cr4(vcpu, cr4) &&
1147                static_call(kvm_x86_is_valid_cr4)(vcpu, cr4);
1148 }
1149
1150 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4)
1151 {
1152         if ((cr4 ^ old_cr4) & KVM_MMU_CR4_ROLE_BITS)
1153                 kvm_mmu_reset_context(vcpu);
1154
1155         /*
1156          * If CR4.PCIDE is changed 0 -> 1, there is no need to flush the TLB
1157          * according to the SDM; however, stale prev_roots could be reused
1158          * incorrectly in the future after a MOV to CR3 with NOFLUSH=1, so we
1159          * free them all.  This is *not* a superset of KVM_REQ_TLB_FLUSH_GUEST
1160          * or KVM_REQ_TLB_FLUSH_CURRENT, because the hardware TLB is not flushed,
1161          * so fall through.
1162          */
1163         if (!tdp_enabled &&
1164             (cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE))
1165                 kvm_mmu_unload(vcpu);
1166
1167         /*
1168          * The TLB has to be flushed for all PCIDs if any of the following
1169          * (architecturally required) changes happen:
1170          * - CR4.PCIDE is changed from 1 to 0
1171          * - CR4.PGE is toggled
1172          *
1173          * This is a superset of KVM_REQ_TLB_FLUSH_CURRENT.
1174          */
1175         if (((cr4 ^ old_cr4) & X86_CR4_PGE) ||
1176             (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
1177                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1178
1179         /*
1180          * The TLB has to be flushed for the current PCID if any of the
1181          * following (architecturally required) changes happen:
1182          * - CR4.SMEP is changed from 0 to 1
1183          * - CR4.PAE is toggled
1184          */
1185         else if (((cr4 ^ old_cr4) & X86_CR4_PAE) ||
1186                  ((cr4 & X86_CR4_SMEP) && !(old_cr4 & X86_CR4_SMEP)))
1187                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1188
1189 }
1190 EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
1191
1192 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1193 {
1194         unsigned long old_cr4 = kvm_read_cr4(vcpu);
1195
1196         if (!kvm_is_valid_cr4(vcpu, cr4))
1197                 return 1;
1198
1199         if (is_long_mode(vcpu)) {
1200                 if (!(cr4 & X86_CR4_PAE))
1201                         return 1;
1202                 if ((cr4 ^ old_cr4) & X86_CR4_LA57)
1203                         return 1;
1204         } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
1205                    && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
1206                    && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
1207                 return 1;
1208
1209         if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
1210                 /* PCID cannot be enabled when CR3[11:0] != 000H or EFER.LMA = 0. */
1211                 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
1212                         return 1;
1213         }
1214
1215         static_call(kvm_x86_set_cr4)(vcpu, cr4);
1216
1217         kvm_post_set_cr4(vcpu, old_cr4, cr4);
1218
1219         return 0;
1220 }
1221 EXPORT_SYMBOL_GPL(kvm_set_cr4);
1222
1223 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid)
1224 {
1225         struct kvm_mmu *mmu = vcpu->arch.mmu;
1226         unsigned long roots_to_free = 0;
1227         int i;
1228
1229         /*
1230          * MOV CR3 and INVPCID are usually not intercepted when using TDP, but
1231          * this is reachable when running with EPT=1 and unrestricted_guest=0, and
1232          * also via the emulator.  KVM's TDP page tables are not in the scope of
1233          * the invalidation, but the guest's TLB entries need to be flushed as
1234          * the CPU may have cached entries in its TLB for the target PCID.
1235          */
1236         if (unlikely(tdp_enabled)) {
1237                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1238                 return;
1239         }
1240
1241         /*
1242          * If neither the current CR3 nor any of the prev_roots use the given
1243          * PCID, then nothing needs to be done here because a resync will
1244          * happen anyway before switching to any other CR3.
1245          */
1246         if (kvm_get_active_pcid(vcpu) == pcid) {
1247                 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
1248                 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1249         }
1250
1251         /*
1252          * If PCID is disabled, there is no need to free prev_roots even if the
1253          * PCIDs for them are also 0, because MOV to CR3 always flushes the TLB
1254          * with PCIDE=0.
1255          */
1256         if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))
1257                 return;
1258
1259         for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
1260                 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid)
1261                         roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
1262
1263         kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
1264 }
1265
1266 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1267 {
1268         bool skip_tlb_flush = false;
1269         unsigned long pcid = 0;
1270 #ifdef CONFIG_X86_64
1271         if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) {
1272                 skip_tlb_flush = cr3 & X86_CR3_PCID_NOFLUSH;
1273                 cr3 &= ~X86_CR3_PCID_NOFLUSH;
1274                 pcid = cr3 & X86_CR3_PCID_MASK;
1275         }
1276 #endif
1277
1278         /* PDPTRs are always reloaded for PAE paging. */
1279         if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu))
1280                 goto handle_tlb_flush;
1281
1282         /*
1283          * Do not condition the GPA check on long mode, this helper is used to
1284          * stuff CR3, e.g. for RSM emulation, and there is no guarantee that
1285          * the current vCPU mode is accurate.
1286          */
1287         if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
1288                 return 1;
1289
1290         if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
1291                 return 1;
1292
1293         if (cr3 != kvm_read_cr3(vcpu))
1294                 kvm_mmu_new_pgd(vcpu, cr3);
1295
1296         vcpu->arch.cr3 = cr3;
1297         kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1298         /* Do not call post_set_cr3, we do not get here for confidential guests. */
1299
1300 handle_tlb_flush:
1301         /*
1302          * A load of CR3 that flushes the TLB flushes only the current PCID,
1303          * even if PCID is disabled, in which case PCID=0 is flushed.  It's a
1304          * moot point in the end because _disabling_ PCID will flush all PCIDs,
1305          * and it's impossible to use a non-zero PCID when PCID is disabled,
1306          * i.e. only PCID=0 can be relevant.
1307          */
1308         if (!skip_tlb_flush)
1309                 kvm_invalidate_pcid(vcpu, pcid);
1310
1311         return 0;
1312 }
1313 EXPORT_SYMBOL_GPL(kvm_set_cr3);
1314
1315 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
1316 {
1317         if (cr8 & CR8_RESERVED_BITS)
1318                 return 1;
1319         if (lapic_in_kernel(vcpu))
1320                 kvm_lapic_set_tpr(vcpu, cr8);
1321         else
1322                 vcpu->arch.cr8 = cr8;
1323         return 0;
1324 }
1325 EXPORT_SYMBOL_GPL(kvm_set_cr8);
1326
1327 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
1328 {
1329         if (lapic_in_kernel(vcpu))
1330                 return kvm_lapic_get_cr8(vcpu);
1331         else
1332                 return vcpu->arch.cr8;
1333 }
1334 EXPORT_SYMBOL_GPL(kvm_get_cr8);
1335
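/*
 * Sync the effective debug registers with the guest's DR0-DR3, unless the
 * host debugger owns the hardware breakpoints (KVM_GUESTDBG_USE_HW_BP).
 */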
1336 static void kvm_update_dr0123(struct kvm_vcpu *vcpu)
1337 {
1338         int i;
1339
1340         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
1341                 for (i = 0; i < KVM_NR_DB_REGS; i++)
1342                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
1343         }
1344 }
1345
1346 void kvm_update_dr7(struct kvm_vcpu *vcpu)
1347 {
1348         unsigned long dr7;
1349
1350         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1351                 dr7 = vcpu->arch.guest_debug_dr7;
1352         else
1353                 dr7 = vcpu->arch.dr7;
1354         static_call(kvm_x86_set_dr7)(vcpu, dr7);
1355         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED;
1356         if (dr7 & DR7_BP_EN_MASK)
1357                 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
1358 }
1359 EXPORT_SYMBOL_GPL(kvm_update_dr7);
1360
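/*
 * Compute the DR6 bits that are fixed to '1' for this vCPU; the RTM and
 * bus-lock bits are fixed only if the corresponding feature isn't exposed to
 * the guest.
 */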
1361 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
1362 {
1363         u64 fixed = DR6_FIXED_1;
1364
1365         if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM))
1366                 fixed |= DR6_RTM;
1367
1368         if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT))
1369                 fixed |= DR6_BUS_LOCK;
1370         return fixed;
1371 }
1372
1373 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
1374 {
1375         size_t size = ARRAY_SIZE(vcpu->arch.db);
1376
1377         switch (dr) {
1378         case 0 ... 3:
1379                 vcpu->arch.db[array_index_nospec(dr, size)] = val;
1380                 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
1381                         vcpu->arch.eff_db[dr] = val;
1382                 break;
1383         case 4:
1384         case 6:
1385                 if (!kvm_dr6_valid(val))
1386                         return 1; /* #GP */
1387                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
1388                 break;
1389         case 5:
1390         default: /* 7 */
1391                 if (!kvm_dr7_valid(val))
1392                         return 1; /* #GP */
1393                 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
1394                 kvm_update_dr7(vcpu);
1395                 break;
1396         }
1397
1398         return 0;
1399 }
1400 EXPORT_SYMBOL_GPL(kvm_set_dr);
1401
1402 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
1403 {
1404         size_t size = ARRAY_SIZE(vcpu->arch.db);
1405
1406         switch (dr) {
1407         case 0 ... 3:
1408                 *val = vcpu->arch.db[array_index_nospec(dr, size)];
1409                 break;
1410         case 4:
1411         case 6:
1412                 *val = vcpu->arch.dr6;
1413                 break;
1414         case 5:
1415         default: /* 7 */
1416                 *val = vcpu->arch.dr7;
1417                 break;
1418         }
1419 }
1420 EXPORT_SYMBOL_GPL(kvm_get_dr);
1421
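/*
 * Emulate RDPMC: read the PMC selected by ECX via the virtual PMU and return
 * the value in EDX:EAX, or inject #GP if the PMC index is invalid.
 */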
1422 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu)
1423 {
1424         u32 ecx = kvm_rcx_read(vcpu);
1425         u64 data;
1426
1427         if (kvm_pmu_rdpmc(vcpu, ecx, &data)) {
1428                 kvm_inject_gp(vcpu, 0);
1429                 return 1;
1430         }
1431
1432         kvm_rax_write(vcpu, (u32)data);
1433         kvm_rdx_write(vcpu, data >> 32);
1434         return kvm_skip_emulated_instruction(vcpu);
1435 }
1436 EXPORT_SYMBOL_GPL(kvm_emulate_rdpmc);
1437
1438 /*
1439  * The three MSR lists (msrs_to_save, emulated_msrs, msr_based_features) track
1440  * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS,
1441  * KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.  msrs_to_save holds MSRs that
1442  * require host support, i.e. should be probed via RDMSR.  emulated_msrs holds
1443  * MSRs that KVM emulates without strictly requiring host support.
1444  * msr_based_features holds MSRs that enumerate features, i.e. are effectively
1445  * CPUID leaves.  Note, msr_based_features isn't mutually exclusive with
1446  * msrs_to_save and emulated_msrs.
1447  */
1448
1449 static const u32 msrs_to_save_base[] = {
1450         MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
1451         MSR_STAR,
1452 #ifdef CONFIG_X86_64
1453         MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
1454 #endif
1455         MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
1456         MSR_IA32_FEAT_CTL, MSR_IA32_BNDCFGS, MSR_TSC_AUX,
1457         MSR_IA32_SPEC_CTRL, MSR_IA32_TSX_CTRL,
1458         MSR_IA32_RTIT_CTL, MSR_IA32_RTIT_STATUS, MSR_IA32_RTIT_CR3_MATCH,
1459         MSR_IA32_RTIT_OUTPUT_BASE, MSR_IA32_RTIT_OUTPUT_MASK,
1460         MSR_IA32_RTIT_ADDR0_A, MSR_IA32_RTIT_ADDR0_B,
1461         MSR_IA32_RTIT_ADDR1_A, MSR_IA32_RTIT_ADDR1_B,
1462         MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B,
1463         MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B,
1464         MSR_IA32_UMWAIT_CONTROL,
1465
1466         MSR_IA32_XFD, MSR_IA32_XFD_ERR,
1467 };
1468
1469 static const u32 msrs_to_save_pmu[] = {
1470         MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
1471         MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
1472         MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
1473         MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
1474         MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
1475
1476         /* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
1477         MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
1478         MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
1479         MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
1480         MSR_ARCH_PERFMON_PERFCTR0 + 6, MSR_ARCH_PERFMON_PERFCTR0 + 7,
1481         MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
1482         MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
1483         MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
1484         MSR_ARCH_PERFMON_EVENTSEL0 + 6, MSR_ARCH_PERFMON_EVENTSEL0 + 7,
1485
1486         MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
1487         MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
1488
1489         /* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
1490         MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
1491         MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
1492         MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
1493         MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
1494
1495         MSR_AMD64_PERF_CNTR_GLOBAL_CTL,
1496         MSR_AMD64_PERF_CNTR_GLOBAL_STATUS,
1497         MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR,
1498 };
1499
1500 static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_base) +
1501                         ARRAY_SIZE(msrs_to_save_pmu)];
1502 static unsigned num_msrs_to_save;
1503
1504 static const u32 emulated_msrs_all[] = {
1505         MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
1506         MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
1507         HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
1508         HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
1509         HV_X64_MSR_TSC_FREQUENCY, HV_X64_MSR_APIC_FREQUENCY,
1510         HV_X64_MSR_CRASH_P0, HV_X64_MSR_CRASH_P1, HV_X64_MSR_CRASH_P2,
1511         HV_X64_MSR_CRASH_P3, HV_X64_MSR_CRASH_P4, HV_X64_MSR_CRASH_CTL,
1512         HV_X64_MSR_RESET,
1513         HV_X64_MSR_VP_INDEX,
1514         HV_X64_MSR_VP_RUNTIME,
1515         HV_X64_MSR_SCONTROL,
1516         HV_X64_MSR_STIMER0_CONFIG,
1517         HV_X64_MSR_VP_ASSIST_PAGE,
1518         HV_X64_MSR_REENLIGHTENMENT_CONTROL, HV_X64_MSR_TSC_EMULATION_CONTROL,
1519         HV_X64_MSR_TSC_EMULATION_STATUS, HV_X64_MSR_TSC_INVARIANT_CONTROL,
1520         HV_X64_MSR_SYNDBG_OPTIONS,
1521         HV_X64_MSR_SYNDBG_CONTROL, HV_X64_MSR_SYNDBG_STATUS,
1522         HV_X64_MSR_SYNDBG_SEND_BUFFER, HV_X64_MSR_SYNDBG_RECV_BUFFER,
1523         HV_X64_MSR_SYNDBG_PENDING_BUFFER,
1524
1525         MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
1526         MSR_KVM_PV_EOI_EN, MSR_KVM_ASYNC_PF_INT, MSR_KVM_ASYNC_PF_ACK,
1527
1528         MSR_IA32_TSC_ADJUST,
1529         MSR_IA32_TSC_DEADLINE,
1530         MSR_IA32_ARCH_CAPABILITIES,
1531         MSR_IA32_PERF_CAPABILITIES,
1532         MSR_IA32_MISC_ENABLE,
1533         MSR_IA32_MCG_STATUS,
1534         MSR_IA32_MCG_CTL,
1535         MSR_IA32_MCG_EXT_CTL,
1536         MSR_IA32_SMBASE,
1537         MSR_SMI_COUNT,
1538         MSR_PLATFORM_INFO,
1539         MSR_MISC_FEATURES_ENABLES,
1540         MSR_AMD64_VIRT_SPEC_CTRL,
1541         MSR_AMD64_TSC_RATIO,
1542         MSR_IA32_POWER_CTL,
1543         MSR_IA32_UCODE_REV,
1544
1545         /*
1546          * KVM always supports the "true" VMX control MSRs, even if the host
1547          * does not.  The VMX MSRs as a whole are considered "emulated" as KVM
1548          * doesn't strictly require them to exist in the host (ignoring that
1549          * KVM would refuse to load in the first place if the core set of MSRs
1550          * aren't supported).
1551          */
1552         MSR_IA32_VMX_BASIC,
1553         MSR_IA32_VMX_TRUE_PINBASED_CTLS,
1554         MSR_IA32_VMX_TRUE_PROCBASED_CTLS,
1555         MSR_IA32_VMX_TRUE_EXIT_CTLS,
1556         MSR_IA32_VMX_TRUE_ENTRY_CTLS,
1557         MSR_IA32_VMX_MISC,
1558         MSR_IA32_VMX_CR0_FIXED0,
1559         MSR_IA32_VMX_CR4_FIXED0,
1560         MSR_IA32_VMX_VMCS_ENUM,
1561         MSR_IA32_VMX_PROCBASED_CTLS2,
1562         MSR_IA32_VMX_EPT_VPID_CAP,
1563         MSR_IA32_VMX_VMFUNC,
1564
1565         MSR_K7_HWCR,
1566         MSR_KVM_POLL_CONTROL,
1567 };
1568
1569 static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
1570 static unsigned num_emulated_msrs;
1571
1572 /*
1573  * List of MSRs that control the existence of MSR-based features, i.e. MSRs
1574  * that are effectively CPUID leafs.  VMX MSRs are also included in the set of
1575  * feature MSRs, but are handled separately to allow expedited lookups.
1576  */
1577 static const u32 msr_based_features_all_except_vmx[] = {
1578         MSR_AMD64_DE_CFG,
1579         MSR_IA32_UCODE_REV,
1580         MSR_IA32_ARCH_CAPABILITIES,
1581         MSR_IA32_PERF_CAPABILITIES,
1582 };
1583
1584 static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all_except_vmx) +
1585                               (KVM_LAST_EMULATED_VMX_MSR - KVM_FIRST_EMULATED_VMX_MSR + 1)];
1586 static unsigned int num_msr_based_features;
1587
1588 /*
1589  * All feature MSRs except uCode revID, which tracks the currently loaded uCode
1590  * patch, are immutable once the vCPU model is defined.
1591  */
1592 static bool kvm_is_immutable_feature_msr(u32 msr)
1593 {
1594         int i;
1595
1596         if (msr >= KVM_FIRST_EMULATED_VMX_MSR && msr <= KVM_LAST_EMULATED_VMX_MSR)
1597                 return true;
1598
1599         for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++) {
1600                 if (msr == msr_based_features_all_except_vmx[i])
1601                         return msr != MSR_IA32_UCODE_REV;
1602         }
1603
1604         return false;
1605 }
1606
1607 /*
1608  * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
1609  * does not yet virtualize. These include:
1610  *   10 - MISC_PACKAGE_CTRLS
1611  *   11 - ENERGY_FILTERING_CTL
1612  *   12 - DOITM
1613  *   18 - FB_CLEAR_CTRL
1614  *   21 - XAPIC_DISABLE_STATUS
1615  *   23 - OVERCLOCKING_STATUS
1616  */
1617
1618 #define KVM_SUPPORTED_ARCH_CAP \
1619         (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
1620          ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
1621          ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
1622          ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
1623          ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO)
1624
1625 static u64 kvm_get_arch_capabilities(void)
1626 {
1627         u64 data = host_arch_capabilities & KVM_SUPPORTED_ARCH_CAP;
1628
1629         /*
1630          * If nx_huge_pages is enabled, KVM's shadow paging will ensure that
1631          * the nested hypervisor runs with NX huge pages.  If it is not,
1632          * L1 is anyway vulnerable to ITLB_MULTIHIT exploits from other
1633          * L1 guests, so it need not worry about its own (L2) guests.
1634          */
1635         data |= ARCH_CAP_PSCHANGE_MC_NO;
1636
1637         /*
1638          * If we're doing cache flushes (either "always" or "cond")
1639          * we will do one whenever the guest does a vmlaunch/vmresume.
1640          * If an outer hypervisor is doing the cache flush for us
1641          * (ARCH_CAP_SKIP_VMENTRY_L1DFLUSH), we can safely pass that
1642          * capability to the guest too, and if EPT is disabled we're not
1643          * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
1644          * require a nested hypervisor to do a flush of its own.
1645          */
1646         if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
1647                 data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
1648
1649         if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
1650                 data |= ARCH_CAP_RDCL_NO;
1651         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1652                 data |= ARCH_CAP_SSB_NO;
1653         if (!boot_cpu_has_bug(X86_BUG_MDS))
1654                 data |= ARCH_CAP_MDS_NO;
1655
1656         if (!boot_cpu_has(X86_FEATURE_RTM)) {
1657                 /*
1658                  * If RTM=0 because the kernel has disabled TSX, the host might
1659                  * have TAA_NO or TSX_CTRL.  Clear TAA_NO (the guest sees RTM=0
1660                  * and therefore knows that there cannot be TAA) but keep
1661                  * TSX_CTRL: some buggy userspaces leave it set on tsx=on hosts,
1662                  * and we want to allow migrating those guests to tsx=off hosts.
1663                  */
1664                 data &= ~ARCH_CAP_TAA_NO;
1665         } else if (!boot_cpu_has_bug(X86_BUG_TAA)) {
1666                 data |= ARCH_CAP_TAA_NO;
1667         } else {
1668                 /*
1669                  * Nothing to do here; we emulate TSX_CTRL if present on the
1670                  * host so the guest can choose between disabling TSX or
1671                  * using VERW to clear CPU buffers.
1672                  */
1673         }
1674
1675         if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated())
1676                 data |= ARCH_CAP_GDS_NO;
1677
1678         return data;
1679 }
1680
1681 static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
1682 {
1683         switch (msr->index) {
1684         case MSR_IA32_ARCH_CAPABILITIES:
1685                 msr->data = kvm_get_arch_capabilities();
1686                 break;
1687         case MSR_IA32_PERF_CAPABILITIES:
1688                 msr->data = kvm_caps.supported_perf_cap;
1689                 break;
1690         case MSR_IA32_UCODE_REV:
1691                 rdmsrl_safe(msr->index, &msr->data);
1692                 break;
1693         default:
1694                 return static_call(kvm_x86_get_msr_feature)(msr);
1695         }
1696         return 0;
1697 }
1698
1699 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1700 {
1701         struct kvm_msr_entry msr;
1702         int r;
1703
1704         msr.index = index;
1705         r = kvm_get_msr_feature(&msr);
1706
1707         if (r == KVM_MSR_RET_INVALID) {
1708                 /* Unconditionally clear the output for simplicity */
1709                 *data = 0;
1710                 if (kvm_msr_ignored_check(index, 0, false))
1711                         r = 0;
1712         }
1713
1714         if (r)
1715                 return r;
1716
1717         *data = msr.data;
1718
1719         return 0;
1720 }
1721
1722 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1723 {
1724         if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))
1725                 return false;
1726
1727         if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))
1728                 return false;
1729
1730         if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))
1731                 return false;
1732
1733         if (efer & (EFER_LME | EFER_LMA) &&
1734             !guest_cpuid_has(vcpu, X86_FEATURE_LM))
1735                 return false;
1736
1737         if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))
1738                 return false;
1739
1740         return true;
1741 }
1742
1743 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
1744 {
1745         if (efer & efer_reserved_bits)
1746                 return false;
1747
1748         return __kvm_valid_efer(vcpu, efer);
1749 }
1750 EXPORT_SYMBOL_GPL(kvm_valid_efer);
1751
1752 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1753 {
1754         u64 old_efer = vcpu->arch.efer;
1755         u64 efer = msr_info->data;
1756         int r;
1757
1758         if (efer & efer_reserved_bits)
1759                 return 1;
1760
1761         if (!msr_info->host_initiated) {
1762                 if (!__kvm_valid_efer(vcpu, efer))
1763                         return 1;
1764
1765                 if (is_paging(vcpu) &&
1766                     (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
1767                         return 1;
1768         }
1769
1770         efer &= ~EFER_LMA;
1771         efer |= vcpu->arch.efer & EFER_LMA;
1772
1773         r = static_call(kvm_x86_set_efer)(vcpu, efer);
1774         if (r) {
1775                 WARN_ON(r > 0);
1776                 return r;
1777         }
1778
1779         if ((efer ^ old_efer) & KVM_MMU_EFER_ROLE_BITS)
1780                 kvm_mmu_reset_context(vcpu);
1781
1782         return 0;
1783 }
1784
1785 void kvm_enable_efer_bits(u64 mask)
1786 {
1787         efer_reserved_bits &= ~mask;
1788 }
1789 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
1790
1791 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1792 {
1793         struct kvm_x86_msr_filter *msr_filter;
1794         struct msr_bitmap_range *ranges;
1795         struct kvm *kvm = vcpu->kvm;
1796         bool allowed;
1797         int idx;
1798         u32 i;
1799
1800         /* x2APIC MSRs do not support filtering. */
1801         if (index >= 0x800 && index <= 0x8ff)
1802                 return true;
1803
1804         idx = srcu_read_lock(&kvm->srcu);
1805
1806         msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu);
1807         if (!msr_filter) {
1808                 allowed = true;
1809                 goto out;
1810         }
1811
1812         allowed = msr_filter->default_allow;
1813         ranges = msr_filter->ranges;
1814
1815         for (i = 0; i < msr_filter->count; i++) {
1816                 u32 start = ranges[i].base;
1817                 u32 end = start + ranges[i].nmsrs;
1818                 u32 flags = ranges[i].flags;
1819                 unsigned long *bitmap = ranges[i].bitmap;
1820
1821                 if ((index >= start) && (index < end) && (flags & type)) {
1822                         allowed = test_bit(index - start, bitmap);
1823                         break;
1824                 }
1825         }
1826
1827 out:
1828         srcu_read_unlock(&kvm->srcu, idx);
1829
1830         return allowed;
1831 }
1832 EXPORT_SYMBOL_GPL(kvm_msr_allowed);
1833
1834 /*
1835  * Write @data into the MSR specified by @index.  Select MSR specific fault
1836  * checks are bypassed if @host_initiated is %true.
1837  * Returns 0 on success, non-0 otherwise.
1838  * Assumes vcpu_load() was already called.
1839  */
1840 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1841                          bool host_initiated)
1842 {
1843         struct msr_data msr;
1844
1845         switch (index) {
1846         case MSR_FS_BASE:
1847         case MSR_GS_BASE:
1848         case MSR_KERNEL_GS_BASE:
1849         case MSR_CSTAR:
1850         case MSR_LSTAR:
1851                 if (is_noncanonical_address(data, vcpu))
1852                         return 1;
1853                 break;
1854         case MSR_IA32_SYSENTER_EIP:
1855         case MSR_IA32_SYSENTER_ESP:
1856                 /*
1857                  * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1858                  * non-canonical address is written on Intel but not on
1859                  * AMD (which ignores the top 32-bits, because it does
1860                  * not implement 64-bit SYSENTER).
1861                  *
1862                  * 64-bit code should hence be able to write a non-canonical
1863                  * value on AMD.  Making the address canonical ensures that
1864                  * vmentry does not fail on Intel after writing a non-canonical
1865                  * value, and that something deterministic happens if the guest
1866                  * invokes 64-bit SYSENTER.
1867                  */
1868                 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu));
1869                 break;
1870         case MSR_TSC_AUX:
1871                 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1872                         return 1;
1873
1874                 if (!host_initiated &&
1875                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
1876                     !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
1877                         return 1;
1878
1879                 /*
1880                  * Per Intel's SDM, bits 63:32 are reserved, but AMD's APM has
1881                  * incomplete and conflicting architectural behavior.  Current
1882                  * AMD CPUs completely ignore bits 63:32, i.e. they aren't
1883                  * reserved and always read as zeros.  Enforce Intel's reserved
1884                  * bits check if and only if the guest CPU is Intel, and clear
1885                  * the bits in all other cases.  This ensures cross-vendor
1886                  * migration will provide consistent behavior for the guest.
1887                  */
1888                 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0)
1889                         return 1;
1890
1891                 data = (u32)data;
1892                 break;
1893         }
1894
1895         msr.data = data;
1896         msr.index = index;
1897         msr.host_initiated = host_initiated;
1898
1899         return static_call(kvm_x86_set_msr)(vcpu, &msr);
1900 }
1901
1902 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
1903                                      u32 index, u64 data, bool host_initiated)
1904 {
1905         int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
1906
1907         if (ret == KVM_MSR_RET_INVALID)
1908                 if (kvm_msr_ignored_check(index, data, true))
1909                         ret = 0;
1910
1911         return ret;
1912 }
1913
1914 /*
1915  * Read the MSR specified by @index into @data.  Select MSR specific fault
1916  * checks are bypassed if @host_initiated is %true.
1917  * Returns 0 on success, non-0 otherwise.
1918  * Assumes vcpu_load() was already called.
1919  */
1920 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1921                   bool host_initiated)
1922 {
1923         struct msr_data msr;
1924         int ret;
1925
1926         switch (index) {
1927         case MSR_TSC_AUX:
1928                 if (!kvm_is_supported_user_return_msr(MSR_TSC_AUX))
1929                         return 1;
1930
1931                 if (!host_initiated &&
1932                     !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) &&
1933                     !guest_cpuid_has(vcpu, X86_FEATURE_RDPID))
1934                         return 1;
1935                 break;
1936         }
1937
1938         msr.index = index;
1939         msr.host_initiated = host_initiated;
1940
1941         ret = static_call(kvm_x86_get_msr)(vcpu, &msr);
1942         if (!ret)
1943                 *data = msr.data;
1944         return ret;
1945 }
1946
1947 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
1948                                      u32 index, u64 *data, bool host_initiated)
1949 {
1950         int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
1951
1952         if (ret == KVM_MSR_RET_INVALID) {
1953                 /* Unconditionally clear *data for simplicity */
1954                 *data = 0;
1955                 if (kvm_msr_ignored_check(index, 0, false))
1956                         ret = 0;
1957         }
1958
1959         return ret;
1960 }
1961
1962 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1963 {
1964         if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
1965                 return KVM_MSR_RET_FILTERED;
1966         return kvm_get_msr_ignored_check(vcpu, index, data, false);
1967 }
1968
1969 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1970 {
1971         if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
1972                 return KVM_MSR_RET_FILTERED;
1973         return kvm_set_msr_ignored_check(vcpu, index, data, false);
1974 }
1975
1976 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1977 {
1978         return kvm_get_msr_ignored_check(vcpu, index, data, false);
1979 }
1980 EXPORT_SYMBOL_GPL(kvm_get_msr);
1981
1982 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1983 {
1984         return kvm_set_msr_ignored_check(vcpu, index, data, false);
1985 }
1986 EXPORT_SYMBOL_GPL(kvm_set_msr);
1987
1988 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
1989 {
1990         if (!vcpu->run->msr.error) {
1991                 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
1992                 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
1993         }
1994 }
1995
1996 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
1997 {
1998         return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
1999 }
2000
2001 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
2002 {
2003         complete_userspace_rdmsr(vcpu);
2004         return complete_emulated_msr_access(vcpu);
2005 }
2006
2007 static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
2008 {
2009         return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
2010 }
2011
2012 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
2013 {
2014         complete_userspace_rdmsr(vcpu);
2015         return complete_fast_msr_access(vcpu);
2016 }
2017
2018 static u64 kvm_msr_reason(int r)
2019 {
2020         switch (r) {
2021         case KVM_MSR_RET_INVALID:
2022                 return KVM_MSR_EXIT_REASON_UNKNOWN;
2023         case KVM_MSR_RET_FILTERED:
2024                 return KVM_MSR_EXIT_REASON_FILTER;
2025         default:
2026                 return KVM_MSR_EXIT_REASON_INVAL;
2027         }
2028 }
2029
2030 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
2031                               u32 exit_reason, u64 data,
2032                               int (*completion)(struct kvm_vcpu *vcpu),
2033                               int r)
2034 {
2035         u64 msr_reason = kvm_msr_reason(r);
2036
2037         /* Check if the user wanted to know about this MSR fault */
2038         if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason))
2039                 return 0;
2040
2041         vcpu->run->exit_reason = exit_reason;
2042         vcpu->run->msr.error = 0;
2043         memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad));
2044         vcpu->run->msr.reason = msr_reason;
2045         vcpu->run->msr.index = index;
2046         vcpu->run->msr.data = data;
2047         vcpu->arch.complete_userspace_io = completion;
2048
2049         return 1;
2050 }
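/*
 * Summary of the deferred userspace-MSR flow set up above (describing the
 * code, not new behavior): when kvm_msr_user_space() returns 1, the caller
 * returns 0 so that KVM_RUN exits to userspace with KVM_EXIT_X86_RDMSR or
 * KVM_EXIT_X86_WRMSR filled in.  Userspace services the access, sets
 * run->msr.error (and run->msr.data for reads) and re-enters KVM_RUN; the
 * stashed complete_userspace_io callback then loads RDX:RAX for reads and
 * typically skips the instruction on success or injects #GP on failure.
 */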
2051
2052 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
2053 {
2054         u32 ecx = kvm_rcx_read(vcpu);
2055         u64 data;
2056         int r;
2057
2058         r = kvm_get_msr_with_filter(vcpu, ecx, &data);
2059
2060         if (!r) {
2061                 trace_kvm_msr_read(ecx, data);
2062
2063                 kvm_rax_write(vcpu, data & -1u);
2064                 kvm_rdx_write(vcpu, (data >> 32) & -1u);
2065         } else {
2066                 /* MSR read failed? See if we should ask user space */
2067                 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
2068                                        complete_fast_rdmsr, r))
2069                         return 0;
2070                 trace_kvm_msr_read_ex(ecx);
2071         }
2072
2073         return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
2074 }
2075 EXPORT_SYMBOL_GPL(kvm_emulate_rdmsr);
2076
2077 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
2078 {
2079         u32 ecx = kvm_rcx_read(vcpu);
2080         u64 data = kvm_read_edx_eax(vcpu);
2081         int r;
2082
2083         r = kvm_set_msr_with_filter(vcpu, ecx, data);
2084
2085         if (!r) {
2086                 trace_kvm_msr_write(ecx, data);
2087         } else {
2088                 /* MSR write failed? See if we should ask user space */
2089                 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
2090                                        complete_fast_msr_access, r))
2091                         return 0;
2092                 /* Signal all other negative errors to userspace */
2093                 if (r < 0)
2094                         return r;
2095                 trace_kvm_msr_write_ex(ecx, data);
2096         }
2097
2098         return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
2099 }
2100 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
2101
2102 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu)
2103 {
2104         return kvm_skip_emulated_instruction(vcpu);
2105 }
2106
2107 int kvm_emulate_invd(struct kvm_vcpu *vcpu)
2108 {
2109         /* Treat an INVD instruction as a NOP and just skip it. */
2110         return kvm_emulate_as_nop(vcpu);
2111 }
2112 EXPORT_SYMBOL_GPL(kvm_emulate_invd);
2113
2114 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu)
2115 {
2116         kvm_queue_exception(vcpu, UD_VECTOR);
2117         return 1;
2118 }
2119 EXPORT_SYMBOL_GPL(kvm_handle_invalid_op);
2120
2121
2122 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn)
2123 {
2124         if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) &&
2125             !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT))
2126                 return kvm_handle_invalid_op(vcpu);
2127
2128         pr_warn_once("%s instruction emulated as NOP!\n", insn);
2129         return kvm_emulate_as_nop(vcpu);
2130 }
2131 int kvm_emulate_mwait(struct kvm_vcpu *vcpu)
2132 {
2133         return kvm_emulate_monitor_mwait(vcpu, "MWAIT");
2134 }
2135 EXPORT_SYMBOL_GPL(kvm_emulate_mwait);
2136
2137 int kvm_emulate_monitor(struct kvm_vcpu *vcpu)
2138 {
2139         return kvm_emulate_monitor_mwait(vcpu, "MONITOR");
2140 }
2141 EXPORT_SYMBOL_GPL(kvm_emulate_monitor);
2142
2143 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
2144 {
2145         xfer_to_guest_mode_prepare();
2146         return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
2147                 xfer_to_guest_mode_work_pending();
2148 }
2149
2150 /*
2151  * The fast path for frequent and performance sensitive wrmsr emulation,
2152  * i.e. the sending of IPIs.  Handling the IPI early in the VM-Exit flow
2153  * reduces the latency of virtual IPIs by avoiding the expensive bits of
2154  * transitioning from guest to host, e.g. reacquiring KVM's SRCU lock; the
2155  * other cases must instead be handled after interrupts are enabled on the host.
2156  */
2157 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data)
2158 {
2159         if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic))
2160                 return 1;
2161
2162         if (((data & APIC_SHORT_MASK) == APIC_DEST_NOSHORT) &&
2163             ((data & APIC_DEST_MASK) == APIC_DEST_PHYSICAL) &&
2164             ((data & APIC_MODE_MASK) == APIC_DM_FIXED) &&
2165             ((u32)(data >> 32) != X2APIC_BROADCAST))
2166                 return kvm_x2apic_icr_write(vcpu->arch.apic, data);
2167
2168         return 1;
2169 }
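/*
 * In other words, only fixed-delivery, physical-destination, non-broadcast
 * ICR writes with no destination shorthand are handled here; any other ICR
 * value (return value 1) falls back to the normal WRMSR emulation path.
 */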
2170
2171 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data)
2172 {
2173         if (!kvm_can_use_hv_timer(vcpu))
2174                 return 1;
2175
2176         kvm_set_lapic_tscdeadline_msr(vcpu, data);
2177         return 0;
2178 }
2179
2180 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
2181 {
2182         u32 msr = kvm_rcx_read(vcpu);
2183         u64 data;
2184         fastpath_t ret = EXIT_FASTPATH_NONE;
2185
2186         kvm_vcpu_srcu_read_lock(vcpu);
2187
2188         switch (msr) {
2189         case APIC_BASE_MSR + (APIC_ICR >> 4):
2190                 data = kvm_read_edx_eax(vcpu);
2191                 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
2192                         kvm_skip_emulated_instruction(vcpu);
2193                         ret = EXIT_FASTPATH_EXIT_HANDLED;
2194                 }
2195                 break;
2196         case MSR_IA32_TSC_DEADLINE:
2197                 data = kvm_read_edx_eax(vcpu);
2198                 if (!handle_fastpath_set_tscdeadline(vcpu, data)) {
2199                         kvm_skip_emulated_instruction(vcpu);
2200                         ret = EXIT_FASTPATH_REENTER_GUEST;
2201                 }
2202                 break;
2203         default:
2204                 break;
2205         }
2206
2207         if (ret != EXIT_FASTPATH_NONE)
2208                 trace_kvm_msr_write(msr, data);
2209
2210         kvm_vcpu_srcu_read_unlock(vcpu);
2211
2212         return ret;
2213 }
2214 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
2215
2216 /*
2217  * Adapt {get,set}_msr() to msr_io()'s calling convention
2218  */
2219 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2220 {
2221         return kvm_get_msr_ignored_check(vcpu, index, data, true);
2222 }
2223
2224 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2225 {
2226         u64 val;
2227
2228         /*
2229          * Disallow writes to immutable feature MSRs after KVM_RUN.  KVM does
2230          * not support modifying the guest vCPU model on the fly, e.g. changing
2231          * the nVMX capabilities while L2 is running is nonsensical.  Ignore
2232          * writes of the same value, e.g. to allow userspace to blindly stuff
2233          * all MSRs when emulating RESET.
2234          */
2235         if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) {
2236                 if (do_get_msr(vcpu, index, &val) || *data != val)
2237                         return -EINVAL;
2238
2239                 return 0;
2240         }
2241
2242         return kvm_set_msr_ignored_check(vcpu, index, *data, true);
2243 }
2244
2245 #ifdef CONFIG_X86_64
2246 struct pvclock_clock {
2247         int vclock_mode;
2248         u64 cycle_last;
2249         u64 mask;
2250         u32 mult;
2251         u32 shift;
2252         u64 base_cycles;
2253         u64 offset;
2254 };
2255
2256 struct pvclock_gtod_data {
2257         seqcount_t      seq;
2258
2259         struct pvclock_clock clock; /* extract of a clocksource struct */
2260         struct pvclock_clock raw_clock; /* extract of a clocksource struct */
2261
2262         ktime_t         offs_boot;
2263         u64             wall_time_sec;
2264 };
2265
2266 static struct pvclock_gtod_data pvclock_gtod_data;
2267
2268 static void update_pvclock_gtod(struct timekeeper *tk)
2269 {
2270         struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
2271
2272         write_seqcount_begin(&vdata->seq);
2273
2274         /* copy pvclock gtod data */
2275         vdata->clock.vclock_mode        = tk->tkr_mono.clock->vdso_clock_mode;
2276         vdata->clock.cycle_last         = tk->tkr_mono.cycle_last;
2277         vdata->clock.mask               = tk->tkr_mono.mask;
2278         vdata->clock.mult               = tk->tkr_mono.mult;
2279         vdata->clock.shift              = tk->tkr_mono.shift;
2280         vdata->clock.base_cycles        = tk->tkr_mono.xtime_nsec;
2281         vdata->clock.offset             = tk->tkr_mono.base;
2282
2283         vdata->raw_clock.vclock_mode    = tk->tkr_raw.clock->vdso_clock_mode;
2284         vdata->raw_clock.cycle_last     = tk->tkr_raw.cycle_last;
2285         vdata->raw_clock.mask           = tk->tkr_raw.mask;
2286         vdata->raw_clock.mult           = tk->tkr_raw.mult;
2287         vdata->raw_clock.shift          = tk->tkr_raw.shift;
2288         vdata->raw_clock.base_cycles    = tk->tkr_raw.xtime_nsec;
2289         vdata->raw_clock.offset         = tk->tkr_raw.base;
2290
2291         vdata->wall_time_sec            = tk->xtime_sec;
2292
2293         vdata->offs_boot                = tk->offs_boot;
2294
2295         write_seqcount_end(&vdata->seq);
2296 }
2297
2298 static s64 get_kvmclock_base_ns(void)
2299 {
2300         /* Count up from boot time, but with the frequency of the raw clock.  */
2301         return ktime_to_ns(ktime_add(ktime_get_raw(), pvclock_gtod_data.offs_boot));
2302 }
2303 #else
2304 static s64 get_kvmclock_base_ns(void)
2305 {
2306         /* Master clock not used, so we can just use CLOCK_BOOTTIME.  */
2307         return ktime_get_boottime_ns();
2308 }
2309 #endif
2310
2311 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
2312 {
2313         int version;
2314         int r;
2315         struct pvclock_wall_clock wc;
2316         u32 wc_sec_hi;
2317         u64 wall_nsec;
2318
2319         if (!wall_clock)
2320                 return;
2321
2322         r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
2323         if (r)
2324                 return;
2325
2326         if (version & 1)
2327                 ++version;  /* first time write, random junk */
2328
2329         ++version;
2330
2331         if (kvm_write_guest(kvm, wall_clock, &version, sizeof(version)))
2332                 return;
2333
2334         /*
2335          * The guest calculates current wall clock time by adding
2336          * system time (updated by kvm_guest_time_update below) to the
2337          * wall clock specified here.  We do the reverse here.
2338          */
2339         wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
2340
2341         wc.nsec = do_div(wall_nsec, 1000000000);
2342         wc.sec = (u32)wall_nsec; /* overflow in 2106 guest time */
2343         wc.version = version;
2344
2345         kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
2346
2347         if (sec_hi_ofs) {
2348                 wc_sec_hi = wall_nsec >> 32;
2349                 kvm_write_guest(kvm, wall_clock + sec_hi_ofs,
2350                                 &wc_sec_hi, sizeof(wc_sec_hi));
2351         }
2352
2353         version++;
2354         kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
2355 }
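/*
 * The version bumps above follow the pvclock protocol: the version is made
 * odd before the wall clock structure is updated and even again afterwards,
 * so a guest that observes an odd version, or a version change across its
 * reads, knows it raced with an update and must retry.
 */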
2356
2357 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,
2358                                   bool old_msr, bool host_initiated)
2359 {
2360         struct kvm_arch *ka = &vcpu->kvm->arch;
2361
2362         if (vcpu->vcpu_id == 0 && !host_initiated) {
2363                 if (ka->boot_vcpu_runs_old_kvmclock != old_msr)
2364                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2365
2366                 ka->boot_vcpu_runs_old_kvmclock = old_msr;
2367         }
2368
2369         vcpu->arch.time = system_time;
2370         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
2371
2372         /* we verify if the enable bit is set... */
2373         if (system_time & 1)
2374                 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL,
2375                                  sizeof(struct pvclock_vcpu_time_info));
2376         else
2377                 kvm_gpc_deactivate(&vcpu->arch.pv_time);
2378
2379         return;
2380 }
2381
2382 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
2383 {
2384         do_shl32_div32(dividend, divisor);
2385         return dividend;
2386 }
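/*
 * div_frac() effectively returns (dividend << 32) / divisor, i.e. the ratio
 * dividend/divisor expressed as a 0.32 fixed-point fraction (do_shl32_div32()
 * is assumed to perform the shifted 64/32 division).
 */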
2387
2388 static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
2389                                s8 *pshift, u32 *pmultiplier)
2390 {
2391         uint64_t scaled64;
2392         int32_t  shift = 0;
2393         uint64_t tps64;
2394         uint32_t tps32;
2395
2396         tps64 = base_hz;
2397         scaled64 = scaled_hz;
2398         while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
2399                 tps64 >>= 1;
2400                 shift--;
2401         }
2402
2403         tps32 = (uint32_t)tps64;
2404         while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
2405                 if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
2406                         scaled64 >>= 1;
2407                 else
2408                         tps32 <<= 1;
2409                 shift++;
2410         }
2411
2412         *pshift = shift;
2413         *pmultiplier = div_frac(scaled64, tps32);
2414 }
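/*
 * On return, (*pshift, *pmultiplier) roughly satisfy
 * scaled_hz ~= base_hz * *pmultiplier * 2^(*pshift) / 2^32, so that
 * pvclock_scale_delta(delta, mult, shift) converts a delta measured in
 * base_hz ticks into scaled_hz ticks; e.g. kvm_set_tsc_khz() below uses this
 * to convert elapsed nanoseconds into guest TSC cycles.
 */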
2415
2416 #ifdef CONFIG_X86_64
2417 static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
2418 #endif
2419
2420 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
2421 static unsigned long max_tsc_khz;
2422
2423 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
2424 {
2425         u64 v = (u64)khz * (1000000 + ppm);
2426         do_div(v, 1000000);
2427         return v;
2428 }
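/*
 * For example, with a tolerance of 250 ppm, a 1,000,000 kHz base rate yields
 * bounds of adjust_tsc_khz(1000000, -250) = 999,750 kHz and
 * adjust_tsc_khz(1000000, 250) = 1,000,250 kHz, as used by kvm_set_tsc_khz()
 * below.
 */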
2429
2430 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2431
2432 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
2433 {
2434         u64 ratio;
2435
2436         /* Guest TSC same frequency as host TSC? */
2437         if (!scale) {
2438                 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2439                 return 0;
2440         }
2441
2442         /* TSC scaling supported? */
2443         if (!kvm_caps.has_tsc_control) {
2444                 if (user_tsc_khz > tsc_khz) {
2445                         vcpu->arch.tsc_catchup = 1;
2446                         vcpu->arch.tsc_always_catchup = 1;
2447                         return 0;
2448                 } else {
2449                         pr_warn_ratelimited("user requested TSC rate below hardware speed\n");
2450                         return -1;
2451                 }
2452         }
2453
2454         /* TSC scaling required  - calculate ratio */
2455         ratio = mul_u64_u32_div(1ULL << kvm_caps.tsc_scaling_ratio_frac_bits,
2456                                 user_tsc_khz, tsc_khz);
2457
2458         if (ratio == 0 || ratio >= kvm_caps.max_tsc_scaling_ratio) {
2459                 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n",
2460                                     user_tsc_khz);
2461                 return -1;
2462         }
2463
2464         kvm_vcpu_write_tsc_multiplier(vcpu, ratio);
2465         return 0;
2466 }
2467
2468 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
2469 {
2470         u32 thresh_lo, thresh_hi;
2471         int use_scaling = 0;
2472
2473         /* tsc_khz can be zero if TSC calibration fails */
2474         if (user_tsc_khz == 0) {
2475                 /* set tsc_scaling_ratio to a safe value */
2476                 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio);
2477                 return -1;
2478         }
2479
2480         /* Compute a scale to convert nanoseconds in TSC cycles */
2481         kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
2482                            &vcpu->arch.virtual_tsc_shift,
2483                            &vcpu->arch.virtual_tsc_mult);
2484         vcpu->arch.virtual_tsc_khz = user_tsc_khz;
2485
2486         /*
2487          * Compute the variation in TSC rate which is acceptable
2488          * within the range of tolerance and decide if the
2489          * rate being applied is within those bounds of the hardware
2490          * rate.  If so, no scaling or compensation need be done.
2491          */
2492         thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
2493         thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
2494         if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
2495                 pr_debug("requested TSC rate %u falls outside tolerance [%u,%u]\n",
2496                          user_tsc_khz, thresh_lo, thresh_hi);
2497                 use_scaling = 1;
2498         }
2499         return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
2500 }
2501
2502 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
2503 {
2504         u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
2505                                       vcpu->arch.virtual_tsc_mult,
2506                                       vcpu->arch.virtual_tsc_shift);
2507         tsc += vcpu->arch.this_tsc_write;
2508         return tsc;
2509 }
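/*
 * I.e. the guest TSC is reconstructed as the TSC value recorded for the
 * current sync generation (this_tsc_write) plus the nanoseconds elapsed since
 * that write, converted to cycles at virtual_tsc_khz via the (mult, shift)
 * pair computed by kvm_get_time_scale() above.
 */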
2510
2511 #ifdef CONFIG_X86_64
2512 static inline int gtod_is_based_on_tsc(int mode)
2513 {
2514         return mode == VDSO_CLOCKMODE_TSC || mode == VDSO_CLOCKMODE_HVCLOCK;
2515 }
2516 #endif
2517
2518 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
2519 {
2520 #ifdef CONFIG_X86_64
2521         bool vcpus_matched;
2522         struct kvm_arch *ka = &vcpu->kvm->arch;
2523         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2524
2525         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2526                          atomic_read(&vcpu->kvm->online_vcpus));
2527
2528         /*
2529          * Once the masterclock is enabled, always make the request in
2530          * order to keep it updated.
2531          *
2532          * In order to enable the masterclock, the host clocksource must be
2533          * TSC and the vCPUs need to have matched TSCs.  When that happens,
2534          * make the request to enable the masterclock.
2535          */
2536         if (ka->use_master_clock ||
2537             (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched))
2538                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
2539
2540         trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
2541                             atomic_read(&vcpu->kvm->online_vcpus),
2542                             ka->use_master_clock, gtod->clock.vclock_mode);
2543 #endif
2544 }
2545
2546 /*
2547  * Multiply tsc by a fixed point number represented by ratio.
2548  *
2549  * The most significant 64-N bits (mult) of ratio represent the
2550  * integral part of the fixed point number; the remaining N bits
2551  * (frac) represent the fractional part, ie. ratio represents a fixed
2552  * point number (mult + frac * 2^(-N)).
2553  *
2554  * N equals to kvm_caps.tsc_scaling_ratio_frac_bits.
2555  */
2556 static inline u64 __scale_tsc(u64 ratio, u64 tsc)
2557 {
2558         return mul_u64_u64_shr(tsc, ratio, kvm_caps.tsc_scaling_ratio_frac_bits);
2559 }
2560
2561 u64 kvm_scale_tsc(u64 tsc, u64 ratio)
2562 {
2563         u64 _tsc = tsc;
2564
2565         if (ratio != kvm_caps.default_tsc_scaling_ratio)
2566                 _tsc = __scale_tsc(ratio, tsc);
2567
2568         return _tsc;
2569 }
2570
2571 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
2572 {
2573         u64 tsc;
2574
2575         tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio);
2576
2577         return target_tsc - tsc;
2578 }
2579
2580 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
2581 {
2582         return vcpu->arch.l1_tsc_offset +
2583                 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio);
2584 }
2585 EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
2586
2587 u64 kvm_calc_nested_tsc_offset(u64 l1_offset, u64 l2_offset, u64 l2_multiplier)
2588 {
2589         u64 nested_offset;
2590
2591         if (l2_multiplier == kvm_caps.default_tsc_scaling_ratio)
2592                 nested_offset = l1_offset;
2593         else
2594                 nested_offset = mul_s64_u64_shr((s64) l1_offset, l2_multiplier,
2595                                                 kvm_caps.tsc_scaling_ratio_frac_bits);
2596
2597         nested_offset += l2_offset;
2598         return nested_offset;
2599 }
2600 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_offset);
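/*
 * Derivation, in the offset-after-scaling model used by kvm_read_l1_tsc()
 * above: L2's TSC = (L1's TSC * l2_multiplier >> frac_bits) + l2_offset and
 * L1's TSC = (host TSC * l1_multiplier >> frac_bits) + l1_offset, so the
 * combined offset is (l1_offset * l2_multiplier >> frac_bits) + l2_offset,
 * which is what kvm_calc_nested_tsc_offset() computes.  The corresponding
 * combined multiplier, (l1_multiplier * l2_multiplier) >> frac_bits, is
 * computed by kvm_calc_nested_tsc_multiplier() below.
 */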
2601
2602 u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
2603 {
2604         if (l2_multiplier != kvm_caps.default_tsc_scaling_ratio)
2605                 return mul_u64_u64_shr(l1_multiplier, l2_multiplier,
2606                                        kvm_caps.tsc_scaling_ratio_frac_bits);
2607
2608         return l1_multiplier;
2609 }
2610 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
2611
2612 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
2613 {
2614         trace_kvm_write_tsc_offset(vcpu->vcpu_id,
2615                                    vcpu->arch.l1_tsc_offset,
2616                                    l1_offset);
2617
2618         vcpu->arch.l1_tsc_offset = l1_offset;
2619
2620         /*
2621          * If we are here because L1 chose not to trap WRMSR to TSC then
2622          * according to the spec this should set L1's TSC (as opposed to
2623          * setting L1's offset for L2).
2624          */
2625         if (is_guest_mode(vcpu))
2626                 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2627                         l1_offset,
2628                         static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
2629                         static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2630         else
2631                 vcpu->arch.tsc_offset = l1_offset;
2632
2633         static_call(kvm_x86_write_tsc_offset)(vcpu);
2634 }
2635
2636 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier)
2637 {
2638         vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier;
2639
2640         /* Userspace is changing the multiplier while L2 is active */
2641         if (is_guest_mode(vcpu))
2642                 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2643                         l1_multiplier,
2644                         static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
2645         else
2646                 vcpu->arch.tsc_scaling_ratio = l1_multiplier;
2647
2648         if (kvm_caps.has_tsc_control)
2649                 static_call(kvm_x86_write_tsc_multiplier)(vcpu);
2650 }
2651
2652 static inline bool kvm_check_tsc_unstable(void)
2653 {
2654 #ifdef CONFIG_X86_64
2655         /*
2656          * The TSC is marked unstable when we're running on Hyper-V, but
2657          * the Hyper-V 'TSC page' clocksource is still good.
2658          */
2659         if (pvclock_gtod_data.clock.vclock_mode == VDSO_CLOCKMODE_HVCLOCK)
2660                 return false;
2661 #endif
2662         return check_tsc_unstable();
2663 }
2664
2665 /*
2666  * Infers attempts to synchronize the guest's tsc from host writes. Sets the
2667  * offset for the vcpu and tracks the TSC matching generation that the vcpu
2668  * participates in.
2669  */
2670 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc,
2671                                   u64 ns, bool matched)
2672 {
2673         struct kvm *kvm = vcpu->kvm;
2674
2675         lockdep_assert_held(&kvm->arch.tsc_write_lock);
2676
2677         /*
2678          * We also track the most recent recorded KHz, write, and time to
2679          * allow the matching interval to be extended at each write.
2680          */
2681         kvm->arch.last_tsc_nsec = ns;
2682         kvm->arch.last_tsc_write = tsc;
2683         kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
2684         kvm->arch.last_tsc_offset = offset;
2685
2686         vcpu->arch.last_guest_tsc = tsc;
2687
2688         kvm_vcpu_write_tsc_offset(vcpu, offset);
2689
2690         if (!matched) {
2691                 /*
2692                  * We split periods of matched TSC writes into generations.
2693                  * For each generation, we track the original measured
2694                  * nanosecond time, offset, and write, so if TSCs are in
2695                  * sync, we can match exact offset, and if not, we can match
2696                  * exact software computation in compute_guest_tsc()
2697                  *
2698                  * These values are tracked in kvm->arch.cur_xxx variables.
2699                  */
2700                 kvm->arch.cur_tsc_generation++;
2701                 kvm->arch.cur_tsc_nsec = ns;
2702                 kvm->arch.cur_tsc_write = tsc;
2703                 kvm->arch.cur_tsc_offset = offset;
2704                 kvm->arch.nr_vcpus_matched_tsc = 0;
2705         } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) {
2706                 kvm->arch.nr_vcpus_matched_tsc++;
2707         }
2708
2709         /* Keep track of which generation this VCPU has synchronized to */
2710         vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
2711         vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
2712         vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
2713
2714         kvm_track_tsc_matching(vcpu);
2715 }
2716
2717 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
2718 {
2719         struct kvm *kvm = vcpu->kvm;
2720         u64 offset, ns, elapsed;
2721         unsigned long flags;
2722         bool matched = false;
2723         bool synchronizing = false;
2724
2725         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
2726         offset = kvm_compute_l1_tsc_offset(vcpu, data);
2727         ns = get_kvmclock_base_ns();
2728         elapsed = ns - kvm->arch.last_tsc_nsec;
2729
2730         if (vcpu->arch.virtual_tsc_khz) {
2731                 if (data == 0) {
2732                         /*
2733                          * detection of vcpu initialization -- need to sync
2734                          * with other vCPUs. This particularly helps to keep
2735                          * kvm_clock stable after CPU hotplug
2736                          */
2737                         synchronizing = true;
2738                 } else {
2739                         u64 tsc_exp = kvm->arch.last_tsc_write +
2740                                                 nsec_to_cycles(vcpu, elapsed);
2741                         u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL;
2742                         /*
2743                          * Special case: TSC write with a small delta (1 second)
2744                          * of virtual cycle time against real time is
2745                          * interpreted as an attempt to synchronize the CPU.
2746                          */
2747                         synchronizing = data < tsc_exp + tsc_hz &&
2748                                         data + tsc_hz > tsc_exp;
2749                 }
2750         }
2751
2752         /*
2753          * For a reliable TSC, we can match TSC offsets, and for an unstable
2754          * TSC, we add elapsed time in this computation.  We could let the
2755          * compensation code attempt to catch up if we fall behind, but
2756          * it's better to try to match offsets from the beginning.
2757          */
2758         if (synchronizing &&
2759             vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
2760                 if (!kvm_check_tsc_unstable()) {
2761                         offset = kvm->arch.cur_tsc_offset;
2762                 } else {
2763                         u64 delta = nsec_to_cycles(vcpu, elapsed);
2764                         data += delta;
2765                         offset = kvm_compute_l1_tsc_offset(vcpu, data);
2766                 }
2767                 matched = true;
2768         }
2769
2770         __kvm_synchronize_tsc(vcpu, offset, data, ns, matched);
2771         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
2772 }
2773
2774 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
2775                                            s64 adjustment)
2776 {
2777         u64 tsc_offset = vcpu->arch.l1_tsc_offset;
2778         kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
2779 }
2780
2781 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
2782 {
2783         if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio)
2784                 WARN_ON(adjustment < 0);
2785         adjustment = kvm_scale_tsc((u64) adjustment,
2786                                    vcpu->arch.l1_tsc_scaling_ratio);
2787         adjust_tsc_offset_guest(vcpu, adjustment);
2788 }
2789
2790 #ifdef CONFIG_X86_64
2791
2792 static u64 read_tsc(void)
2793 {
2794         u64 ret = (u64)rdtsc_ordered();
2795         u64 last = pvclock_gtod_data.clock.cycle_last;
2796
2797         if (likely(ret >= last))
2798                 return ret;
2799
2800         /*
2801          * GCC likes to generate cmov here, but this branch is extremely
2802          * predictable (it's just a function of time and the likely is
2803          * very likely) and there's a data dependence, so force GCC
2804          * to generate a branch instead.  I don't barrier() because
2805          * we don't actually need a barrier, and if this function
2806          * ever gets inlined it will generate worse code.
2807          */
2808         asm volatile ("");
2809         return last;
2810 }
2811
2812 static inline u64 vgettsc(struct pvclock_clock *clock, u64 *tsc_timestamp,
2813                           int *mode)
2814 {
2815         u64 tsc_pg_val;
2816         long v;
2817
2818         switch (clock->vclock_mode) {
2819         case VDSO_CLOCKMODE_HVCLOCK:
2820                 if (hv_read_tsc_page_tsc(hv_get_tsc_page(),
2821                                          tsc_timestamp, &tsc_pg_val)) {
2822                         /* TSC page valid */
2823                         *mode = VDSO_CLOCKMODE_HVCLOCK;
2824                         v = (tsc_pg_val - clock->cycle_last) &
2825                                 clock->mask;
2826                 } else {
2827                         /* TSC page invalid */
2828                         *mode = VDSO_CLOCKMODE_NONE;
2829                 }
2830                 break;
2831         case VDSO_CLOCKMODE_TSC:
2832                 *mode = VDSO_CLOCKMODE_TSC;
2833                 *tsc_timestamp = read_tsc();
2834                 v = (*tsc_timestamp - clock->cycle_last) &
2835                         clock->mask;
2836                 break;
2837         default:
2838                 *mode = VDSO_CLOCKMODE_NONE;
2839         }
2840
2841         if (*mode == VDSO_CLOCKMODE_NONE)
2842                 *tsc_timestamp = v = 0;
2843
2844         return v * clock->mult;
2845 }
2846
2847 static int do_monotonic_raw(s64 *t, u64 *tsc_timestamp)
2848 {
2849         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2850         unsigned long seq;
2851         int mode;
2852         u64 ns;
2853
2854         do {
2855                 seq = read_seqcount_begin(&gtod->seq);
2856                 ns = gtod->raw_clock.base_cycles;
2857                 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode);
2858                 ns >>= gtod->raw_clock.shift;
2859                 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot));
2860         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2861         *t = ns;
2862
2863         return mode;
2864 }
2865
2866 static int do_realtime(struct timespec64 *ts, u64 *tsc_timestamp)
2867 {
2868         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
2869         unsigned long seq;
2870         int mode;
2871         u64 ns;
2872
2873         do {
2874                 seq = read_seqcount_begin(&gtod->seq);
2875                 ts->tv_sec = gtod->wall_time_sec;
2876                 ns = gtod->clock.base_cycles;
2877                 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode);
2878                 ns >>= gtod->clock.shift;
2879         } while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
2880
2881         ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
2882         ts->tv_nsec = ns;
2883
2884         return mode;
2885 }
2886
2887 /* returns true if host is using TSC based clocksource */
2888 static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *tsc_timestamp)
2889 {
2890         /* checked again under seqlock below */
2891         if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2892                 return false;
2893
2894         return gtod_is_based_on_tsc(do_monotonic_raw(kernel_ns,
2895                                                       tsc_timestamp));
2896 }
2897
2898 /* returns true if host is using TSC based clocksource */
2899 static bool kvm_get_walltime_and_clockread(struct timespec64 *ts,
2900                                            u64 *tsc_timestamp)
2901 {
2902         /* checked again under seqlock below */
2903         if (!gtod_is_based_on_tsc(pvclock_gtod_data.clock.vclock_mode))
2904                 return false;
2905
2906         return gtod_is_based_on_tsc(do_realtime(ts, tsc_timestamp));
2907 }
2908 #endif
2909
2910 /*
2911  *
2912  * Assuming a stable TSC across physical CPUs, and a stable TSC
2913  * across virtual CPUs, the following condition is possible.
2914  * Each numbered line represents an event visible to both
2915  * CPUs at the next numbered event.
2916  *
2917  * "timespecX" represents host monotonic time. "tscX" represents
2918  * RDTSC value.
2919  *
2920  *              VCPU0 on CPU0           |       VCPU1 on CPU1
2921  *
2922  * 1.  read timespec0,tsc0
2923  * 2.                                   | timespec1 = timespec0 + N
2924  *                                      | tsc1 = tsc0 + M
2925  * 3. transition to guest               | transition to guest
2926  * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2927  * 5.                                   | ret1 = timespec1 + (rdtsc - tsc1)
2928  *                                      | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2929  *
2930  * Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
2931  *
2932  *      - ret0 < ret1
2933  *      - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2934  *              ...
2935  *      - 0 < N - M => M < N
2936  *
2937  * That is, when timespec0 != timespec1, M < N. Unfortunately that is not
2938  * always the case (the difference between two distinct xtime instances
2939  * might be smaller than the difference between corresponding TSC reads,
2940  * when updating the guest vcpus' pvclock areas).
2941  *
2942  * To avoid that problem, do not allow visibility of distinct
2943  * system_timestamp/tsc_timestamp values simultaneously: use a master
2944  * copy of host monotonic time values. Update that master copy
2945  * in lockstep.
2946  *
2947  * Rely on synchronization of host TSCs and guest TSCs for monotonicity.
2948  *
2949  */
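
/*
 * Purely illustrative numbers for the derivation above (values invented
 * for the example, not measured): with timespec0 = 1000ns, N = 10ns and
 * M = 30ns, VCPU0 computes ret0 = 1000 + (rdtsc - tsc0), while VCPU1
 * computes ret1 = 1010 + (rdtsc - tsc0 - 30) = 980 + (rdtsc - tsc0),
 * i.e. the later reader observes an earlier time.  This is the M > N
 * case that the master copy exists to rule out.
 */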
2950
2951 static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
2952 {
2953 #ifdef CONFIG_X86_64
2954         struct kvm_arch *ka = &kvm->arch;
2955         int vclock_mode;
2956         bool host_tsc_clocksource, vcpus_matched;
2957
2958         lockdep_assert_held(&kvm->arch.tsc_write_lock);
2959         vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
2960                         atomic_read(&kvm->online_vcpus));
2961
2962         /*
2963          * If the host uses the TSC clock, then pass the TSC through as stable
2964          * to the guest.
2965          */
2966         host_tsc_clocksource = kvm_get_time_and_clockread(
2967                                         &ka->master_kernel_ns,
2968                                         &ka->master_cycle_now);
2969
2970         ka->use_master_clock = host_tsc_clocksource && vcpus_matched
2971                                 && !ka->backwards_tsc_observed
2972                                 && !ka->boot_vcpu_runs_old_kvmclock;
2973
2974         if (ka->use_master_clock)
2975                 atomic_set(&kvm_guest_has_master_clock, 1);
2976
2977         vclock_mode = pvclock_gtod_data.clock.vclock_mode;
2978         trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
2979                                         vcpus_matched);
2980 #endif
2981 }
2982
2983 static void kvm_make_mclock_inprogress_request(struct kvm *kvm)
2984 {
2985         kvm_make_all_cpus_request(kvm, KVM_REQ_MCLOCK_INPROGRESS);
2986 }
2987
2988 static void __kvm_start_pvclock_update(struct kvm *kvm)
2989 {
2990         raw_spin_lock_irq(&kvm->arch.tsc_write_lock);
2991         write_seqcount_begin(&kvm->arch.pvclock_sc);
2992 }
2993
2994 static void kvm_start_pvclock_update(struct kvm *kvm)
2995 {
2996         kvm_make_mclock_inprogress_request(kvm);
2997
2998         /* no guest entries from this point */
2999         __kvm_start_pvclock_update(kvm);
3000 }
3001
3002 static void kvm_end_pvclock_update(struct kvm *kvm)
3003 {
3004         struct kvm_arch *ka = &kvm->arch;
3005         struct kvm_vcpu *vcpu;
3006         unsigned long i;
3007
3008         write_seqcount_end(&ka->pvclock_sc);
3009         raw_spin_unlock_irq(&ka->tsc_write_lock);
3010         kvm_for_each_vcpu(i, vcpu, kvm)
3011                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3012
3013         /* guest entries allowed */
3014         kvm_for_each_vcpu(i, vcpu, kvm)
3015                 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
3016 }
3017
3018 static void kvm_update_masterclock(struct kvm *kvm)
3019 {
3020         kvm_hv_request_tsc_page_update(kvm);
3021         kvm_start_pvclock_update(kvm);
3022         pvclock_update_vm_gtod_copy(kvm);
3023         kvm_end_pvclock_update(kvm);
3024 }
3025
3026 /*
3027  * Use the kernel's tsc_khz directly if the TSC is constant, otherwise use KVM's
3028  * per-CPU value (which may be zero if a CPU is going offline).  Note, tsc_khz
3029  * can change during boot even if the TSC is constant, as it's possible for KVM
3030  * to be loaded before TSC calibration completes.  Ideally, KVM would get a
3031  * notification when calibration completes, but practically speaking calibration
3032  * will complete before userspace is alive enough to create VMs.
3033  */
3034 static unsigned long get_cpu_tsc_khz(void)
3035 {
3036         if (static_cpu_has(X86_FEATURE_CONSTANT_TSC))
3037                 return tsc_khz;
3038         else
3039                 return __this_cpu_read(cpu_tsc_khz);
3040 }
3041
3042 /* Called within read_seqcount_begin/retry for kvm->pvclock_sc.  */
3043 static void __get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3044 {
3045         struct kvm_arch *ka = &kvm->arch;
3046         struct pvclock_vcpu_time_info hv_clock;
3047
3048         /* both __this_cpu_read() and rdtsc() should be on the same cpu */
3049         get_cpu();
3050
3051         data->flags = 0;
3052         if (ka->use_master_clock &&
3053             (static_cpu_has(X86_FEATURE_CONSTANT_TSC) || __this_cpu_read(cpu_tsc_khz))) {
3054 #ifdef CONFIG_X86_64
3055                 struct timespec64 ts;
3056
3057                 if (kvm_get_walltime_and_clockread(&ts, &data->host_tsc)) {
3058                         data->realtime = ts.tv_nsec + NSEC_PER_SEC * ts.tv_sec;
3059                         data->flags |= KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC;
3060                 } else
3061 #endif
3062                 data->host_tsc = rdtsc();
3063
3064                 data->flags |= KVM_CLOCK_TSC_STABLE;
3065                 hv_clock.tsc_timestamp = ka->master_cycle_now;
3066                 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
3067                 kvm_get_time_scale(NSEC_PER_SEC, get_cpu_tsc_khz() * 1000LL,
3068                                    &hv_clock.tsc_shift,
3069                                    &hv_clock.tsc_to_system_mul);
3070                 data->clock = __pvclock_read_cycles(&hv_clock, data->host_tsc);
3071         } else {
3072                 data->clock = get_kvmclock_base_ns() + ka->kvmclock_offset;
3073         }
3074
3075         put_cpu();
3076 }
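
/*
 * Reference sketch only (not used by this file): the (tsc_shift,
 * tsc_to_system_mul) pair computed by kvm_get_time_scale() above is
 * consumed by __pvclock_read_cycles() roughly as follows, cf.
 * pvclock_scale_delta() for the real implementation:
 *
 *	u64 delta = host_tsc - hv_clock.tsc_timestamp;
 *
 *	if (hv_clock.tsc_shift >= 0)
 *		delta <<= hv_clock.tsc_shift;
 *	else
 *		delta >>= -hv_clock.tsc_shift;
 *	clock = hv_clock.system_time +
 *		mul_u64_u32_shr(delta, hv_clock.tsc_to_system_mul, 32);
 *
 * i.e. the pair approximates a multiplication by NSEC_PER_SEC / tsc_hz.
 */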
3077
3078 static void get_kvmclock(struct kvm *kvm, struct kvm_clock_data *data)
3079 {
3080         struct kvm_arch *ka = &kvm->arch;
3081         unsigned seq;
3082
3083         do {
3084                 seq = read_seqcount_begin(&ka->pvclock_sc);
3085                 __get_kvmclock(kvm, data);
3086         } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3087 }
3088
3089 u64 get_kvmclock_ns(struct kvm *kvm)
3090 {
3091         struct kvm_clock_data data;
3092
3093         get_kvmclock(kvm, &data);
3094         return data.clock;
3095 }
3096
3097 static void kvm_setup_guest_pvclock(struct kvm_vcpu *v,
3098                                     struct gfn_to_pfn_cache *gpc,
3099                                     unsigned int offset)
3100 {
3101         struct kvm_vcpu_arch *vcpu = &v->arch;
3102         struct pvclock_vcpu_time_info *guest_hv_clock;
3103         unsigned long flags;
3104
3105         read_lock_irqsave(&gpc->lock, flags);
3106         while (!kvm_gpc_check(gpc, offset + sizeof(*guest_hv_clock))) {
3107                 read_unlock_irqrestore(&gpc->lock, flags);
3108
3109                 if (kvm_gpc_refresh(gpc, offset + sizeof(*guest_hv_clock)))
3110                         return;
3111
3112                 read_lock_irqsave(&gpc->lock, flags);
3113         }
3114
3115         guest_hv_clock = (void *)(gpc->khva + offset);
3116
3117         /*
3118          * This VCPU is paused, but it's legal for a guest to read another
3119          * VCPU's kvmclock, so we really have to follow the specification where
3120          * it says that version is odd if data is being modified, and even after
3121          * it is consistent.
3122          */
3123
3124         guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1;
3125         smp_wmb();
3126
3127         /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
3128         vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
3129
3130         if (vcpu->pvclock_set_guest_stopped_request) {
3131                 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED;
3132                 vcpu->pvclock_set_guest_stopped_request = false;
3133         }
3134
3135         memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock));
3136         smp_wmb();
3137
3138         guest_hv_clock->version = ++vcpu->hv_clock.version;
3139
3140         mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
3141         read_unlock_irqrestore(&gpc->lock, flags);
3142
3143         trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock);
3144 }
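
/*
 * Illustrative guest-side counterpart (not KVM code): a reader of the
 * shared pvclock area is expected to pair with the odd/even version
 * protocol above roughly like this, retrying while an update is in
 * flight:
 *
 *	do {
 *		version = READ_ONCE(hv_clock->version);
 *		smp_rmb();
 *		tsc = rdtsc_ordered();
 *		... snapshot tsc_timestamp, system_time, mul, shift, flags ...
 *		smp_rmb();
 *	} while ((version & 1) || version != READ_ONCE(hv_clock->version));
 *
 * See pvclock_read_begin()/pvclock_read_retry() for the in-tree guest
 * implementation of this loop.
 */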
3145
3146 static int kvm_guest_time_update(struct kvm_vcpu *v)
3147 {
3148         unsigned long flags, tgt_tsc_khz;
3149         unsigned seq;
3150         struct kvm_vcpu_arch *vcpu = &v->arch;
3151         struct kvm_arch *ka = &v->kvm->arch;
3152         s64 kernel_ns;
3153         u64 tsc_timestamp, host_tsc;
3154         u8 pvclock_flags;
3155         bool use_master_clock;
3156
3157         kernel_ns = 0;
3158         host_tsc = 0;
3159
3160         /*
3161          * If the host uses the TSC clock, then pass the TSC through as stable
3162          * to the guest.
3163          */
3164         do {
3165                 seq = read_seqcount_begin(&ka->pvclock_sc);
3166                 use_master_clock = ka->use_master_clock;
3167                 if (use_master_clock) {
3168                         host_tsc = ka->master_cycle_now;
3169                         kernel_ns = ka->master_kernel_ns;
3170                 }
3171         } while (read_seqcount_retry(&ka->pvclock_sc, seq));
3172
3173         /* Keep irq disabled to prevent changes to the clock */
3174         local_irq_save(flags);
3175         tgt_tsc_khz = get_cpu_tsc_khz();
3176         if (unlikely(tgt_tsc_khz == 0)) {
3177                 local_irq_restore(flags);
3178                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3179                 return 1;
3180         }
3181         if (!use_master_clock) {
3182                 host_tsc = rdtsc();
3183                 kernel_ns = get_kvmclock_base_ns();
3184         }
3185
3186         tsc_timestamp = kvm_read_l1_tsc(v, host_tsc);
3187
3188         /*
3189          * We may have to catch up the TSC to match elapsed wall clock
3190          * time for two reasons, even if kvmclock is used.
3191          *   1) CPU could have been running below the maximum TSC rate
3192          *   2) Broken TSC compensation resets the base at each VCPU
3193          *      entry to avoid unknown leaps of TSC even when running
3194          *      again on the same CPU.  This may cause apparent elapsed
3195          *      time to disappear, and the guest to stand still or run
3196          *      very slowly.
3197          */
3198         if (vcpu->tsc_catchup) {
3199                 u64 tsc = compute_guest_tsc(v, kernel_ns);
3200                 if (tsc > tsc_timestamp) {
3201                         adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
3202                         tsc_timestamp = tsc;
3203                 }
3204         }
3205
3206         local_irq_restore(flags);
3207
3208         /* With all the info we got, fill in the values */
3209
3210         if (kvm_caps.has_tsc_control)
3211                 tgt_tsc_khz = kvm_scale_tsc(tgt_tsc_khz,
3212                                             v->arch.l1_tsc_scaling_ratio);
3213
3214         if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
3215                 kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
3216                                    &vcpu->hv_clock.tsc_shift,
3217                                    &vcpu->hv_clock.tsc_to_system_mul);
3218                 vcpu->hw_tsc_khz = tgt_tsc_khz;
3219                 kvm_xen_update_tsc_info(v);
3220         }
3221
3222         vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
3223         vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
3224         vcpu->last_guest_tsc = tsc_timestamp;
3225
3226         /* If the host uses TSC clocksource, then it is stable */
3227         pvclock_flags = 0;
3228         if (use_master_clock)
3229                 pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
3230
3231         vcpu->hv_clock.flags = pvclock_flags;
3232
3233         if (vcpu->pv_time.active)
3234                 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0);
3235         if (vcpu->xen.vcpu_info_cache.active)
3236                 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache,
3237                                         offsetof(struct compat_vcpu_info, time));
3238         if (vcpu->xen.vcpu_time_info_cache.active)
3239                 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0);
3240         kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock);
3241         return 0;
3242 }
3243
3244 /*
3245  * kvmclock updates which are isolated to a given vcpu, such as
3246  * vcpu->cpu migration, should not allow system_timestamp from
3247  * the rest of the vcpus to remain static. Otherwise ntp frequency
3248  * correction applies to one vcpu's system_timestamp but not
3249  * the others.
3250  *
3251  * So in those cases, request a kvmclock update for all vcpus.
3252  * We need to rate-limit these requests though, as they can
3253  * considerably slow guests that have a large number of vcpus.
3254  * The time for a remote vcpu to update its kvmclock is bound
3255  * by the delay we use to rate-limit the updates.
3256  */
3257
3258 #define KVMCLOCK_UPDATE_DELAY msecs_to_jiffies(100)
3259
3260 static void kvmclock_update_fn(struct work_struct *work)
3261 {
3262         unsigned long i;
3263         struct delayed_work *dwork = to_delayed_work(work);
3264         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3265                                            kvmclock_update_work);
3266         struct kvm *kvm = container_of(ka, struct kvm, arch);
3267         struct kvm_vcpu *vcpu;
3268
3269         kvm_for_each_vcpu(i, vcpu, kvm) {
3270                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3271                 kvm_vcpu_kick(vcpu);
3272         }
3273 }
3274
3275 static void kvm_gen_kvmclock_update(struct kvm_vcpu *v)
3276 {
3277         struct kvm *kvm = v->kvm;
3278
3279         kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
3280         schedule_delayed_work(&kvm->arch.kvmclock_update_work,
3281                                         KVMCLOCK_UPDATE_DELAY);
3282 }
3283
3284 #define KVMCLOCK_SYNC_PERIOD (300 * HZ)
3285
3286 static void kvmclock_sync_fn(struct work_struct *work)
3287 {
3288         struct delayed_work *dwork = to_delayed_work(work);
3289         struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
3290                                            kvmclock_sync_work);
3291         struct kvm *kvm = container_of(ka, struct kvm, arch);
3292
3293         if (!kvmclock_periodic_sync)
3294                 return;
3295
3296         schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0);
3297         schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
3298                                         KVMCLOCK_SYNC_PERIOD);
3299 }
3300
3301 /* These helpers are safe iff @msr is known to be an MCx bank MSR. */
3302 static bool is_mci_control_msr(u32 msr)
3303 {
3304         return (msr & 3) == 0;
3305 }
3306 static bool is_mci_status_msr(u32 msr)
3307 {
3308         return (msr & 3) == 1;
3309 }
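
/*
 * The (msr & 3) tests rely on the architectural bank layout: starting at
 * MSR_IA32_MC0_CTL (which is 4-aligned), each bank occupies four
 * consecutive MSRs, roughly:
 *
 *	CTL    = MSR_IA32_MC0_CTL + 4 * bank + 0
 *	STATUS = MSR_IA32_MC0_CTL + 4 * bank + 1
 *	ADDR   = MSR_IA32_MC0_CTL + 4 * bank + 2
 *	MISC   = MSR_IA32_MC0_CTL + 4 * bank + 3
 *
 * so bits 1:0 of the MSR index identify the register within the bank.
 */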
3310
3311 /*
3312  * On AMD, HWCR[McStatusWrEn] controls whether setting MCi_STATUS results in #GP.
3313  */
3314 static bool can_set_mci_status(struct kvm_vcpu *vcpu)
3315 {
3316         /* McStatusWrEn enabled? */
3317         if (guest_cpuid_is_amd_or_hygon(vcpu))
3318                 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18));
3319
3320         return false;
3321 }
3322
3323 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3324 {
3325         u64 mcg_cap = vcpu->arch.mcg_cap;
3326         unsigned bank_num = mcg_cap & 0xff;
3327         u32 msr = msr_info->index;
3328         u64 data = msr_info->data;
3329         u32 offset, last_msr;
3330
3331         switch (msr) {
3332         case MSR_IA32_MCG_STATUS:
3333                 vcpu->arch.mcg_status = data;
3334                 break;
3335         case MSR_IA32_MCG_CTL:
3336                 if (!(mcg_cap & MCG_CTL_P) &&
3337                     (data || !msr_info->host_initiated))
3338                         return 1;
3339                 if (data != 0 && data != ~(u64)0)
3340                         return 1;
3341                 vcpu->arch.mcg_ctl = data;
3342                 break;
3343         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3344                 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
3345                 if (msr > last_msr)
3346                         return 1;
3347
3348                 if (!(mcg_cap & MCG_CMCI_P) && (data || !msr_info->host_initiated))
3349                         return 1;
3350                 /* An attempt to write a 1 to a reserved bit raises #GP */
3351                 if (data & ~(MCI_CTL2_CMCI_EN | MCI_CTL2_CMCI_THRESHOLD_MASK))
3352                         return 1;
3353                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
3354                                             last_msr + 1 - MSR_IA32_MC0_CTL2);
3355                 vcpu->arch.mci_ctl2_banks[offset] = data;
3356                 break;
3357         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3358                 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
3359                 if (msr > last_msr)
3360                         return 1;
3361
3362                 /*
3363                  * Only 0 or all 1s can be written to IA32_MCi_CTL, all other
3364                  * values are architecturally undefined.  But, some Linux
3365                  * kernels clear bit 10 in bank 4 to work around a BIOS/GART TLB
3366                  * issue on AMD K8s, allow bit 10 to be clear when setting all
3367                  * other bits in order to avoid an uncaught #GP in the guest.
3368                  *
3369                  * UNIXWARE clears bit 0 of MC1_CTL to ignore correctable,
3370                  * single-bit ECC data errors.
3371                  */
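                /*
                 * In other words, the check below accepts 0, ~0ull, and
                 * ~0ull with bit 10 and/or bit 0 cleared, and rejects
                 * everything else.
                 */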
3372                 if (is_mci_control_msr(msr) &&
3373                     data != 0 && (data | (1 << 10) | 1) != ~(u64)0)
3374                         return 1;
3375
3376                 /*
3377                  * All CPUs allow writing 0 to MCi_STATUS MSRs to clear the MSR.
3378                  * AMD-based CPUs allow non-zero values, but if and only if
3379                  * HWCR[McStatusWrEn] is set.
3380                  */
3381                 if (!msr_info->host_initiated && is_mci_status_msr(msr) &&
3382                     data != 0 && !can_set_mci_status(vcpu))
3383                         return 1;
3384
3385                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
3386                                             last_msr + 1 - MSR_IA32_MC0_CTL);
3387                 vcpu->arch.mce_banks[offset] = data;
3388                 break;
3389         default:
3390                 return 1;
3391         }
3392         return 0;
3393 }
3394
3395 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu)
3396 {
3397         u64 mask = KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;
3398
3399         return (vcpu->arch.apf.msr_en_val & mask) == mask;
3400 }
3401
3402 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
3403 {
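        /*
         * MSR_KVM_ASYNC_PF_EN layout (see Documentation/virt/kvm/msr.rst):
         * bits 3:0 carry the enable/delivery flags, bits 5:4 are reserved,
         * and bits 63:6 hold the GPA of the 64-byte aligned APF data area,
         * which the mask below recovers.
         */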
3404         gpa_t gpa = data & ~0x3f;
3405
3406         /* Bits 4:5 are reserved, should be zero */
3407         if (data & 0x30)
3408                 return 1;
3409
3410         if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) &&
3411             (data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT))
3412                 return 1;
3413
3414         if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) &&
3415             (data & KVM_ASYNC_PF_DELIVERY_AS_INT))
3416                 return 1;
3417
3418         if (!lapic_in_kernel(vcpu))
3419                 return data ? 1 : 0;
3420
3421         vcpu->arch.apf.msr_en_val = data;
3422
3423         if (!kvm_pv_async_pf_enabled(vcpu)) {
3424                 kvm_clear_async_pf_completion_queue(vcpu);
3425                 kvm_async_pf_hash_reset(vcpu);
3426                 return 0;
3427         }
3428
3429         if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
3430                                         sizeof(u64)))
3431                 return 1;
3432
3433         vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
3434         vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;
3435
3436         kvm_async_pf_wakeup_all(vcpu);
3437
3438         return 0;
3439 }
3440
3441 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)
3442 {
3443         /* Bits 8-63 are reserved */
3444         if (data >> 8)
3445                 return 1;
3446
3447         if (!lapic_in_kernel(vcpu))
3448                 return 1;
3449
3450         vcpu->arch.apf.msr_int_val = data;
3451
3452         vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK;
3453
3454         return 0;
3455 }
3456
3457 static void kvmclock_reset(struct kvm_vcpu *vcpu)
3458 {
3459         kvm_gpc_deactivate(&vcpu->arch.pv_time);
3460         vcpu->arch.time = 0;
3461 }
3462
3463 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu)
3464 {
3465         ++vcpu->stat.tlb_flush;
3466         static_call(kvm_x86_flush_tlb_all)(vcpu);
3467
3468         /* Flushing all ASIDs flushes the current ASID... */
3469         kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
3470 }
3471
3472 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu)
3473 {
3474         ++vcpu->stat.tlb_flush;
3475
3476         if (!tdp_enabled) {
3477                 /*
3478                  * A TLB flush on behalf of the guest is equivalent to
3479                  * INVPCID(all), toggling CR4.PGE, etc., which requires
3480                  * a forced sync of the shadow page tables.  Ensure all the
3481                  * roots are synced and the guest TLB in hardware is clean.
3482                  */
3483                 kvm_mmu_sync_roots(vcpu);
3484                 kvm_mmu_sync_prev_roots(vcpu);
3485         }
3486
3487         static_call(kvm_x86_flush_tlb_guest)(vcpu);
3488
3489         /*
3490          * Flushing all "guest" TLB is always a superset of Hyper-V's fine
3491          * grained flushing.
3492          */
3493         kvm_hv_vcpu_purge_flush_tlb(vcpu);
3494 }
3495
3496
3497 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu)
3498 {
3499         ++vcpu->stat.tlb_flush;
3500         static_call(kvm_x86_flush_tlb_current)(vcpu);
3501 }
3502
3503 /*
3504  * Service "local" TLB flush requests, which are specific to the current MMU
3505  * context.  In addition to the generic event handling in vcpu_enter_guest(),
3506  * TLB flushes that are targeted at an MMU context also need to be serviced
3507  * prior to nested VM-Enter/VM-Exit.
3508  */
3509 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu)
3510 {
3511         if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3512                 kvm_vcpu_flush_tlb_current(vcpu);
3513
3514         if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu))
3515                 kvm_vcpu_flush_tlb_guest(vcpu);
3516 }
3517 EXPORT_SYMBOL_GPL(kvm_service_local_tlb_flush_requests);
3518
3519 static void record_steal_time(struct kvm_vcpu *vcpu)
3520 {
3521         struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
3522         struct kvm_steal_time __user *st;
3523         struct kvm_memslots *slots;
3524         gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
3525         u64 steal;
3526         u32 version;
3527
3528         if (kvm_xen_msr_enabled(vcpu->kvm)) {
3529                 kvm_xen_runstate_set_running(vcpu);
3530                 return;
3531         }
3532
3533         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
3534                 return;
3535
3536         if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm))
3537                 return;
3538
3539         slots = kvm_memslots(vcpu->kvm);
3540
3541         if (unlikely(slots->generation != ghc->generation ||
3542                      gpa != ghc->gpa ||
3543                      kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
3544                 /* We rely on the fact that it fits in a single page. */
3545                 BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
3546
3547                 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
3548                     kvm_is_error_hva(ghc->hva) || !ghc->memslot)
3549                         return;
3550         }
3551
3552         st = (struct kvm_steal_time __user *)ghc->hva;
3553         /*
3554          * Doing a TLB flush here, on the guest's behalf, can avoid
3555          * expensive IPIs.
3556          */
3557         if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) {
3558                 u8 st_preempted = 0;
3559                 int err = -EFAULT;
3560
3561                 if (!user_access_begin(st, sizeof(*st)))
3562                         return;
3563
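                /*
                 * Atomically read-and-clear st->preempted: the xchgb pulls
                 * the guest-visible value into st_preempted and stores 0,
                 * the "xor %1, %1" clears err on success, and the extable
                 * entry skips to label 2 on a fault, leaving err == -EFAULT.
                 */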
3564                 asm volatile("1: xchgb %0, %2\n"
3565                              "xor %1, %1\n"
3566                              "2:\n"
3567                              _ASM_EXTABLE_UA(1b, 2b)
3568                              : "+q" (st_preempted),
3569                                "+&r" (err),
3570                                "+m" (st->preempted));
3571                 if (err)
3572                         goto out;
3573
3574                 user_access_end();
3575
3576                 vcpu->arch.st.preempted = 0;
3577
3578                 trace_kvm_pv_tlb_flush(vcpu->vcpu_id,
3579                                        st_preempted & KVM_VCPU_FLUSH_TLB);
3580                 if (st_preempted & KVM_VCPU_FLUSH_TLB)
3581                         kvm_vcpu_flush_tlb_guest(vcpu);
3582
3583                 if (!user_access_begin(st, sizeof(*st)))
3584                         goto dirty;
3585         } else {
3586                 if (!user_access_begin(st, sizeof(*st)))
3587                         return;
3588
3589                 unsafe_put_user(0, &st->preempted, out);
3590                 vcpu->arch.st.preempted = 0;
3591         }
3592
3593         unsafe_get_user(version, &st->version, out);
3594         if (version & 1)
3595                 version += 1;  /* first time write, random junk */
3596
3597         version += 1;
3598         unsafe_put_user(version, &st->version, out);
3599
3600         smp_wmb();
3601
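        /* Accumulate scheduler run_delay (time spent runnable but not running) since the last update. */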
3602         unsafe_get_user(steal, &st->steal, out);
3603         steal += current->sched_info.run_delay -
3604                 vcpu->arch.st.last_steal;
3605         vcpu->arch.st.last_steal = current->sched_info.run_delay;
3606         unsafe_put_user(steal, &st->steal, out);
3607
3608         version += 1;
3609         unsafe_put_user(version, &st->version, out);
3610
3611  out:
3612         user_access_end();
3613  dirty:
3614         mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
3615 }
3616
3617 static bool kvm_is_msr_to_save(u32 msr_index)
3618 {
3619         unsigned int i;
3620
3621         for (i = 0; i < num_msrs_to_save; i++) {
3622                 if (msrs_to_save[i] == msr_index)
3623                         return true;
3624         }
3625
3626         return false;
3627 }
3628
3629 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
3630 {
3631         u32 msr = msr_info->index;
3632         u64 data = msr_info->data;
3633
3634         if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr)
3635                 return kvm_xen_write_hypercall_page(vcpu, data);
3636
3637         switch (msr) {
3638         case MSR_AMD64_NB_CFG:
3639         case MSR_IA32_UCODE_WRITE:
3640         case MSR_VM_HSAVE_PA:
3641         case MSR_AMD64_PATCH_LOADER:
3642         case MSR_AMD64_BU_CFG2:
3643         case MSR_AMD64_DC_CFG:
3644         case MSR_F15H_EX_CFG:
3645                 break;
3646
3647         case MSR_IA32_UCODE_REV:
3648                 if (msr_info->host_initiated)
3649                         vcpu->arch.microcode_version = data;
3650                 break;
3651         case MSR_IA32_ARCH_CAPABILITIES:
3652                 if (!msr_info->host_initiated)
3653                         return 1;
3654                 vcpu->arch.arch_capabilities = data;
3655                 break;
3656         case MSR_IA32_PERF_CAPABILITIES:
3657                 if (!msr_info->host_initiated)
3658                         return 1;
3659                 if (data & ~kvm_caps.supported_perf_cap)
3660                         return 1;
3661
3662                 /*
3663                  * Note, this is not just a performance optimization!  KVM
3664                  * disallows changing feature MSRs after the vCPU has run; PMU
3665                  * refresh will bug the VM if called after the vCPU has run.
3666                  */
3667                 if (vcpu->arch.perf_capabilities == data)
3668                         break;
3669
3670                 vcpu->arch.perf_capabilities = data;
3671                 kvm_pmu_refresh(vcpu);
3672                 break;
3673         case MSR_IA32_PRED_CMD:
3674                 if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
3675                         return 1;
3676
3677                 if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
3678                         return 1;
3679                 if (!data)
3680                         break;
3681
3682                 wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
3683                 break;
3684         case MSR_IA32_FLUSH_CMD:
3685                 if (!msr_info->host_initiated &&
3686                     !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D))
3687                         return 1;
3688
3689                 if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D) || (data & ~L1D_FLUSH))
3690                         return 1;
3691                 if (!data)
3692                         break;
3693
3694                 wrmsrl(MSR_IA32_FLUSH_CMD, L1D_FLUSH);
3695                 break;
3696         case MSR_EFER:
3697                 return set_efer(vcpu, msr_info);
3698         case MSR_K7_HWCR:
3699                 data &= ~(u64)0x40;     /* ignore flush filter disable */
3700                 data &= ~(u64)0x100;    /* ignore ignne emulation enable */
3701                 data &= ~(u64)0x8;      /* ignore TLB cache disable */
3702
3703                 /* Handle McStatusWrEn */
3704                 if (data == BIT_ULL(18)) {
3705                         vcpu->arch.msr_hwcr = data;
3706                 } else if (data != 0) {
3707                         kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3708                         return 1;
3709                 }
3710                 break;
3711         case MSR_FAM10H_MMIO_CONF_BASE:
3712                 if (data != 0) {
3713                         kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3714                         return 1;
3715                 }
3716                 break;
3717         case MSR_IA32_CR_PAT:
3718                 if (!kvm_pat_valid(data))
3719                         return 1;
3720
3721                 vcpu->arch.pat = data;
3722                 break;
3723         case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
3724         case MSR_MTRRdefType:
3725                 return kvm_mtrr_set_msr(vcpu, msr, data);
3726         case MSR_IA32_APICBASE:
3727                 return kvm_set_apic_base(vcpu, msr_info);
3728         case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
3729                 return kvm_x2apic_msr_write(vcpu, msr, data);
3730         case MSR_IA32_TSC_DEADLINE:
3731                 kvm_set_lapic_tscdeadline_msr(vcpu, data);
3732                 break;
3733         case MSR_IA32_TSC_ADJUST:
3734                 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) {
3735                         if (!msr_info->host_initiated) {
3736                                 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
3737                                 adjust_tsc_offset_guest(vcpu, adj);
3738                                 /* Before returning to the guest, tsc_timestamp must be adjusted
3739                                  * as well, otherwise the guest's per-CPU pvclock time could jump.
3740                                  */
3741                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
3742                         }
3743                         vcpu->arch.ia32_tsc_adjust_msr = data;
3744                 }
3745                 break;
3746         case MSR_IA32_MISC_ENABLE: {
3747                 u64 old_val = vcpu->arch.ia32_misc_enable_msr;
3748
3749                 if (!msr_info->host_initiated) {
3750                         /* RO bits */
3751                         if ((old_val ^ data) & MSR_IA32_MISC_ENABLE_PMU_RO_MASK)
3752                                 return 1;
3753
3754                         /* R bits, i.e. writes are ignored, but don't fault. */
3755                         data = data & ~MSR_IA32_MISC_ENABLE_EMON;
3756                         data |= old_val & MSR_IA32_MISC_ENABLE_EMON;
3757                 }
3758
3759                 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) &&
3760                     ((old_val ^ data)  & MSR_IA32_MISC_ENABLE_MWAIT)) {
3761                         if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
3762                                 return 1;
3763                         vcpu->arch.ia32_misc_enable_msr = data;
3764                         kvm_update_cpuid_runtime(vcpu);
3765                 } else {
3766                         vcpu->arch.ia32_misc_enable_msr = data;
3767                 }
3768                 break;
3769         }
3770         case MSR_IA32_SMBASE:
3771                 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
3772                         return 1;
3773                 vcpu->arch.smbase = data;
3774                 break;
3775         case MSR_IA32_POWER_CTL:
3776                 vcpu->arch.msr_ia32_power_ctl = data;
3777                 break;
3778         case MSR_IA32_TSC:
3779                 if (msr_info->host_initiated) {
3780                         kvm_synchronize_tsc(vcpu, data);
3781                 } else {
3782                         u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset;
3783                         adjust_tsc_offset_guest(vcpu, adj);
3784                         vcpu->arch.ia32_tsc_adjust_msr += adj;
3785                 }
3786                 break;
3787         case MSR_IA32_XSS:
3788                 if (!msr_info->host_initiated &&
3789                     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
3790                         return 1;
3791                 /*
3792                  * KVM supports exposing PT to the guest, but does not support
3793                  * IA32_XSS[bit 8]. Guests have to use RDMSR/WRMSR rather than
3794                  * XSAVES/XRSTORS to save/restore PT MSRs.
3795                  */
3796                 if (data & ~kvm_caps.supported_xss)
3797                         return 1;
3798                 vcpu->arch.ia32_xss = data;
3799                 kvm_update_cpuid_runtime(vcpu);
3800                 break;
3801         case MSR_SMI_COUNT:
3802                 if (!msr_info->host_initiated)
3803                         return 1;
3804                 vcpu->arch.smi_count = data;
3805                 break;
3806         case MSR_KVM_WALL_CLOCK_NEW:
3807                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3808                         return 1;
3809
3810                 vcpu->kvm->arch.wall_clock = data;
3811                 kvm_write_wall_clock(vcpu->kvm, data, 0);
3812                 break;
3813         case MSR_KVM_WALL_CLOCK:
3814                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3815                         return 1;
3816
3817                 vcpu->kvm->arch.wall_clock = data;
3818                 kvm_write_wall_clock(vcpu->kvm, data, 0);
3819                 break;
3820         case MSR_KVM_SYSTEM_TIME_NEW:
3821                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
3822                         return 1;
3823
3824                 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated);
3825                 break;
3826         case MSR_KVM_SYSTEM_TIME:
3827                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
3828                         return 1;
3829
3830                 kvm_write_system_time(vcpu, data, true,  msr_info->host_initiated);
3831                 break;
3832         case MSR_KVM_ASYNC_PF_EN:
3833                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
3834                         return 1;
3835
3836                 if (kvm_pv_enable_async_pf(vcpu, data))
3837                         return 1;
3838                 break;
3839         case MSR_KVM_ASYNC_PF_INT:
3840                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3841                         return 1;
3842
3843                 if (kvm_pv_enable_async_pf_int(vcpu, data))
3844                         return 1;
3845                 break;
3846         case MSR_KVM_ASYNC_PF_ACK:
3847                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
3848                         return 1;
3849                 if (data & 0x1) {
3850                         vcpu->arch.apf.pageready_pending = false;
3851                         kvm_check_async_pf_completion(vcpu);
3852                 }
3853                 break;
3854         case MSR_KVM_STEAL_TIME:
3855                 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
3856                         return 1;
3857
3858                 if (unlikely(!sched_info_on()))
3859                         return 1;
3860
3861                 if (data & KVM_STEAL_RESERVED_MASK)
3862                         return 1;
3863
3864                 vcpu->arch.st.msr_val = data;
3865
3866                 if (!(data & KVM_MSR_ENABLED))
3867                         break;
3868
3869                 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
3870
3871                 break;
3872         case MSR_KVM_PV_EOI_EN:
3873                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
3874                         return 1;
3875
3876                 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8)))
3877                         return 1;
3878                 break;
3879
3880         case MSR_KVM_POLL_CONTROL:
3881                 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
3882                         return 1;
3883
3884                 /* only enable bit supported */
3885                 if (data & (-1ULL << 1))
3886                         return 1;
3887
3888                 vcpu->arch.msr_kvm_poll_control = data;
3889                 break;
3890
3891         case MSR_IA32_MCG_CTL:
3892         case MSR_IA32_MCG_STATUS:
3893         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
3894         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
3895                 return set_msr_mce(vcpu, msr_info);
3896
3897         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
3898         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
3899         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
3900         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
3901                 if (kvm_pmu_is_valid_msr(vcpu, msr))
3902                         return kvm_pmu_set_msr(vcpu, msr_info);
3903
3904                 if (data)
3905                         kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3906                 break;
3907         case MSR_K7_CLK_CTL:
3908                 /*
3909                  * Ignore all writes to this no longer documented MSR.
3910                  * Writes are only relevant for old K7 processors,
3911                  * all pre-dating SVM, but a recommended workaround from
3912                  * AMD for these chips. It is possible to specify the
3913                  * affected processor models on the command line, hence
3914                  * the need to ignore the workaround.
3915                  */
3916                 break;
3917         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
3918         case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
3919         case HV_X64_MSR_SYNDBG_OPTIONS:
3920         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
3921         case HV_X64_MSR_CRASH_CTL:
3922         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
3923         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
3924         case HV_X64_MSR_TSC_EMULATION_CONTROL:
3925         case HV_X64_MSR_TSC_EMULATION_STATUS:
3926         case HV_X64_MSR_TSC_INVARIANT_CONTROL:
3927                 return kvm_hv_set_msr_common(vcpu, msr, data,
3928                                              msr_info->host_initiated);
3929         case MSR_IA32_BBL_CR_CTL3:
3930                 /* Drop writes to this legacy MSR -- see rdmsr
3931                  * counterpart for further detail.
3932                  */
3933                 kvm_pr_unimpl_wrmsr(vcpu, msr, data);
3934                 break;
3935         case MSR_AMD64_OSVW_ID_LENGTH:
3936                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3937                         return 1;
3938                 vcpu->arch.osvw.length = data;
3939                 break;
3940         case MSR_AMD64_OSVW_STATUS:
3941                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
3942                         return 1;
3943                 vcpu->arch.osvw.status = data;
3944                 break;
3945         case MSR_PLATFORM_INFO:
3946                 if (!msr_info->host_initiated ||
3947                     (!(data & MSR_PLATFORM_INFO_CPUID_FAULT) &&
3948                      cpuid_fault_enabled(vcpu)))
3949                         return 1;
3950                 vcpu->arch.msr_platform_info = data;
3951                 break;
3952         case MSR_MISC_FEATURES_ENABLES:
3953                 if (data & ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT ||
3954                     (data & MSR_MISC_FEATURES_ENABLES_CPUID_FAULT &&
3955                      !supports_cpuid_fault(vcpu)))
3956                         return 1;
3957                 vcpu->arch.msr_misc_features_enables = data;
3958                 break;
3959 #ifdef CONFIG_X86_64
3960         case MSR_IA32_XFD:
3961                 if (!msr_info->host_initiated &&
3962                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
3963                         return 1;
3964
3965                 if (data & ~kvm_guest_supported_xfd(vcpu))
3966                         return 1;
3967
3968                 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
3969                 break;
3970         case MSR_IA32_XFD_ERR:
3971                 if (!msr_info->host_initiated &&
3972                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
3973                         return 1;
3974
3975                 if (data & ~kvm_guest_supported_xfd(vcpu))
3976                         return 1;
3977
3978                 vcpu->arch.guest_fpu.xfd_err = data;
3979                 break;
3980 #endif
3981         default:
3982                 if (kvm_pmu_is_valid_msr(vcpu, msr))
3983                         return kvm_pmu_set_msr(vcpu, msr_info);
3984
3985                 /*
3986                  * Userspace is allowed to write '0' to MSRs that KVM reports
3987          * as to-be-saved, even if the MSR isn't fully supported.
3988                  */
3989                 if (msr_info->host_initiated && !data &&
3990                     kvm_is_msr_to_save(msr))
3991                         break;
3992
3993                 return KVM_MSR_RET_INVALID;
3994         }
3995         return 0;
3996 }
3997 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
3998
3999 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
4000 {
4001         u64 data;
4002         u64 mcg_cap = vcpu->arch.mcg_cap;
4003         unsigned bank_num = mcg_cap & 0xff;
4004         u32 offset, last_msr;
4005
4006         switch (msr) {
4007         case MSR_IA32_P5_MC_ADDR:
4008         case MSR_IA32_P5_MC_TYPE:
4009                 data = 0;
4010                 break;
4011         case MSR_IA32_MCG_CAP:
4012                 data = vcpu->arch.mcg_cap;
4013                 break;
4014         case MSR_IA32_MCG_CTL:
4015                 if (!(mcg_cap & MCG_CTL_P) && !host)
4016                         return 1;
4017                 data = vcpu->arch.mcg_ctl;
4018                 break;
4019         case MSR_IA32_MCG_STATUS:
4020                 data = vcpu->arch.mcg_status;
4021                 break;
4022         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4023                 last_msr = MSR_IA32_MCx_CTL2(bank_num) - 1;
4024                 if (msr > last_msr)
4025                         return 1;
4026
4027                 if (!(mcg_cap & MCG_CMCI_P) && !host)
4028                         return 1;
4029                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL2,
4030                                             last_msr + 1 - MSR_IA32_MC0_CTL2);
4031                 data = vcpu->arch.mci_ctl2_banks[offset];
4032                 break;
4033         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4034                 last_msr = MSR_IA32_MCx_CTL(bank_num) - 1;
4035                 if (msr > last_msr)
4036                         return 1;
4037
4038                 offset = array_index_nospec(msr - MSR_IA32_MC0_CTL,
4039                                             last_msr + 1 - MSR_IA32_MC0_CTL);
4040                 data = vcpu->arch.mce_banks[offset];
4041                 break;
4042         default:
4043                 return 1;
4044         }
4045         *pdata = data;
4046         return 0;
4047 }
4048
4049 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
4050 {
4051         switch (msr_info->index) {
4052         case MSR_IA32_PLATFORM_ID:
4053         case MSR_IA32_EBL_CR_POWERON:
4054         case MSR_IA32_LASTBRANCHFROMIP:
4055         case MSR_IA32_LASTBRANCHTOIP:
4056         case MSR_IA32_LASTINTFROMIP:
4057         case MSR_IA32_LASTINTTOIP:
4058         case MSR_AMD64_SYSCFG:
4059         case MSR_K8_TSEG_ADDR:
4060         case MSR_K8_TSEG_MASK:
4061         case MSR_VM_HSAVE_PA:
4062         case MSR_K8_INT_PENDING_MSG:
4063         case MSR_AMD64_NB_CFG:
4064         case MSR_FAM10H_MMIO_CONF_BASE:
4065         case MSR_AMD64_BU_CFG2:
4066         case MSR_IA32_PERF_CTL:
4067         case MSR_AMD64_DC_CFG:
4068         case MSR_F15H_EX_CFG:
4069         /*
4070          * Intel Sandy Bridge CPUs must support the RAPL (running average power
4071          * limit) MSRs. Just return 0, as we do not want to expose the host
4072          * data here. Do not conditionalize this on CPUID, as KVM does not do
4073          * so for existing CPU-specific MSRs.
4074          */
4075         case MSR_RAPL_POWER_UNIT:
4076         case MSR_PP0_ENERGY_STATUS:     /* Power plane 0 (core) */
4077         case MSR_PP1_ENERGY_STATUS:     /* Power plane 1 (graphics uncore) */
4078         case MSR_PKG_ENERGY_STATUS:     /* Total package */
4079         case MSR_DRAM_ENERGY_STATUS:    /* DRAM controller */
4080                 msr_info->data = 0;
4081                 break;
4082         case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
4083         case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
4084         case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR1:
4085         case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL1:
4086                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4087                         return kvm_pmu_get_msr(vcpu, msr_info);
4088                 msr_info->data = 0;
4089                 break;
4090         case MSR_IA32_UCODE_REV:
4091                 msr_info->data = vcpu->arch.microcode_version;
4092                 break;
4093         case MSR_IA32_ARCH_CAPABILITIES:
4094                 if (!msr_info->host_initiated &&
4095                     !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
4096                         return 1;
4097                 msr_info->data = vcpu->arch.arch_capabilities;
4098                 break;
4099         case MSR_IA32_PERF_CAPABILITIES:
4100                 if (!msr_info->host_initiated &&
4101                     !guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
4102                         return 1;
4103                 msr_info->data = vcpu->arch.perf_capabilities;
4104                 break;
4105         case MSR_IA32_POWER_CTL:
4106                 msr_info->data = vcpu->arch.msr_ia32_power_ctl;
4107                 break;
4108         case MSR_IA32_TSC: {
4109                 /*
4110                  * Intel SDM states that MSR_IA32_TSC read adds the TSC offset
4111                  * even when not intercepted. AMD manual doesn't explicitly
4112                  * state this but appears to behave the same.
4113                  *
4114                  * On userspace reads and writes, however, we unconditionally
4115                  * return L1's TSC value to ensure backwards-compatible
4116                  * behavior for migration.
4117                  */
4118                 u64 offset, ratio;
4119
4120                 if (msr_info->host_initiated) {
4121                         offset = vcpu->arch.l1_tsc_offset;
4122                         ratio = vcpu->arch.l1_tsc_scaling_ratio;
4123                 } else {
4124                         offset = vcpu->arch.tsc_offset;
4125                         ratio = vcpu->arch.tsc_scaling_ratio;
4126                 }
4127
4128                 msr_info->data = kvm_scale_tsc(rdtsc(), ratio) + offset;
4129                 break;
4130         }
4131         case MSR_IA32_CR_PAT:
4132                 msr_info->data = vcpu->arch.pat;
4133                 break;
4134         case MSR_MTRRcap:
4135         case MTRRphysBase_MSR(0) ... MSR_MTRRfix4K_F8000:
4136         case MSR_MTRRdefType:
4137                 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4138         case 0xcd: /* fsb frequency */
4139                 msr_info->data = 3;
4140                 break;
4141                 /*
4142                  * MSR_EBC_FREQUENCY_ID
4143                  * Conservative value valid for even the basic CPU models.
4144                  * Models 0,1: 000 in bits 23:21 indicating a bus speed of
4145                  * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
4146                  * and 266MHz for models 3 and 4. Set Core Clock
4147                  * Frequency to System Bus Frequency Ratio to 1 (bits
4148                  * 31:24) even though these are only valid for CPU
4149                  * models > 2, as guests may otherwise end up dividing
4150                  * or multiplying by zero.
4151                  */
4152         case MSR_EBC_FREQUENCY_ID:
4153                 msr_info->data = 1 << 24;
4154                 break;
4155         case MSR_IA32_APICBASE:
4156                 msr_info->data = kvm_get_apic_base(vcpu);
4157                 break;
4158         case APIC_BASE_MSR ... APIC_BASE_MSR + 0xff:
4159                 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4160         case MSR_IA32_TSC_DEADLINE:
4161                 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu);
4162                 break;
4163         case MSR_IA32_TSC_ADJUST:
4164                 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
4165                 break;
4166         case MSR_IA32_MISC_ENABLE:
4167                 msr_info->data = vcpu->arch.ia32_misc_enable_msr;
4168                 break;
4169         case MSR_IA32_SMBASE:
4170                 if (!IS_ENABLED(CONFIG_KVM_SMM) || !msr_info->host_initiated)
4171                         return 1;
4172                 msr_info->data = vcpu->arch.smbase;
4173                 break;
4174         case MSR_SMI_COUNT:
4175                 msr_info->data = vcpu->arch.smi_count;
4176                 break;
4177         case MSR_IA32_PERF_STATUS:
4178                 /* TSC increment by tick */
4179                 msr_info->data = 1000ULL;
4180                 /* CPU multiplier */
4181                 msr_info->data |= (((uint64_t)4ULL) << 40);
4182                 break;
4183         case MSR_EFER:
4184                 msr_info->data = vcpu->arch.efer;
4185                 break;
4186         case MSR_KVM_WALL_CLOCK:
4187                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4188                         return 1;
4189
4190                 msr_info->data = vcpu->kvm->arch.wall_clock;
4191                 break;
4192         case MSR_KVM_WALL_CLOCK_NEW:
4193                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4194                         return 1;
4195
4196                 msr_info->data = vcpu->kvm->arch.wall_clock;
4197                 break;
4198         case MSR_KVM_SYSTEM_TIME:
4199                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE))
4200                         return 1;
4201
4202                 msr_info->data = vcpu->arch.time;
4203                 break;
4204         case MSR_KVM_SYSTEM_TIME_NEW:
4205                 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2))
4206                         return 1;
4207
4208                 msr_info->data = vcpu->arch.time;
4209                 break;
4210         case MSR_KVM_ASYNC_PF_EN:
4211                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF))
4212                         return 1;
4213
4214                 msr_info->data = vcpu->arch.apf.msr_en_val;
4215                 break;
4216         case MSR_KVM_ASYNC_PF_INT:
4217                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4218                         return 1;
4219
4220                 msr_info->data = vcpu->arch.apf.msr_int_val;
4221                 break;
4222         case MSR_KVM_ASYNC_PF_ACK:
4223                 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT))
4224                         return 1;
4225
4226                 msr_info->data = 0;
4227                 break;
4228         case MSR_KVM_STEAL_TIME:
4229                 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME))
4230                         return 1;
4231
4232                 msr_info->data = vcpu->arch.st.msr_val;
4233                 break;
4234         case MSR_KVM_PV_EOI_EN:
4235                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI))
4236                         return 1;
4237
4238                 msr_info->data = vcpu->arch.pv_eoi.msr_val;
4239                 break;
4240         case MSR_KVM_POLL_CONTROL:
4241                 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL))
4242                         return 1;
4243
4244                 msr_info->data = vcpu->arch.msr_kvm_poll_control;
4245                 break;
4246         case MSR_IA32_P5_MC_ADDR:
4247         case MSR_IA32_P5_MC_TYPE:
4248         case MSR_IA32_MCG_CAP:
4249         case MSR_IA32_MCG_CTL:
4250         case MSR_IA32_MCG_STATUS:
4251         case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1:
4252         case MSR_IA32_MC0_CTL2 ... MSR_IA32_MCx_CTL2(KVM_MAX_MCE_BANKS) - 1:
4253                 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4254                                    msr_info->host_initiated);
4255         case MSR_IA32_XSS:
4256                 if (!msr_info->host_initiated &&
4257                     !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
4258                         return 1;
4259                 msr_info->data = vcpu->arch.ia32_xss;
4260                 break;
4261         case MSR_K7_CLK_CTL:
4262                 /*
4263                  * Provide expected ramp-up count for K7. All other
4264                  * fields are set to zero, indicating minimum divisors for
4265                  * every field.
4266                  *
4267                  * This prevents guest kernels on AMD host with CPU
4268                  * type 6, model 8 and higher from exploding due to
4269                  * the rdmsr failing.
4270                  */
4271                 msr_info->data = 0x20000000;
4272                 break;
4273         case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
4274         case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
4275         case HV_X64_MSR_SYNDBG_OPTIONS:
4276         case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
4277         case HV_X64_MSR_CRASH_CTL:
4278         case HV_X64_MSR_STIMER0_CONFIG ... HV_X64_MSR_STIMER3_COUNT:
4279         case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
4280         case HV_X64_MSR_TSC_EMULATION_CONTROL:
4281         case HV_X64_MSR_TSC_EMULATION_STATUS:
4282         case HV_X64_MSR_TSC_INVARIANT_CONTROL:
4283                 return kvm_hv_get_msr_common(vcpu,
4284                                              msr_info->index, &msr_info->data,
4285                                              msr_info->host_initiated);
4286         case MSR_IA32_BBL_CR_CTL3:
4287                 /* This legacy MSR exists but isn't fully documented in current
4288                  * silicon.  It is, however, accessed by Windows XP in very
4289                  * narrow scenarios where it sets bit #19, itself documented as
4290                  * a "reserved" bit.  Make a best-effort attempt to return
4291                  * coherent read data should the rest of the register be
4292                  * interpreted by the guest:
4293                  *
4294                  * L2 cache control register 3: 64GB range, 256KB size,
4295                  * enabled, latency 0x1, configured
4296                  */
4297                 msr_info->data = 0xbe702111;
4298                 break;
4299         case MSR_AMD64_OSVW_ID_LENGTH:
4300                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
4301                         return 1;
4302                 msr_info->data = vcpu->arch.osvw.length;
4303                 break;
4304         case MSR_AMD64_OSVW_STATUS:
4305                 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
4306                         return 1;
4307                 msr_info->data = vcpu->arch.osvw.status;
4308                 break;
4309         case MSR_PLATFORM_INFO:
4310                 if (!msr_info->host_initiated &&
4311                     !vcpu->kvm->arch.guest_can_read_msr_platform_info)
4312                         return 1;
4313                 msr_info->data = vcpu->arch.msr_platform_info;
4314                 break;
4315         case MSR_MISC_FEATURES_ENABLES:
4316                 msr_info->data = vcpu->arch.msr_misc_features_enables;
4317                 break;
4318         case MSR_K7_HWCR:
4319                 msr_info->data = vcpu->arch.msr_hwcr;
4320                 break;
4321 #ifdef CONFIG_X86_64
4322         case MSR_IA32_XFD:
4323                 if (!msr_info->host_initiated &&
4324                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
4325                         return 1;
4326
4327                 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
4328                 break;
4329         case MSR_IA32_XFD_ERR:
4330                 if (!msr_info->host_initiated &&
4331                     !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
4332                         return 1;
4333
4334                 msr_info->data = vcpu->arch.guest_fpu.xfd_err;
4335                 break;
4336 #endif
4337         default:
4338                 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4339                         return kvm_pmu_get_msr(vcpu, msr_info);
4340
4341                 /*
4342                  * Userspace is allowed to read MSRs that KVM reports as
4343                  * to-be-saved, even if an MSR isn't fully supported.
4344                  */
4345                 if (msr_info->host_initiated &&
4346                     kvm_is_msr_to_save(msr_info->index)) {
4347                         msr_info->data = 0;
4348                         break;
4349                 }
4350
4351                 return KVM_MSR_RET_INVALID;
4352         }
4353         return 0;
4354 }
4355 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
4356
4357 /*
4358  * Read or write a bunch of msrs. All parameters are kernel addresses.
4359  *
4360  * @return number of msrs processed (read or written) successfully.
4361  */
4362 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
4363                     struct kvm_msr_entry *entries,
4364                     int (*do_msr)(struct kvm_vcpu *vcpu,
4365                                   unsigned index, u64 *data))
4366 {
4367         int i;
4368
4369         for (i = 0; i < msrs->nmsrs; ++i)
4370                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
4371                         break;
4372
4373         return i;
4374 }
4375
4376 /*
4377  * Read or write a bunch of msrs. Parameters are user addresses.
4378  *
4379  * @return number of msrs processed successfully.
4380  */
4381 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
4382                   int (*do_msr)(struct kvm_vcpu *vcpu,
4383                                 unsigned index, u64 *data),
4384                   int writeback)
4385 {
4386         struct kvm_msrs msrs;
4387         struct kvm_msr_entry *entries;
4388         unsigned size;
4389         int r;
4390
4391         r = -EFAULT;
4392         if (copy_from_user(&msrs, user_msrs, sizeof(msrs)))
4393                 goto out;
4394
4395         r = -E2BIG;
4396         if (msrs.nmsrs >= MAX_IO_MSRS)
4397                 goto out;
4398
4399         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
4400         entries = memdup_user(user_msrs->entries, size);
4401         if (IS_ERR(entries)) {
4402                 r = PTR_ERR(entries);
4403                 goto out;
4404         }
4405
4406         r = __msr_io(vcpu, &msrs, entries, do_msr);
4407
4408         if (writeback && copy_to_user(user_msrs->entries, entries, size))
4409                 r = -EFAULT;
4410
4411         kfree(entries);
4412 out:
4413         return r;
4414 }
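
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd and the MSR
 * index are assumptions): KVM_GET_MSRS and KVM_SET_MSRS funnel into msr_io()
 * above, and their return value is the number of entries processed rather
 * than 0/-errno, so callers compare it against nmsrs.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} req = {
 *		.hdr.nmsrs = 1,
 *		.entries = { { .index = 0x10 } },	// 0x10 == IA32_TSC
 *	};
 *	int done = ioctl(vcpu_fd, KVM_GET_MSRS, &req);
 *	// done == 1 on success; a smaller value names the first rejected entry
 */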
4415
4416 static inline bool kvm_can_mwait_in_guest(void)
4417 {
4418         return boot_cpu_has(X86_FEATURE_MWAIT) &&
4419                 !boot_cpu_has_bug(X86_BUG_MONITOR) &&
4420                 boot_cpu_has(X86_FEATURE_ARAT);
4421 }
4422
4423 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu,
4424                                             struct kvm_cpuid2 __user *cpuid_arg)
4425 {
4426         struct kvm_cpuid2 cpuid;
4427         int r;
4428
4429         r = -EFAULT;
4430         if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4431                 return r;
4432
4433         r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries);
4434         if (r)
4435                 return r;
4436
4437         r = -EFAULT;
4438         if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4439                 return r;
4440
4441         return 0;
4442 }
4443
4444 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
4445 {
4446         int r = 0;
4447
4448         switch (ext) {
4449         case KVM_CAP_IRQCHIP:
4450         case KVM_CAP_HLT:
4451         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
4452         case KVM_CAP_SET_TSS_ADDR:
4453         case KVM_CAP_EXT_CPUID:
4454         case KVM_CAP_EXT_EMUL_CPUID:
4455         case KVM_CAP_CLOCKSOURCE:
4456         case KVM_CAP_PIT:
4457         case KVM_CAP_NOP_IO_DELAY:
4458         case KVM_CAP_MP_STATE:
4459         case KVM_CAP_SYNC_MMU:
4460         case KVM_CAP_USER_NMI:
4461         case KVM_CAP_REINJECT_CONTROL:
4462         case KVM_CAP_IRQ_INJECT_STATUS:
4463         case KVM_CAP_IOEVENTFD:
4464         case KVM_CAP_IOEVENTFD_NO_LENGTH:
4465         case KVM_CAP_PIT2:
4466         case KVM_CAP_PIT_STATE2:
4467         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
4468         case KVM_CAP_VCPU_EVENTS:
4469         case KVM_CAP_HYPERV:
4470         case KVM_CAP_HYPERV_VAPIC:
4471         case KVM_CAP_HYPERV_SPIN:
4472         case KVM_CAP_HYPERV_SYNIC:
4473         case KVM_CAP_HYPERV_SYNIC2:
4474         case KVM_CAP_HYPERV_VP_INDEX:
4475         case KVM_CAP_HYPERV_EVENTFD:
4476         case KVM_CAP_HYPERV_TLBFLUSH:
4477         case KVM_CAP_HYPERV_SEND_IPI:
4478         case KVM_CAP_HYPERV_CPUID:
4479         case KVM_CAP_HYPERV_ENFORCE_CPUID:
4480         case KVM_CAP_SYS_HYPERV_CPUID:
4481         case KVM_CAP_PCI_SEGMENT:
4482         case KVM_CAP_DEBUGREGS:
4483         case KVM_CAP_X86_ROBUST_SINGLESTEP:
4484         case KVM_CAP_XSAVE:
4485         case KVM_CAP_ASYNC_PF:
4486         case KVM_CAP_ASYNC_PF_INT:
4487         case KVM_CAP_GET_TSC_KHZ:
4488         case KVM_CAP_KVMCLOCK_CTRL:
4489         case KVM_CAP_READONLY_MEM:
4490         case KVM_CAP_HYPERV_TIME:
4491         case KVM_CAP_IOAPIC_POLARITY_IGNORED:
4492         case KVM_CAP_TSC_DEADLINE_TIMER:
4493         case KVM_CAP_DISABLE_QUIRKS:
4494         case KVM_CAP_SET_BOOT_CPU_ID:
4495         case KVM_CAP_SPLIT_IRQCHIP:
4496         case KVM_CAP_IMMEDIATE_EXIT:
4497         case KVM_CAP_PMU_EVENT_FILTER:
4498         case KVM_CAP_PMU_EVENT_MASKED_EVENTS:
4499         case KVM_CAP_GET_MSR_FEATURES:
4500         case KVM_CAP_MSR_PLATFORM_INFO:
4501         case KVM_CAP_EXCEPTION_PAYLOAD:
4502         case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
4503         case KVM_CAP_SET_GUEST_DEBUG:
4504         case KVM_CAP_LAST_CPU:
4505         case KVM_CAP_X86_USER_SPACE_MSR:
4506         case KVM_CAP_X86_MSR_FILTER:
4507         case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
4508 #ifdef CONFIG_X86_SGX_KVM
4509         case KVM_CAP_SGX_ATTRIBUTE:
4510 #endif
4511         case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
4512         case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
4513         case KVM_CAP_SREGS2:
4514         case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
4515         case KVM_CAP_VCPU_ATTRIBUTES:
4516         case KVM_CAP_SYS_ATTRIBUTES:
4517         case KVM_CAP_VAPIC:
4518         case KVM_CAP_ENABLE_CAP:
4519         case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
4520         case KVM_CAP_IRQFD_RESAMPLE:
4521                 r = 1;
4522                 break;
4523         case KVM_CAP_EXIT_HYPERCALL:
4524                 r = KVM_EXIT_HYPERCALL_VALID_MASK;
4525                 break;
4526         case KVM_CAP_SET_GUEST_DEBUG2:
4527                 return KVM_GUESTDBG_VALID_MASK;
4528 #ifdef CONFIG_KVM_XEN
4529         case KVM_CAP_XEN_HVM:
4530                 r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
4531                     KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
4532                     KVM_XEN_HVM_CONFIG_SHARED_INFO |
4533                     KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL |
4534                     KVM_XEN_HVM_CONFIG_EVTCHN_SEND;
4535                 if (sched_info_on())
4536                         r |= KVM_XEN_HVM_CONFIG_RUNSTATE |
4537                              KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG;
4538                 break;
4539 #endif
4540         case KVM_CAP_SYNC_REGS:
4541                 r = KVM_SYNC_X86_VALID_FIELDS;
4542                 break;
4543         case KVM_CAP_ADJUST_CLOCK:
4544                 r = KVM_CLOCK_VALID_FLAGS;
4545                 break;
4546         case KVM_CAP_X86_DISABLE_EXITS:
4547                 r = KVM_X86_DISABLE_EXITS_PAUSE;
4548
4549                 if (!mitigate_smt_rsb) {
4550                         r |= KVM_X86_DISABLE_EXITS_HLT |
4551                              KVM_X86_DISABLE_EXITS_CSTATE;
4552
4553                         if (kvm_can_mwait_in_guest())
4554                                 r |= KVM_X86_DISABLE_EXITS_MWAIT;
4555                 }
4556                 break;
4557         case KVM_CAP_X86_SMM:
4558                 if (!IS_ENABLED(CONFIG_KVM_SMM))
4559                         break;
4560
4561                 /* SMBASE is usually relocated above 1M on modern chipsets,
4562                  * and SMM handlers might indeed rely on 4G segment limits,
4563                  * so do not report SMM to be available if real mode is
4564                  * emulated via vm86 mode.  Still, do not go to great lengths
4565                  * to avoid userspace's usage of the feature, because it is a
4566                  * fringe case that is not enabled except via specific settings
4567                  * of the module parameters.
4568                  */
4569                 r = static_call(kvm_x86_has_emulated_msr)(kvm, MSR_IA32_SMBASE);
4570                 break;
4571         case KVM_CAP_NR_VCPUS:
4572                 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
4573                 break;
4574         case KVM_CAP_MAX_VCPUS:
4575                 r = KVM_MAX_VCPUS;
4576                 break;
4577         case KVM_CAP_MAX_VCPU_ID:
4578                 r = KVM_MAX_VCPU_IDS;
4579                 break;
4580         case KVM_CAP_PV_MMU:    /* obsolete */
4581                 r = 0;
4582                 break;
4583         case KVM_CAP_MCE:
4584                 r = KVM_MAX_MCE_BANKS;
4585                 break;
4586         case KVM_CAP_XCRS:
4587                 r = boot_cpu_has(X86_FEATURE_XSAVE);
4588                 break;
4589         case KVM_CAP_TSC_CONTROL:
4590         case KVM_CAP_VM_TSC_CONTROL:
4591                 r = kvm_caps.has_tsc_control;
4592                 break;
4593         case KVM_CAP_X2APIC_API:
4594                 r = KVM_X2APIC_API_VALID_FLAGS;
4595                 break;
4596         case KVM_CAP_NESTED_STATE:
4597                 r = kvm_x86_ops.nested_ops->get_state ?
4598                         kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0;
4599                 break;
4600         case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
4601                 r = kvm_x86_ops.enable_l2_tlb_flush != NULL;
4602                 break;
4603         case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
4604                 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL;
4605                 break;
4606         case KVM_CAP_SMALLER_MAXPHYADDR:
4607                 r = (int) allow_smaller_maxphyaddr;
4608                 break;
4609         case KVM_CAP_STEAL_TIME:
4610                 r = sched_info_on();
4611                 break;
4612         case KVM_CAP_X86_BUS_LOCK_EXIT:
4613                 if (kvm_caps.has_bus_lock_exit)
4614                         r = KVM_BUS_LOCK_DETECTION_OFF |
4615                             KVM_BUS_LOCK_DETECTION_EXIT;
4616                 else
4617                         r = 0;
4618                 break;
4619         case KVM_CAP_XSAVE2: {
4620                 r = xstate_required_size(kvm_get_filtered_xcr0(), false);
4621                 if (r < sizeof(struct kvm_xsave))
4622                         r = sizeof(struct kvm_xsave);
4623                 break;
4624         }
4625         case KVM_CAP_PMU_CAPABILITY:
4626                 r = enable_pmu ? KVM_CAP_PMU_VALID_MASK : 0;
4627                 break;
4628         case KVM_CAP_DISABLE_QUIRKS2:
4629                 r = KVM_X86_VALID_QUIRKS;
4630                 break;
4631         case KVM_CAP_X86_NOTIFY_VMEXIT:
4632                 r = kvm_caps.has_notify_vmexit;
4633                 break;
4634         default:
4635                 break;
4636         }
4637         return r;
4638 }
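
/*
 * Illustrative userspace sketch (not part of this file; kvm_fd is an assumed
 * open("/dev/kvm", O_RDWR) descriptor): the values computed above are
 * queried with KVM_CHECK_EXTENSION.  Some capabilities answer with a
 * boolean, others with a count or a bitmask, so interpret the return value
 * per capability.
 *
 *	int max_vcpus = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
 *	int exit_bits = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_X86_DISABLE_EXITS);
 *	// exit_bits is a KVM_X86_DISABLE_EXITS_* mask, not a boolean
 */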
4639
4640 static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
4641 {
4642         void __user *uaddr = (void __user*)(unsigned long)attr->addr;
4643
4644         if ((u64)(unsigned long)uaddr != attr->addr)
4645                 return ERR_PTR_USR(-EFAULT);
4646         return uaddr;
4647 }
4648
4649 static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
4650 {
4651         u64 __user *uaddr = kvm_get_attr_addr(attr);
4652
4653         if (attr->group)
4654                 return -ENXIO;
4655
4656         if (IS_ERR(uaddr))
4657                 return PTR_ERR(uaddr);
4658
4659         switch (attr->attr) {
4660         case KVM_X86_XCOMP_GUEST_SUPP:
4661                 if (put_user(kvm_caps.supported_xcr0, uaddr))
4662                         return -EFAULT;
4663                 return 0;
4664         default:
4665                 return -ENXIO;
4666         }
4667 }
4668
4669 static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
4670 {
4671         if (attr->group)
4672                 return -ENXIO;
4673
4674         switch (attr->attr) {
4675         case KVM_X86_XCOMP_GUEST_SUPP:
4676                 return 0;
4677         default:
4678                 return -ENXIO;
4679         }
4680 }
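
/*
 * Illustrative userspace sketch (not part of this file; kvm_fd is an assumed
 * /dev/kvm descriptor): reading the KVM_X86_XCOMP_GUEST_SUPP attribute
 * served by kvm_x86_dev_get_attr() above.  attr.addr carries a userspace
 * pointer cast to a u64, and only group 0 is accepted.
 *
 *	uint64_t xcomp = 0;
 *	struct kvm_device_attr attr = {
 *		.group = 0,
 *		.attr  = KVM_X86_XCOMP_GUEST_SUPP,
 *		.addr  = (uint64_t)&xcomp,
 *	};
 *	int r = ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
 *	// on success, xcomp holds the XSTATE components KVM can expose to a guest
 */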
4681
4682 long kvm_arch_dev_ioctl(struct file *filp,
4683                         unsigned int ioctl, unsigned long arg)
4684 {
4685         void __user *argp = (void __user *)arg;
4686         long r;
4687
4688         switch (ioctl) {
4689         case KVM_GET_MSR_INDEX_LIST: {
4690                 struct kvm_msr_list __user *user_msr_list = argp;
4691                 struct kvm_msr_list msr_list;
4692                 unsigned n;
4693
4694                 r = -EFAULT;
4695                 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4696                         goto out;
4697                 n = msr_list.nmsrs;
4698                 msr_list.nmsrs = num_msrs_to_save + num_emulated_msrs;
4699                 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4700                         goto out;
4701                 r = -E2BIG;
4702                 if (n < msr_list.nmsrs)
4703                         goto out;
4704                 r = -EFAULT;
4705                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
4706                                  num_msrs_to_save * sizeof(u32)))
4707                         goto out;
4708                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
4709                                  &emulated_msrs,
4710                                  num_emulated_msrs * sizeof(u32)))
4711                         goto out;
4712                 r = 0;
4713                 break;
4714         }
4715         case KVM_GET_SUPPORTED_CPUID:
4716         case KVM_GET_EMULATED_CPUID: {
4717                 struct kvm_cpuid2 __user *cpuid_arg = argp;
4718                 struct kvm_cpuid2 cpuid;
4719
4720                 r = -EFAULT;
4721                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
4722                         goto out;
4723
4724                 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries,
4725                                             ioctl);
4726                 if (r)
4727                         goto out;
4728
4729                 r = -EFAULT;
4730                 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
4731                         goto out;
4732                 r = 0;
4733                 break;
4734         }
4735         case KVM_X86_GET_MCE_CAP_SUPPORTED:
4736                 r = -EFAULT;
4737                 if (copy_to_user(argp, &kvm_caps.supported_mce_cap,
4738                                  sizeof(kvm_caps.supported_mce_cap)))
4739                         goto out;
4740                 r = 0;
4741                 break;
4742         case KVM_GET_MSR_FEATURE_INDEX_LIST: {
4743                 struct kvm_msr_list __user *user_msr_list = argp;
4744                 struct kvm_msr_list msr_list;
4745                 unsigned int n;
4746
4747                 r = -EFAULT;
4748                 if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list)))
4749                         goto out;
4750                 n = msr_list.nmsrs;
4751                 msr_list.nmsrs = num_msr_based_features;
4752                 if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list)))
4753                         goto out;
4754                 r = -E2BIG;
4755                 if (n < msr_list.nmsrs)
4756                         goto out;
4757                 r = -EFAULT;
4758                 if (copy_to_user(user_msr_list->indices, &msr_based_features,
4759                                  num_msr_based_features * sizeof(u32)))
4760                         goto out;
4761                 r = 0;
4762                 break;
4763         }
4764         case KVM_GET_MSRS:
4765                 r = msr_io(NULL, argp, do_get_msr_feature, 1);
4766                 break;
4767         case KVM_GET_SUPPORTED_HV_CPUID:
4768                 r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
4769                 break;
4770         case KVM_GET_DEVICE_ATTR: {
4771                 struct kvm_device_attr attr;
4772                 r = -EFAULT;
4773                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4774                         break;
4775                 r = kvm_x86_dev_get_attr(&attr);
4776                 break;
4777         }
4778         case KVM_HAS_DEVICE_ATTR: {
4779                 struct kvm_device_attr attr;
4780                 r = -EFAULT;
4781                 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4782                         break;
4783                 r = kvm_x86_dev_has_attr(&attr);
4784                 break;
4785         }
4786         default:
4787                 r = -EINVAL;
4788                 break;
4789         }
4790 out:
4791         return r;
4792 }
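
/*
 * Illustrative userspace sketch (not part of this file; kvm_fd is an assumed
 * /dev/kvm descriptor): the two-call pattern KVM_GET_MSR_INDEX_LIST expects
 * from the handler above.  An undersized first call fails with E2BIG but
 * still writes back the real count, which sizes the second call.
 *
 *	struct kvm_msr_list probe = { .nmsrs = 0 };
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);	// fails with E2BIG
 *	struct kvm_msr_list *list =
 *		malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
 *	list->nmsrs = probe.nmsrs;
 *	ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);	// fills list->indices[]
 */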
4793
4794 static void wbinvd_ipi(void *garbage)
4795 {
4796         wbinvd();
4797 }
4798
4799 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
4800 {
4801         return kvm_arch_has_noncoherent_dma(vcpu->kvm);
4802 }
4803
4804 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
4805 {
4806         /* Address the case where WBINVD may be executed by the guest */
4807         if (need_emulate_wbinvd(vcpu)) {
4808                 if (static_call(kvm_x86_has_wbinvd_exit)())
4809                         cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4810                 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
4811                         smp_call_function_single(vcpu->cpu,
4812                                         wbinvd_ipi, NULL, 1);
4813         }
4814
4815         static_call(kvm_x86_vcpu_load)(vcpu, cpu);
4816
4817         /* Save host pkru register if supported */
4818         vcpu->arch.host_pkru = read_pkru();
4819
4820         /* Apply any externally detected TSC adjustments (due to suspend) */
4821         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
4822                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
4823                 vcpu->arch.tsc_offset_adjustment = 0;
4824                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4825         }
4826
4827         if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) {
4828                 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
4829                                 rdtsc() - vcpu->arch.last_host_tsc;
4830                 if (tsc_delta < 0)
4831                         mark_tsc_unstable("KVM discovered backwards TSC");
4832
4833                 if (kvm_check_tsc_unstable()) {
4834                         u64 offset = kvm_compute_l1_tsc_offset(vcpu,
4835                                                 vcpu->arch.last_guest_tsc);
4836                         kvm_vcpu_write_tsc_offset(vcpu, offset);
4837                         vcpu->arch.tsc_catchup = 1;
4838                 }
4839
4840                 if (kvm_lapic_hv_timer_in_use(vcpu))
4841                         kvm_lapic_restart_hv_timer(vcpu);
4842
4843                 /*
4844                  * On a host with synchronized TSC, there is no need to update
4845                  * kvmclock on vcpu->cpu migration
4846                  */
4847                 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
4848                         kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
4849                 if (vcpu->cpu != cpu)
4850                         kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu);
4851                 vcpu->cpu = cpu;
4852         }
4853
4854         kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
4855 }
4856
4857 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
4858 {
4859         struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
4860         struct kvm_steal_time __user *st;
4861         struct kvm_memslots *slots;
4862         static const u8 preempted = KVM_VCPU_PREEMPTED;
4863         gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
4864
4865         /*
4866          * The vCPU can be marked preempted if and only if the VM-Exit was on
4867          * an instruction boundary and will not trigger guest emulation of any
4868          * kind (see vcpu_run).  Vendor specific code controls (conservatively)
4869          * when this is true, for example allowing the vCPU to be marked
4870          * preempted if and only if the VM-Exit was due to a host interrupt.
4871          */
4872         if (!vcpu->arch.at_instruction_boundary) {
4873                 vcpu->stat.preemption_other++;
4874                 return;
4875         }
4876
4877         vcpu->stat.preemption_reported++;
4878         if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
4879                 return;
4880
4881         if (vcpu->arch.st.preempted)
4882                 return;
4883
4884         /* This happens on process exit */
4885         if (unlikely(current->mm != vcpu->kvm->mm))
4886                 return;
4887
4888         slots = kvm_memslots(vcpu->kvm);
4889
4890         if (unlikely(slots->generation != ghc->generation ||
4891                      gpa != ghc->gpa ||
4892                      kvm_is_error_hva(ghc->hva) || !ghc->memslot))
4893                 return;
4894
4895         st = (struct kvm_steal_time __user *)ghc->hva;
4896         BUILD_BUG_ON(sizeof(st->preempted) != sizeof(preempted));
4897
4898         if (!copy_to_user_nofault(&st->preempted, &preempted, sizeof(preempted)))
4899                 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
4900
4901         mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa));
4902 }
4903
4904 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
4905 {
4906         int idx;
4907
4908         if (vcpu->preempted) {
4909                 if (!vcpu->arch.guest_state_protected)
4910                         vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu);
4911
4912                 /*
4913                  * Take the srcu lock as memslots will be accessed to check the gfn
4914                  * cache generation against the memslots generation.
4915                  */
4916                 idx = srcu_read_lock(&vcpu->kvm->srcu);
4917                 if (kvm_xen_msr_enabled(vcpu->kvm))
4918                         kvm_xen_runstate_set_preempted(vcpu);
4919                 else
4920                         kvm_steal_time_set_preempted(vcpu);
4921                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4922         }
4923
4924         static_call(kvm_x86_vcpu_put)(vcpu);
4925         vcpu->arch.last_host_tsc = rdtsc();
4926 }
4927
4928 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
4929                                     struct kvm_lapic_state *s)
4930 {
4931         static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
4932
4933         return kvm_apic_get_state(vcpu, s);
4934 }
4935
4936 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
4937                                     struct kvm_lapic_state *s)
4938 {
4939         int r;
4940
4941         r = kvm_apic_set_state(vcpu, s);
4942         if (r)
4943                 return r;
4944         update_cr8_intercept(vcpu);
4945
4946         return 0;
4947 }
4948
4949 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu)
4950 {
4951         /*
4952          * We can accept userspace's request for interrupt injection
4953          * as long as we have a place to store the interrupt number.
4954          * The actual injection will happen when the CPU is able to
4955          * deliver the interrupt.
4956          */
4957         if (kvm_cpu_has_extint(vcpu))
4958                 return false;
4959
4960         /* Acknowledging ExtINT does not happen if LINT0 is masked.  */
4961         return (!lapic_in_kernel(vcpu) ||
4962                 kvm_apic_accept_pic_intr(vcpu));
4963 }
4964
4965 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
4966 {
4967         /*
4968          * Do not cause an interrupt window exit if an exception
4969          * is pending or an event needs reinjection; userspace
4970          * might want to inject the interrupt manually using KVM_SET_REGS
4971          * or KVM_SET_SREGS.  For that to work, we must be at an
4972          * instruction boundary and with no events half-injected.
4973          */
4974         return (kvm_arch_interrupt_allowed(vcpu) &&
4975                 kvm_cpu_accept_dm_intr(vcpu) &&
4976                 !kvm_event_needs_reinjection(vcpu) &&
4977                 !kvm_is_exception_pending(vcpu));
4978 }
4979
4980 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
4981                                     struct kvm_interrupt *irq)
4982 {
4983         if (irq->irq >= KVM_NR_INTERRUPTS)
4984                 return -EINVAL;
4985
4986         if (!irqchip_in_kernel(vcpu->kvm)) {
4987                 kvm_queue_interrupt(vcpu, irq->irq, false);
4988                 kvm_make_request(KVM_REQ_EVENT, vcpu);
4989                 return 0;
4990         }
4991
4992         /*
4993          * With in-kernel LAPIC, we only use this to inject EXTINT, so
4994          * fail for in-kernel 8259.
4995          */
4996         if (pic_in_kernel(vcpu->kvm))
4997                 return -ENXIO;
4998
4999         if (vcpu->arch.pending_external_vector != -1)
5000                 return -EEXIST;
5001
5002         vcpu->arch.pending_external_vector = irq->irq;
5003         kvm_make_request(KVM_REQ_EVENT, vcpu);
5004         return 0;
5005 }
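
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd and the
 * vector are assumptions): injecting an external interrupt with
 * KVM_INTERRUPT, which lands in kvm_vcpu_ioctl_interrupt() above.  The path
 * is only usable without a fully in-kernel PIC; with an in-kernel 8259 the
 * ioctl fails with ENXIO.
 *
 *	struct kvm_interrupt irq = { .irq = 32 };
 *	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
 *		perror("KVM_INTERRUPT");
 */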
5006
5007 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
5008 {
5009         kvm_inject_nmi(vcpu);
5010
5011         return 0;
5012 }
5013
5014 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
5015                                            struct kvm_tpr_access_ctl *tac)
5016 {
5017         if (tac->flags)
5018                 return -EINVAL;
5019         vcpu->arch.tpr_access_reporting = !!tac->enabled;
5020         return 0;
5021 }
5022
5023 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
5024                                         u64 mcg_cap)
5025 {
5026         int r;
5027         unsigned bank_num = mcg_cap & 0xff, bank;
5028
5029         r = -EINVAL;
5030         if (!bank_num || bank_num > KVM_MAX_MCE_BANKS)
5031                 goto out;
5032         if (mcg_cap & ~(kvm_caps.supported_mce_cap | 0xff | 0xff0000))
5033                 goto out;
5034         r = 0;
5035         vcpu->arch.mcg_cap = mcg_cap;
5036         /* Init IA32_MCG_CTL to all 1s */
5037         if (mcg_cap & MCG_CTL_P)
5038                 vcpu->arch.mcg_ctl = ~(u64)0;
5039         /* Init IA32_MCi_CTL to all 1s, IA32_MCi_CTL2 to all 0s */
5040         for (bank = 0; bank < bank_num; bank++) {
5041                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
5042                 if (mcg_cap & MCG_CMCI_P)
5043                         vcpu->arch.mci_ctl2_banks[bank] = 0;
5044         }
5045
5046         kvm_apic_after_set_mcg_cap(vcpu);
5047
5048         static_call(kvm_x86_setup_mce)(vcpu);
5049 out:
5050         return r;
5051 }
5052
5053 /*
5054  * Validate this is a UCNA (uncorrected no action required) error by checking the
5055  * MCG_STATUS and MCi_STATUS registers:
5056  * - none of the bits for Machine Check Exceptions are set
5057  * - both the VAL (valid) and UC (uncorrectable) bits are set
5058  * MCI_STATUS_PCC - Processor Context Corrupted
5059  * MCI_STATUS_S - Signaled as a Machine Check Exception
5060  * MCI_STATUS_AR - Software recoverable Action Required
5061  */
5062 static bool is_ucna(struct kvm_x86_mce *mce)
5063 {
5064         return  !mce->mcg_status &&
5065                 !(mce->status & (MCI_STATUS_PCC | MCI_STATUS_S | MCI_STATUS_AR)) &&
5066                 (mce->status & MCI_STATUS_VAL) &&
5067                 (mce->status & MCI_STATUS_UC);
5068 }
5069
5070 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks)
5071 {
5072         u64 mcg_cap = vcpu->arch.mcg_cap;
5073
5074         banks[1] = mce->status;
5075         banks[2] = mce->addr;
5076         banks[3] = mce->misc;
5077         vcpu->arch.mcg_status = mce->mcg_status;
5078
5079         if (!(mcg_cap & MCG_CMCI_P) ||
5080             !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN))
5081                 return 0;
5082
5083         if (lapic_in_kernel(vcpu))
5084                 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI);
5085
5086         return 0;
5087 }
5088
5089 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
5090                                       struct kvm_x86_mce *mce)
5091 {
5092         u64 mcg_cap = vcpu->arch.mcg_cap;
5093         unsigned bank_num = mcg_cap & 0xff;
5094         u64 *banks = vcpu->arch.mce_banks;
5095
5096         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
5097                 return -EINVAL;
5098
5099         banks += array_index_nospec(4 * mce->bank, 4 * bank_num);
5100
5101         if (is_ucna(mce))
5102                 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks);
5103
5104         /*
5105          * if IA32_MCG_CTL is not all 1s, the uncorrected error
5106          * reporting is disabled
5107          */
5108         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
5109             vcpu->arch.mcg_ctl != ~(u64)0)
5110                 return 0;
5111         /*
5112          * if IA32_MCi_CTL is not all 1s, the uncorrected error
5113          * reporting is disabled for the bank
5114          */
5115         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
5116                 return 0;
5117         if (mce->status & MCI_STATUS_UC) {
5118                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
5119                     !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) {
5120                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5121                         return 0;
5122                 }
5123                 if (banks[1] & MCI_STATUS_VAL)
5124                         mce->status |= MCI_STATUS_OVER;
5125                 banks[2] = mce->addr;
5126                 banks[3] = mce->misc;
5127                 vcpu->arch.mcg_status = mce->mcg_status;
5128                 banks[1] = mce->status;
5129                 kvm_queue_exception(vcpu, MC_VECTOR);
5130         } else if (!(banks[1] & MCI_STATUS_VAL)
5131                    || !(banks[1] & MCI_STATUS_UC)) {
5132                 if (banks[1] & MCI_STATUS_VAL)
5133                         mce->status |= MCI_STATUS_OVER;
5134                 banks[2] = mce->addr;
5135                 banks[3] = mce->misc;
5136                 banks[1] = mce->status;
5137         } else
5138                 banks[1] |= MCI_STATUS_OVER;
5139         return 0;
5140 }
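
/*
 * Illustrative userspace sketch (not part of this file; kvm_fd, vcpu_fd and
 * all values are assumptions): the MCE setup/injection flow served by the
 * handlers above.  The supported capability mask is read from /dev/kvm, a
 * bank count is chosen, KVM_X86_SETUP_MCE installs it, and KVM_X86_SET_MCE
 * injects a synthetic UCNA-style event (VAL|UC set, no PCC/S/AR, and a
 * clear mcg_status).
 *
 *	uint64_t mcg_cap;
 *	ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mcg_cap);
 *	mcg_cap = (mcg_cap & ~0xffull) | 10;		// keep 10 banks
 *	ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mcg_cap);
 *
 *	struct kvm_x86_mce mce = {
 *		.bank   = 1,
 *		.status = (1ull << 63) | (1ull << 61),	// VAL | UC
 *		.addr   = 0x1234000,
 *	};
 *	ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
 */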
5141
5142 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
5143                                                struct kvm_vcpu_events *events)
5144 {
5145         struct kvm_queued_exception *ex;
5146
5147         process_nmi(vcpu);
5148
5149 #ifdef CONFIG_KVM_SMM
5150         if (kvm_check_request(KVM_REQ_SMI, vcpu))
5151                 process_smi(vcpu);
5152 #endif
5153
5154         /*
5155          * KVM's ABI only allows for one exception to be migrated.  Luckily,
5156          * the only time there can be two queued exceptions is if there's a
5157          * non-exiting _injected_ exception, and a pending exiting exception.
5158          * In that case, ignore the VM-Exiting exception as it's an extension
5159          * of the injected exception.
5160          */
5161         if (vcpu->arch.exception_vmexit.pending &&
5162             !vcpu->arch.exception.pending &&
5163             !vcpu->arch.exception.injected)
5164                 ex = &vcpu->arch.exception_vmexit;
5165         else
5166                 ex = &vcpu->arch.exception;
5167
5168         /*
5169          * In guest mode, payload delivery should be deferred if the exception
5170          * will be intercepted by L1, e.g. KVM should not modify CR2 if L1
5171          * intercepts #PF, ditto for DR6 and #DBs.  If the per-VM capability,
5172          * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
5173          * propagate the payload and so it cannot be safely deferred.  Deliver
5174          * the payload if the capability hasn't been requested.
5175          */
5176         if (!vcpu->kvm->arch.exception_payload_enabled &&
5177             ex->pending && ex->has_payload)
5178                 kvm_deliver_exception_payload(vcpu, ex);
5179
5180         memset(events, 0, sizeof(*events));
5181
5182         /*
5183          * The API doesn't provide the instruction length for software
5184          * exceptions, so don't report them. As long as the guest RIP
5185          * isn't advanced, we should expect to encounter the exception
5186          * again.
5187          */
5188         if (!kvm_exception_is_soft(ex->vector)) {
5189                 events->exception.injected = ex->injected;
5190                 events->exception.pending = ex->pending;
5191                 /*
5192                  * For ABI compatibility, deliberately conflate
5193                  * pending and injected exceptions when
5194                  * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
5195                  */
5196                 if (!vcpu->kvm->arch.exception_payload_enabled)
5197                         events->exception.injected |= ex->pending;
5198         }
5199         events->exception.nr = ex->vector;
5200         events->exception.has_error_code = ex->has_error_code;
5201         events->exception.error_code = ex->error_code;
5202         events->exception_has_payload = ex->has_payload;
5203         events->exception_payload = ex->payload;
5204
5205         events->interrupt.injected =
5206                 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
5207         events->interrupt.nr = vcpu->arch.interrupt.nr;
5208         events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
5209
5210         events->nmi.injected = vcpu->arch.nmi_injected;
5211         events->nmi.pending = kvm_get_nr_pending_nmis(vcpu);
5212         events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu);
5213
5214         /* events->sipi_vector is never valid when reporting to user space */
5215
5216 #ifdef CONFIG_KVM_SMM
5217         events->smi.smm = is_smm(vcpu);
5218         events->smi.pending = vcpu->arch.smi_pending;
5219         events->smi.smm_inside_nmi =
5220                 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK);
5221 #endif
5222         events->smi.latched_init = kvm_lapic_latched_init(vcpu);
5223
5224         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
5225                          | KVM_VCPUEVENT_VALID_SHADOW
5226                          | KVM_VCPUEVENT_VALID_SMM);
5227         if (vcpu->kvm->arch.exception_payload_enabled)
5228                 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
5229         if (vcpu->kvm->arch.triple_fault_event) {
5230                 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5231                 events->flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
5232         }
5233 }
5234
5235 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
5236                                               struct kvm_vcpu_events *events)
5237 {
5238         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
5239                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
5240                               | KVM_VCPUEVENT_VALID_SHADOW
5241                               | KVM_VCPUEVENT_VALID_SMM
5242                               | KVM_VCPUEVENT_VALID_PAYLOAD
5243                               | KVM_VCPUEVENT_VALID_TRIPLE_FAULT))
5244                 return -EINVAL;
5245
5246         if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
5247                 if (!vcpu->kvm->arch.exception_payload_enabled)
5248                         return -EINVAL;
5249                 if (events->exception.pending)
5250                         events->exception.injected = 0;
5251                 else
5252                         events->exception_has_payload = 0;
5253         } else {
5254                 events->exception.pending = 0;
5255                 events->exception_has_payload = 0;
5256         }
5257
5258         if ((events->exception.injected || events->exception.pending) &&
5259             (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
5260                 return -EINVAL;
5261
5262         /* INITs are latched while in SMM */
5263         if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
5264             (events->smi.smm || events->smi.pending) &&
5265             vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
5266                 return -EINVAL;
5267
5268         process_nmi(vcpu);
5269
5270         /*
5271          * Flag that userspace is stuffing an exception, the next KVM_RUN will
5272          * morph the exception to a VM-Exit if appropriate.  Do this only for
5273          * pending exceptions, already-injected exceptions are not subject to
5274          * interception.  Note, userspace that conflates pending and injected
5275          * is hosed, and will incorrectly convert an injected exception into a
5276          * pending exception, which in turn may cause a spurious VM-Exit.
5277          */
5278         vcpu->arch.exception_from_userspace = events->exception.pending;
5279
5280         vcpu->arch.exception_vmexit.pending = false;
5281
5282         vcpu->arch.exception.injected = events->exception.injected;
5283         vcpu->arch.exception.pending = events->exception.pending;
5284         vcpu->arch.exception.vector = events->exception.nr;
5285         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
5286         vcpu->arch.exception.error_code = events->exception.error_code;
5287         vcpu->arch.exception.has_payload = events->exception_has_payload;
5288         vcpu->arch.exception.payload = events->exception_payload;
5289
5290         vcpu->arch.interrupt.injected = events->interrupt.injected;
5291         vcpu->arch.interrupt.nr = events->interrupt.nr;
5292         vcpu->arch.interrupt.soft = events->interrupt.soft;
5293         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
5294                 static_call(kvm_x86_set_interrupt_shadow)(vcpu,
5295                                                 events->interrupt.shadow);
5296
5297         vcpu->arch.nmi_injected = events->nmi.injected;
5298         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) {
5299                 vcpu->arch.nmi_pending = 0;
5300                 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending);
5301                 kvm_make_request(KVM_REQ_NMI, vcpu);
5302         }
5303         static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked);
5304
5305         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
5306             lapic_in_kernel(vcpu))
5307                 vcpu->arch.apic->sipi_vector = events->sipi_vector;
5308
5309         if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
5310 #ifdef CONFIG_KVM_SMM
5311                 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
5312                         kvm_leave_nested(vcpu);
5313                         kvm_smm_changed(vcpu, events->smi.smm);
5314                 }
5315
5316                 vcpu->arch.smi_pending = events->smi.pending;
5317
5318                 if (events->smi.smm) {
5319                         if (events->smi.smm_inside_nmi)
5320                                 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
5321                         else
5322                                 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
5323                 }
5324
5325 #else
5326                 if (events->smi.smm || events->smi.pending ||
5327                     events->smi.smm_inside_nmi)
5328                         return -EINVAL;
5329 #endif
5330
5331                 if (lapic_in_kernel(vcpu)) {
5332                         if (events->smi.latched_init)
5333                                 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5334                         else
5335                                 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
5336                 }
5337         }
5338
5339         if (events->flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) {
5340                 if (!vcpu->kvm->arch.triple_fault_event)
5341                         return -EINVAL;
5342                 if (events->triple_fault.pending)
5343                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5344                 else
5345                         kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5346         }
5347
5348         kvm_make_request(KVM_REQ_EVENT, vcpu);
5349
5350         return 0;
5351 }
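
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd is assumed):
 * the usual save/restore pairing of the two ioctls above, e.g. for live
 * migration.  The flags field returned by the get side already advertises
 * which optional blocks (NMI pending, shadow, SMM, payload, triple fault)
 * the set side should consume, so the structure can be fed back unchanged.
 *
 *	struct kvm_vcpu_events ev;
 *	ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev);
 *	// ... transfer ev to the destination vCPU ...
 *	ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &ev);
 */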
5352
5353 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
5354                                              struct kvm_debugregs *dbgregs)
5355 {
5356         unsigned long val;
5357
5358         memset(dbgregs, 0, sizeof(*dbgregs));
5359         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
5360         kvm_get_dr(vcpu, 6, &val);
5361         dbgregs->dr6 = val;
5362         dbgregs->dr7 = vcpu->arch.dr7;
5363 }
5364
5365 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
5366                                             struct kvm_debugregs *dbgregs)
5367 {
5368         if (dbgregs->flags)
5369                 return -EINVAL;
5370
5371         if (!kvm_dr6_valid(dbgregs->dr6))
5372                 return -EINVAL;
5373         if (!kvm_dr7_valid(dbgregs->dr7))
5374                 return -EINVAL;
5375
5376         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
5377         kvm_update_dr0123(vcpu);
5378         vcpu->arch.dr6 = dbgregs->dr6;
5379         vcpu->arch.dr7 = dbgregs->dr7;
5380         kvm_update_dr7(vcpu);
5381
5382         return 0;
5383 }
5384
5385
5386 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
5387                                           u8 *state, unsigned int size)
5388 {
5389         /*
5390          * Only copy state for features that are enabled for the guest.  The
5391          * state itself isn't problematic, but setting bits in the header for
5392          * features that are supported in *this* host but not exposed to the
5393          * guest can result in KVM_SET_XSAVE failing when live migrating to a
5394          * compatible host without the features that are NOT exposed to the
5395          * guest.
5396          *
5397          * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
5398          * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
5399          * supported by the host.
5400          */
5401         u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 |
5402                              XFEATURE_MASK_FPSSE;
5403
5404         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5405                 return;
5406
5407         fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size,
5408                                        supported_xcr0, vcpu->arch.pkru);
5409 }
5410
5411 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
5412                                          struct kvm_xsave *guest_xsave)
5413 {
5414         return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
5415                                              sizeof(guest_xsave->region));
5416 }
5417
5418 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
5419                                         struct kvm_xsave *guest_xsave)
5420 {
5421         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
5422                 return 0;
5423
5424         return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu,
5425                                               guest_xsave->region,
5426                                               kvm_caps.supported_xcr0,
5427                                               &vcpu->arch.pkru);
5428 }
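
/*
 * Illustrative userspace sketch (not part of this file; vm_fd and vcpu_fd
 * are assumptions): sizing the buffer for KVM_GET_XSAVE2, which reaches
 * kvm_vcpu_ioctl_x86_get_xsave2() above.  KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
 * reports the byte count needed, never less than sizeof(struct kvm_xsave).
 *
 *	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
 *	struct kvm_xsave *xs = calloc(1, size);
 *	ioctl(vcpu_fd, KVM_GET_XSAVE2, xs);
 *	// the same buffer can later be restored with KVM_SET_XSAVE
 *	ioctl(vcpu_fd, KVM_SET_XSAVE, xs);
 */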
5429
5430 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
5431                                         struct kvm_xcrs *guest_xcrs)
5432 {
5433         if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
5434                 guest_xcrs->nr_xcrs = 0;
5435                 return;
5436         }
5437
5438         guest_xcrs->nr_xcrs = 1;
5439         guest_xcrs->flags = 0;
5440         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
5441         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
5442 }
5443
5444 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
5445                                        struct kvm_xcrs *guest_xcrs)
5446 {
5447         int i, r = 0;
5448
5449         if (!boot_cpu_has(X86_FEATURE_XSAVE))
5450                 return -EINVAL;
5451
5452         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
5453                 return -EINVAL;
5454
5455         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
5456                 /* Only support XCR0 currently */
5457                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
5458                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
5459                                 guest_xcrs->xcrs[i].value);
5460                         break;
5461                 }
5462         if (r)
5463                 r = -EINVAL;
5464         return r;
5465 }
5466
5467 /*
5468  * kvm_set_guest_paused() indicates to the guest kernel that it has been
5469  * stopped by the hypervisor.  This function will be called from the host only.
5470  * EINVAL is returned when the host attempts to set the flag for a guest that
5471  * does not support pv clocks.
5472  */
5473 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
5474 {
5475         if (!vcpu->arch.pv_time.active)
5476                 return -EINVAL;
5477         vcpu->arch.pvclock_set_guest_stopped_request = true;
5478         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5479         return 0;
5480 }
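
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd is assumed):
 * a VMM that stops a guest (live migration, gdbstub, ...) can tell the
 * guest's soft-lockup watchdog about it via KVM_KVMCLOCK_CTRL, which ends
 * up in kvm_set_guest_paused() above.  The ioctl takes no argument and
 * fails with EINVAL when the guest has no active pvclock.
 *
 *	if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0 && errno != EINVAL)
 *		perror("KVM_KVMCLOCK_CTRL");
 */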
5481
5482 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
5483                                  struct kvm_device_attr *attr)
5484 {
5485         int r;
5486
5487         switch (attr->attr) {
5488         case KVM_VCPU_TSC_OFFSET:
5489                 r = 0;
5490                 break;
5491         default:
5492                 r = -ENXIO;
5493         }
5494
5495         return r;
5496 }
5497
5498 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
5499                                  struct kvm_device_attr *attr)
5500 {
5501         u64 __user *uaddr = kvm_get_attr_addr(attr);
5502         int r;
5503
5504         if (IS_ERR(uaddr))
5505                 return PTR_ERR(uaddr);
5506
5507         switch (attr->attr) {
5508         case KVM_VCPU_TSC_OFFSET:
5509                 r = -EFAULT;
5510                 if (put_user(vcpu->arch.l1_tsc_offset, uaddr))
5511                         break;
5512                 r = 0;
5513                 break;
5514         default:
5515                 r = -ENXIO;
5516         }
5517
5518         return r;
5519 }
5520
5521 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
5522                                  struct kvm_device_attr *attr)
5523 {
5524         u64 __user *uaddr = kvm_get_attr_addr(attr);
5525         struct kvm *kvm = vcpu->kvm;
5526         int r;
5527
5528         if (IS_ERR(uaddr))
5529                 return PTR_ERR(uaddr);
5530
5531         switch (attr->attr) {
5532         case KVM_VCPU_TSC_OFFSET: {
5533                 u64 offset, tsc, ns;
5534                 unsigned long flags;
5535                 bool matched;
5536
5537                 r = -EFAULT;
5538                 if (get_user(offset, uaddr))
5539                         break;
5540
5541                 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
5542
5543                 matched = (vcpu->arch.virtual_tsc_khz &&
5544                            kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz &&
5545                            kvm->arch.last_tsc_offset == offset);
5546
5547                 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset;
5548                 ns = get_kvmclock_base_ns();
5549
5550                 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched);
5551                 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
5552
5553                 r = 0;
5554                 break;
5555         }
5556         default:
5557                 r = -ENXIO;
5558         }
5559
5560         return r;
5561 }
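
/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd and the
 * adjustment are assumptions): reading and writing the L1 TSC offset via
 * the vCPU device-attr interface served by the three functions above
 * (group KVM_VCPU_TSC_CTRL, attribute KVM_VCPU_TSC_OFFSET).
 *
 *	uint64_t off;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_VCPU_TSC_CTRL,
 *		.attr  = KVM_VCPU_TSC_OFFSET,
 *		.addr  = (uint64_t)&off,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_DEVICE_ATTR, &attr);	// read current offset
 *	off += 1000000;					// example adjustment
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);	// write it back
 */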
5562
5563 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu,
5564                                       unsigned int ioctl,
5565                                       void __user *argp)
5566 {
5567         struct kvm_device_attr attr;
5568         int r;
5569
5570         if (copy_from_user(&attr, argp, sizeof(attr)))
5571                 return -EFAULT;
5572
5573         if (attr.group != KVM_VCPU_TSC_CTRL)
5574                 return -ENXIO;
5575
5576         switch (ioctl) {
5577         case KVM_HAS_DEVICE_ATTR:
5578                 r = kvm_arch_tsc_has_attr(vcpu, &attr);
5579                 break;
5580         case KVM_GET_DEVICE_ATTR:
5581                 r = kvm_arch_tsc_get_attr(vcpu, &attr);
5582                 break;
5583         case KVM_SET_DEVICE_ATTR:
5584                 r = kvm_arch_tsc_set_attr(vcpu, &attr);
5585                 break;
5586         }
5587
5588         return r;
5589 }
5590
5591 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
5592                                      struct kvm_enable_cap *cap)
5593 {
5594         int r;
5595         uint16_t vmcs_version;
5596         void __user *user_ptr;
5597
5598         if (cap->flags)
5599                 return -EINVAL;
5600
5601         switch (cap->cap) {
5602         case KVM_CAP_HYPERV_SYNIC2:
5603                 if (cap->args[0])
5604                         return -EINVAL;
5605                 fallthrough;
5606
5607         case KVM_CAP_HYPERV_SYNIC:
5608                 if (!irqchip_in_kernel(vcpu->kvm))
5609                         return -EINVAL;
5610                 return kvm_hv_activate_synic(vcpu, cap->cap ==
5611                                              KVM_CAP_HYPERV_SYNIC2);
5612         case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
5613                 if (!kvm_x86_ops.nested_ops->enable_evmcs)
5614                         return -ENOTTY;
5615                 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version);
5616                 if (!r) {
5617                         user_ptr = (void __user *)(uintptr_t)cap->args[0];
5618                         if (copy_to_user(user_ptr, &vmcs_version,
5619                                          sizeof(vmcs_version)))
5620                                 r = -EFAULT;
5621                 }
5622                 return r;
5623         case KVM_CAP_HYPERV_DIRECT_TLBFLUSH:
5624                 if (!kvm_x86_ops.enable_l2_tlb_flush)
5625                         return -ENOTTY;
5626
5627                 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu);
5628
5629         case KVM_CAP_HYPERV_ENFORCE_CPUID:
5630                 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]);
5631
5632         case KVM_CAP_ENFORCE_PV_FEATURE_CPUID:
5633                 vcpu->arch.pv_cpuid.enforce = cap->args[0];
5634                 if (vcpu->arch.pv_cpuid.enforce)
5635                         kvm_update_pv_runtime(vcpu);
5636
5637                 return 0;
5638         default:
5639                 return -EINVAL;
5640         }
5641 }
5642
5643 long kvm_arch_vcpu_ioctl(struct file *filp,
5644                          unsigned int ioctl, unsigned long arg)
5645 {
5646         struct kvm_vcpu *vcpu = filp->private_data;
5647         void __user *argp = (void __user *)arg;
5648         int r;
5649         union {
5650                 struct kvm_sregs2 *sregs2;
5651                 struct kvm_lapic_state *lapic;
5652                 struct kvm_xsave *xsave;
5653                 struct kvm_xcrs *xcrs;
5654                 void *buffer;
5655         } u;
5656
5657         vcpu_load(vcpu);
5658
5659         u.buffer = NULL;
5660         switch (ioctl) {
5661         case KVM_GET_LAPIC: {
5662                 r = -EINVAL;
5663                 if (!lapic_in_kernel(vcpu))
5664                         goto out;
5665                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state),
5666                                 GFP_KERNEL_ACCOUNT);
5667
5668                 r = -ENOMEM;
5669                 if (!u.lapic)
5670                         goto out;
5671                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
5672                 if (r)
5673                         goto out;
5674                 r = -EFAULT;
5675                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
5676                         goto out;
5677                 r = 0;
5678                 break;
5679         }
5680         case KVM_SET_LAPIC: {
5681                 r = -EINVAL;
5682                 if (!lapic_in_kernel(vcpu))
5683                         goto out;
5684                 u.lapic = memdup_user(argp, sizeof(*u.lapic));
5685                 if (IS_ERR(u.lapic)) {
5686                         r = PTR_ERR(u.lapic);
5687                         goto out_nofree;
5688                 }
5689
5690                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
5691                 break;
5692         }
5693         case KVM_INTERRUPT: {
5694                 struct kvm_interrupt irq;
5695
5696                 r = -EFAULT;
5697                 if (copy_from_user(&irq, argp, sizeof(irq)))
5698                         goto out;
5699                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
5700                 break;
5701         }
5702         case KVM_NMI: {
5703                 r = kvm_vcpu_ioctl_nmi(vcpu);
5704                 break;
5705         }
5706         case KVM_SMI: {
5707                 r = kvm_inject_smi(vcpu);
5708                 break;
5709         }
5710         case KVM_SET_CPUID: {
5711                 struct kvm_cpuid __user *cpuid_arg = argp;
5712                 struct kvm_cpuid cpuid;
5713
5714                 r = -EFAULT;
5715                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5716                         goto out;
5717                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
5718                 break;
5719         }
5720         case KVM_SET_CPUID2: {
5721                 struct kvm_cpuid2 __user *cpuid_arg = argp;
5722                 struct kvm_cpuid2 cpuid;
5723
5724                 r = -EFAULT;
5725                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5726                         goto out;
5727                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
5728                                               cpuid_arg->entries);
5729                 break;
5730         }
5731         case KVM_GET_CPUID2: {
5732                 struct kvm_cpuid2 __user *cpuid_arg = argp;
5733                 struct kvm_cpuid2 cpuid;
5734
5735                 r = -EFAULT;
5736                 if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
5737                         goto out;
5738                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
5739                                               cpuid_arg->entries);
5740                 if (r)
5741                         goto out;
5742                 r = -EFAULT;
5743                 if (copy_to_user(cpuid_arg, &cpuid, sizeof(cpuid)))
5744                         goto out;
5745                 r = 0;
5746                 break;
5747         }
5748         case KVM_GET_MSRS: {
5749                 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5750                 r = msr_io(vcpu, argp, do_get_msr, 1);
5751                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5752                 break;
5753         }
5754         case KVM_SET_MSRS: {
5755                 int idx = srcu_read_lock(&vcpu->kvm->srcu);
5756                 r = msr_io(vcpu, argp, do_set_msr, 0);
5757                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5758                 break;
5759         }
5760         case KVM_TPR_ACCESS_REPORTING: {
5761                 struct kvm_tpr_access_ctl tac;
5762
5763                 r = -EFAULT;
5764                 if (copy_from_user(&tac, argp, sizeof(tac)))
5765                         goto out;
5766                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
5767                 if (r)
5768                         goto out;
5769                 r = -EFAULT;
5770                 if (copy_to_user(argp, &tac, sizeof(tac)))
5771                         goto out;
5772                 r = 0;
5773                 break;
5774         }
5775         case KVM_SET_VAPIC_ADDR: {
5776                 struct kvm_vapic_addr va;
5777                 int idx;
5778
5779                 r = -EINVAL;
5780                 if (!lapic_in_kernel(vcpu))
5781                         goto out;
5782                 r = -EFAULT;
5783                 if (copy_from_user(&va, argp, sizeof(va)))
5784                         goto out;
5785                 idx = srcu_read_lock(&vcpu->kvm->srcu);
5786                 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
5787                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
5788                 break;
5789         }
5790         case KVM_X86_SETUP_MCE: {
5791                 u64 mcg_cap;
5792
5793                 r = -EFAULT;
5794                 if (copy_from_user(&mcg_cap, argp, sizeof(mcg_cap)))
5795                         goto out;
5796                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
5797                 break;
5798         }
5799         case KVM_X86_SET_MCE: {
5800                 struct kvm_x86_mce mce;
5801
5802                 r = -EFAULT;
5803                 if (copy_from_user(&mce, argp, sizeof(mce)))
5804                         goto out;
5805                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
5806                 break;
5807         }
5808         case KVM_GET_VCPU_EVENTS: {
5809                 struct kvm_vcpu_events events;
5810
5811                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
5812
5813                 r = -EFAULT;
5814                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
5815                         break;
5816                 r = 0;
5817                 break;
5818         }
5819         case KVM_SET_VCPU_EVENTS: {
5820                 struct kvm_vcpu_events events;
5821
5822                 r = -EFAULT;
5823                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
5824                         break;
5825
5826                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
5827                 break;
5828         }
5829         case KVM_GET_DEBUGREGS: {
5830                 struct kvm_debugregs dbgregs;
5831
5832                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
5833
5834                 r = -EFAULT;
5835                 if (copy_to_user(argp, &dbgregs,
5836                                  sizeof(struct kvm_debugregs)))
5837                         break;
5838                 r = 0;
5839                 break;
5840         }
5841         case KVM_SET_DEBUGREGS: {
5842                 struct kvm_debugregs dbgregs;
5843
5844                 r = -EFAULT;
5845                 if (copy_from_user(&dbgregs, argp,
5846                                    sizeof(struct kvm_debugregs)))
5847                         break;
5848
5849                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
5850                 break;
5851         }
5852         case KVM_GET_XSAVE: {
5853                 r = -EINVAL;
5854                 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
5855                         break;
5856
5857                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
5858                 r = -ENOMEM;
5859                 if (!u.xsave)
5860                         break;
5861
5862                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
5863
5864                 r = -EFAULT;
5865                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
5866                         break;
5867                 r = 0;
5868                 break;
5869         }
5870         case KVM_SET_XSAVE: {
5871                 int size = vcpu->arch.guest_fpu.uabi_size;
5872
5873                 u.xsave = memdup_user(argp, size);
5874                 if (IS_ERR(u.xsave)) {
5875                         r = PTR_ERR(u.xsave);
5876                         goto out_nofree;
5877                 }
5878
5879                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
5880                 break;
5881         }
5882
5883         case KVM_GET_XSAVE2: {
5884                 int size = vcpu->arch.guest_fpu.uabi_size;
5885
5886                 u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT);
5887                 r = -ENOMEM;
5888                 if (!u.xsave)
5889                         break;
5890
5891                 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
5892
5893                 r = -EFAULT;
5894                 if (copy_to_user(argp, u.xsave, size))
5895                         break;
5896
5897                 r = 0;
5898                 break;
5899         }
5900
5901         case KVM_GET_XCRS: {
5902                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
5903                 r = -ENOMEM;
5904                 if (!u.xcrs)
5905                         break;
5906
5907                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
5908
5909                 r = -EFAULT;
5910                 if (copy_to_user(argp, u.xcrs,
5911                                  sizeof(struct kvm_xcrs)))
5912                         break;
5913                 r = 0;
5914                 break;
5915         }
5916         case KVM_SET_XCRS: {
5917                 u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
5918                 if (IS_ERR(u.xcrs)) {
5919                         r = PTR_ERR(u.xcrs);
5920                         goto out_nofree;
5921                 }
5922
5923                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
5924                 break;
5925         }
5926         case KVM_SET_TSC_KHZ: {
5927                 u32 user_tsc_khz;
5928
5929                 r = -EINVAL;
5930                 user_tsc_khz = (u32)arg;
5931
5932                 if (kvm_caps.has_tsc_control &&
5933                     user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
5934                         goto out;
5935
5936                 if (user_tsc_khz == 0)
5937                         user_tsc_khz = tsc_khz;
5938
5939                 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz))
5940                         r = 0;
5941
5942                 goto out;
5943         }
5944         case KVM_GET_TSC_KHZ: {
5945                 r = vcpu->arch.virtual_tsc_khz;
5946                 goto out;
5947         }
5948         case KVM_KVMCLOCK_CTRL: {
5949                 r = kvm_set_guest_paused(vcpu);
5950                 goto out;
5951         }
5952         case KVM_ENABLE_CAP: {
5953                 struct kvm_enable_cap cap;
5954
5955                 r = -EFAULT;
5956                 if (copy_from_user(&cap, argp, sizeof(cap)))
5957                         goto out;
5958                 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
5959                 break;
5960         }
5961         case KVM_GET_NESTED_STATE: {
5962                 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5963                 u32 user_data_size;
5964
5965                 r = -EINVAL;
5966                 if (!kvm_x86_ops.nested_ops->get_state)
5967                         break;
5968
5969                 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size));
5970                 r = -EFAULT;
5971                 if (get_user(user_data_size, &user_kvm_nested_state->size))
5972                         break;
5973
5974                 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state,
5975                                                      user_data_size);
5976                 if (r < 0)
5977                         break;
5978
5979                 if (r > user_data_size) {
5980                         if (put_user(r, &user_kvm_nested_state->size))
5981                                 r = -EFAULT;
5982                         else
5983                                 r = -E2BIG;
5984                         break;
5985                 }
5986
5987                 r = 0;
5988                 break;
5989         }
5990         case KVM_SET_NESTED_STATE: {
5991                 struct kvm_nested_state __user *user_kvm_nested_state = argp;
5992                 struct kvm_nested_state kvm_state;
5993                 int idx;
5994
5995                 r = -EINVAL;
5996                 if (!kvm_x86_ops.nested_ops->set_state)
5997                         break;
5998
5999                 r = -EFAULT;
6000                 if (copy_from_user(&kvm_state, user_kvm_nested_state, sizeof(kvm_state)))
6001                         break;
6002
6003                 r = -EINVAL;
6004                 if (kvm_state.size < sizeof(kvm_state))
6005                         break;
6006
6007                 if (kvm_state.flags &
6008                     ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
6009                       | KVM_STATE_NESTED_EVMCS | KVM_STATE_NESTED_MTF_PENDING
6010                       | KVM_STATE_NESTED_GIF_SET))
6011                         break;
6012
6013                 /* nested_run_pending implies guest_mode.  */
6014                 if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
6015                     && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
6016                         break;
6017
6018                 idx = srcu_read_lock(&vcpu->kvm->srcu);
6019                 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state);
6020                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
6021                 break;
6022         }
6023         case KVM_GET_SUPPORTED_HV_CPUID:
6024                 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp);
6025                 break;
6026 #ifdef CONFIG_KVM_XEN
6027         case KVM_XEN_VCPU_GET_ATTR: {
6028                 struct kvm_xen_vcpu_attr xva;
6029
6030                 r = -EFAULT;
6031                 if (copy_from_user(&xva, argp, sizeof(xva)))
6032                         goto out;
6033                 r = kvm_xen_vcpu_get_attr(vcpu, &xva);
6034                 if (!r && copy_to_user(argp, &xva, sizeof(xva)))
6035                         r = -EFAULT;
6036                 break;
6037         }
6038         case KVM_XEN_VCPU_SET_ATTR: {
6039                 struct kvm_xen_vcpu_attr xva;
6040
6041                 r = -EFAULT;
6042                 if (copy_from_user(&xva, argp, sizeof(xva)))
6043                         goto out;
6044                 r = kvm_xen_vcpu_set_attr(vcpu, &xva);
6045                 break;
6046         }
6047 #endif
6048         case KVM_GET_SREGS2: {
6049                 u.sregs2 = kzalloc(sizeof(struct kvm_sregs2), GFP_KERNEL);
6050                 r = -ENOMEM;
6051                 if (!u.sregs2)
6052                         goto out;
6053                 __get_sregs2(vcpu, u.sregs2);
6054                 r = -EFAULT;
6055                 if (copy_to_user(argp, u.sregs2, sizeof(struct kvm_sregs2)))
6056                         goto out;
6057                 r = 0;
6058                 break;
6059         }
6060         case KVM_SET_SREGS2: {
6061                 u.sregs2 = memdup_user(argp, sizeof(struct kvm_sregs2));
6062                 if (IS_ERR(u.sregs2)) {
6063                         r = PTR_ERR(u.sregs2);
6064                         u.sregs2 = NULL;
6065                         goto out;
6066                 }
6067                 r = __set_sregs2(vcpu, u.sregs2);
6068                 break;
6069         }
6070         case KVM_HAS_DEVICE_ATTR:
6071         case KVM_GET_DEVICE_ATTR:
6072         case KVM_SET_DEVICE_ATTR:
6073                 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp);
6074                 break;
6075         default:
6076                 r = -EINVAL;
6077         }
6078 out:
6079         kfree(u.buffer);
6080 out_nofree:
6081         vcpu_put(vcpu);
6082         return r;
6083 }
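
/*
 * The ioctl above moves most vCPU state through fixed-size structures copied
 * to/from userspace.  A minimal sketch of the KVM_GET_LAPIC/KVM_SET_LAPIC
 * round trip from the caller's side; vcpu_fd is an assumed descriptor from
 * KVM_CREATE_VCPU on a VM with an in-kernel irqchip (otherwise both ioctls
 * fail with -EINVAL, matching the lapic_in_kernel() checks above).
 * Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_restore_lapic(int vcpu_fd)
{
        struct kvm_lapic_state lapic;

        if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
                return -1;

        /* ... the snapshot could be serialized for migration here ... */

        return ioctl(vcpu_fd, KVM_SET_LAPIC, &lapic);
}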
6084
6085 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
6086 {
6087         return VM_FAULT_SIGBUS;
6088 }
6089
6090 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
6091 {
6092         int ret;
6093
6094         if (addr > (unsigned int)(-3 * PAGE_SIZE))
6095                 return -EINVAL;
6096         ret = static_call(kvm_x86_set_tss_addr)(kvm, addr);
6097         return ret;
6098 }
6099
6100 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
6101                                               u64 ident_addr)
6102 {
6103         return static_call(kvm_x86_set_identity_map_addr)(kvm, ident_addr);
6104 }
6105
6106 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
6107                                          unsigned long kvm_nr_mmu_pages)
6108 {
6109         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
6110                 return -EINVAL;
6111
6112         mutex_lock(&kvm->slots_lock);
6113
6114         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
6115         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
6116
6117         mutex_unlock(&kvm->slots_lock);
6118         return 0;
6119 }
6120
6121 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6122 {
6123         struct kvm_pic *pic = kvm->arch.vpic;
6124         int r;
6125
6126         r = 0;
6127         switch (chip->chip_id) {
6128         case KVM_IRQCHIP_PIC_MASTER:
6129                 memcpy(&chip->chip.pic, &pic->pics[0],
6130                         sizeof(struct kvm_pic_state));
6131                 break;
6132         case KVM_IRQCHIP_PIC_SLAVE:
6133                 memcpy(&chip->chip.pic, &pic->pics[1],
6134                         sizeof(struct kvm_pic_state));
6135                 break;
6136         case KVM_IRQCHIP_IOAPIC:
6137                 kvm_get_ioapic(kvm, &chip->chip.ioapic);
6138                 break;
6139         default:
6140                 r = -EINVAL;
6141                 break;
6142         }
6143         return r;
6144 }
6145
6146 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
6147 {
6148         struct kvm_pic *pic = kvm->arch.vpic;
6149         int r;
6150
6151         r = 0;
6152         switch (chip->chip_id) {
6153         case KVM_IRQCHIP_PIC_MASTER:
6154                 spin_lock(&pic->lock);
6155                 memcpy(&pic->pics[0], &chip->chip.pic,
6156                         sizeof(struct kvm_pic_state));
6157                 spin_unlock(&pic->lock);
6158                 break;
6159         case KVM_IRQCHIP_PIC_SLAVE:
6160                 spin_lock(&pic->lock);
6161                 memcpy(&pic->pics[1], &chip->chip.pic,
6162                         sizeof(struct kvm_pic_state));
6163                 spin_unlock(&pic->lock);
6164                 break;
6165         case KVM_IRQCHIP_IOAPIC:
6166                 kvm_set_ioapic(kvm, &chip->chip.ioapic);
6167                 break;
6168         default:
6169                 r = -EINVAL;
6170                 break;
6171         }
6172         kvm_pic_update_irq(pic);
6173         return r;
6174 }
6175
6176 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6177 {
6178         struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
6179
6180         BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
6181
6182         mutex_lock(&kps->lock);
6183         memcpy(ps, &kps->channels, sizeof(*ps));
6184         mutex_unlock(&kps->lock);
6185         return 0;
6186 }
6187
6188 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
6189 {
6190         int i;
6191         struct kvm_pit *pit = kvm->arch.vpit;
6192
6193         mutex_lock(&pit->pit_state.lock);
6194         memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
6195         for (i = 0; i < 3; i++)
6196                 kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
6197         mutex_unlock(&pit->pit_state.lock);
6198         return 0;
6199 }
6200
6201 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6202 {
6203         mutex_lock(&kvm->arch.vpit->pit_state.lock);
6204         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
6205                 sizeof(ps->channels));
6206         ps->flags = kvm->arch.vpit->pit_state.flags;
6207         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
6208         memset(&ps->reserved, 0, sizeof(ps->reserved));
6209         return 0;
6210 }
6211
6212 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
6213 {
6214         int start = 0;
6215         int i;
6216         u32 prev_legacy, cur_legacy;
6217         struct kvm_pit *pit = kvm->arch.vpit;
6218
6219         mutex_lock(&pit->pit_state.lock);
6220         prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
6221         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
6222         if (!prev_legacy && cur_legacy)
6223                 start = 1;
6224         memcpy(&pit->pit_state.channels, &ps->channels,
6225                sizeof(pit->pit_state.channels));
6226         pit->pit_state.flags = ps->flags;
6227         for (i = 0; i < 3; i++)
6228                 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
6229                                    start && i == 0);
6230         mutex_unlock(&pit->pit_state.lock);
6231         return 0;
6232 }
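
/*
 * The PIT helpers above copy the three channel states plus the HPET-legacy
 * flag under pit_state.lock, reloading channel 0 when that flag transitions
 * from clear to set.  A sketch of the userspace save/restore round trip;
 * vm_fd is an assumed VM descriptor whose PIT was created with
 * KVM_CREATE_PIT2.  Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int save_restore_pit(int vm_fd)
{
        struct kvm_pit_state2 ps2;

        if (ioctl(vm_fd, KVM_GET_PIT2, &ps2) < 0)      /* -ENXIO without a PIT */
                return -1;

        /* Restoring the snapshot reloads all three channel counts. */
        return ioctl(vm_fd, KVM_SET_PIT2, &ps2);
}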
6233
6234 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
6235                                  struct kvm_reinject_control *control)
6236 {
6237         struct kvm_pit *pit = kvm->arch.vpit;
6238
6239         /* pit->pit_state.lock was overloaded to prevent userspace from getting
6240          * an inconsistent state after running multiple KVM_REINJECT_CONTROL
6241          * ioctls in parallel.  Use a separate lock if that ioctl isn't rare.
6242          */
6243         mutex_lock(&pit->pit_state.lock);
6244         kvm_pit_set_reinject(pit, control->pit_reinject);
6245         mutex_unlock(&pit->pit_state.lock);
6246
6247         return 0;
6248 }
6249
6250 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
6251 {
6252
6253         /*
6254          * Flush all CPUs' dirty log buffers to the dirty_bitmap.  Called
6255          * before reporting dirty_bitmap to userspace.  KVM flushes the buffers
6256          * on all VM-Exits, thus we only need to kick running vCPUs to force a
6257          * VM-Exit.
6258          */
6259         struct kvm_vcpu *vcpu;
6260         unsigned long i;
6261
6262         kvm_for_each_vcpu(i, vcpu, kvm)
6263                 kvm_vcpu_kick(vcpu);
6264 }
6265
6266 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
6267                         bool line_status)
6268 {
6269         if (!irqchip_in_kernel(kvm))
6270                 return -ENXIO;
6271
6272         irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
6273                                         irq_event->irq, irq_event->level,
6274                                         line_status);
6275         return 0;
6276 }
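
/*
 * kvm_vm_ioctl_irq_line() backs KVM_IRQ_LINE (and KVM_IRQ_LINE_STATUS, which
 * additionally reports the delivery status).  A sketch of pulsing a GSI from
 * userspace; vm_fd is an assumed VM descriptor with an in-kernel or split
 * irqchip, without which the ioctl fails with -ENXIO as above, and gsi is an
 * arbitrary example value.  Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int pulse_gsi(int vm_fd, unsigned int gsi)
{
        struct kvm_irq_level irq = { .irq = gsi, .level = 1 };

        if (ioctl(vm_fd, KVM_IRQ_LINE, &irq) < 0)
                return -1;

        irq.level = 0;
        return ioctl(vm_fd, KVM_IRQ_LINE, &irq);
}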
6277
6278 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
6279                             struct kvm_enable_cap *cap)
6280 {
6281         int r;
6282
6283         if (cap->flags)
6284                 return -EINVAL;
6285
6286         switch (cap->cap) {
6287         case KVM_CAP_DISABLE_QUIRKS2:
6288                 r = -EINVAL;
6289                 if (cap->args[0] & ~KVM_X86_VALID_QUIRKS)
6290                         break;
6291                 fallthrough;
6292         case KVM_CAP_DISABLE_QUIRKS:
6293                 kvm->arch.disabled_quirks = cap->args[0];
6294                 r = 0;
6295                 break;
6296         case KVM_CAP_SPLIT_IRQCHIP: {
6297                 mutex_lock(&kvm->lock);
6298                 r = -EINVAL;
6299                 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS)
6300                         goto split_irqchip_unlock;
6301                 r = -EEXIST;
6302                 if (irqchip_in_kernel(kvm))
6303                         goto split_irqchip_unlock;
6304                 if (kvm->created_vcpus)
6305                         goto split_irqchip_unlock;
6306                 r = kvm_setup_empty_irq_routing(kvm);
6307                 if (r)
6308                         goto split_irqchip_unlock;
6309                 /* Pairs with irqchip_in_kernel. */
6310                 smp_wmb();
6311                 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
6312                 kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
6313                 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6314                 r = 0;
6315 split_irqchip_unlock:
6316                 mutex_unlock(&kvm->lock);
6317                 break;
6318         }
6319         case KVM_CAP_X2APIC_API:
6320                 r = -EINVAL;
6321                 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS)
6322                         break;
6323
6324                 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS)
6325                         kvm->arch.x2apic_format = true;
6326                 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
6327                         kvm->arch.x2apic_broadcast_quirk_disabled = true;
6328
6329                 r = 0;
6330                 break;
6331         case KVM_CAP_X86_DISABLE_EXITS:
6332                 r = -EINVAL;
6333                 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
6334                         break;
6335
6336                 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
6337                         kvm->arch.pause_in_guest = true;
6338
6339 #define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
6340                     "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."
6341
6342                 if (!mitigate_smt_rsb) {
6343                         if (boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible() &&
6344                             (cap->args[0] & ~KVM_X86_DISABLE_EXITS_PAUSE))
6345                                 pr_warn_once(SMT_RSB_MSG);
6346
6347                         if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) &&
6348                             kvm_can_mwait_in_guest())
6349                                 kvm->arch.mwait_in_guest = true;
6350                         if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
6351                                 kvm->arch.hlt_in_guest = true;
6352                         if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
6353                                 kvm->arch.cstate_in_guest = true;
6354                 }
6355
6356                 r = 0;
6357                 break;
6358         case KVM_CAP_MSR_PLATFORM_INFO:
6359                 kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
6360                 r = 0;
6361                 break;
6362         case KVM_CAP_EXCEPTION_PAYLOAD:
6363                 kvm->arch.exception_payload_enabled = cap->args[0];
6364                 r = 0;
6365                 break;
6366         case KVM_CAP_X86_TRIPLE_FAULT_EVENT:
6367                 kvm->arch.triple_fault_event = cap->args[0];
6368                 r = 0;
6369                 break;
6370         case KVM_CAP_X86_USER_SPACE_MSR:
6371                 r = -EINVAL;
6372                 if (cap->args[0] & ~KVM_MSR_EXIT_REASON_VALID_MASK)
6373                         break;
6374                 kvm->arch.user_space_msr_mask = cap->args[0];
6375                 r = 0;
6376                 break;
6377         case KVM_CAP_X86_BUS_LOCK_EXIT:
6378                 r = -EINVAL;
6379                 if (cap->args[0] & ~KVM_BUS_LOCK_DETECTION_VALID_MODE)
6380                         break;
6381
6382                 if ((cap->args[0] & KVM_BUS_LOCK_DETECTION_OFF) &&
6383                     (cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT))
6384                         break;
6385
6386                 if (kvm_caps.has_bus_lock_exit &&
6387                     cap->args[0] & KVM_BUS_LOCK_DETECTION_EXIT)
6388                         kvm->arch.bus_lock_detection_enabled = true;
6389                 r = 0;
6390                 break;
6391 #ifdef CONFIG_X86_SGX_KVM
6392         case KVM_CAP_SGX_ATTRIBUTE: {
6393                 unsigned long allowed_attributes = 0;
6394
6395                 r = sgx_set_attribute(&allowed_attributes, cap->args[0]);
6396                 if (r)
6397                         break;
6398
6399                 /* KVM only supports the PROVISIONKEY privileged attribute. */
6400                 if ((allowed_attributes & SGX_ATTR_PROVISIONKEY) &&
6401                     !(allowed_attributes & ~SGX_ATTR_PROVISIONKEY))
6402                         kvm->arch.sgx_provisioning_allowed = true;
6403                 else
6404                         r = -EINVAL;
6405                 break;
6406         }
6407 #endif
6408         case KVM_CAP_VM_COPY_ENC_CONTEXT_FROM:
6409                 r = -EINVAL;
6410                 if (!kvm_x86_ops.vm_copy_enc_context_from)
6411                         break;
6412
6413                 r = static_call(kvm_x86_vm_copy_enc_context_from)(kvm, cap->args[0]);
6414                 break;
6415         case KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM:
6416                 r = -EINVAL;
6417                 if (!kvm_x86_ops.vm_move_enc_context_from)
6418                         break;
6419
6420                 r = static_call(kvm_x86_vm_move_enc_context_from)(kvm, cap->args[0]);
6421                 break;
6422         case KVM_CAP_EXIT_HYPERCALL:
6423                 if (cap->args[0] & ~KVM_EXIT_HYPERCALL_VALID_MASK) {
6424                         r = -EINVAL;
6425                         break;
6426                 }
6427                 kvm->arch.hypercall_exit_enabled = cap->args[0];
6428                 r = 0;
6429                 break;
6430         case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
6431                 r = -EINVAL;
6432                 if (cap->args[0] & ~1)
6433                         break;
6434                 kvm->arch.exit_on_emulation_error = cap->args[0];
6435                 r = 0;
6436                 break;
6437         case KVM_CAP_PMU_CAPABILITY:
6438                 r = -EINVAL;
6439                 if (!enable_pmu || (cap->args[0] & ~KVM_CAP_PMU_VALID_MASK))
6440                         break;
6441
6442                 mutex_lock(&kvm->lock);
6443                 if (!kvm->created_vcpus) {
6444                         kvm->arch.enable_pmu = !(cap->args[0] & KVM_PMU_CAP_DISABLE);
6445                         r = 0;
6446                 }
6447                 mutex_unlock(&kvm->lock);
6448                 break;
6449         case KVM_CAP_MAX_VCPU_ID:
6450                 r = -EINVAL;
6451                 if (cap->args[0] > KVM_MAX_VCPU_IDS)
6452                         break;
6453
6454                 mutex_lock(&kvm->lock);
6455                 if (kvm->arch.max_vcpu_ids == cap->args[0]) {
6456                         r = 0;
6457                 } else if (!kvm->arch.max_vcpu_ids) {
6458                         kvm->arch.max_vcpu_ids = cap->args[0];
6459                         r = 0;
6460                 }
6461                 mutex_unlock(&kvm->lock);
6462                 break;
6463         case KVM_CAP_X86_NOTIFY_VMEXIT:
6464                 r = -EINVAL;
6465                 if ((u32)cap->args[0] & ~KVM_X86_NOTIFY_VMEXIT_VALID_BITS)
6466                         break;
6467                 if (!kvm_caps.has_notify_vmexit)
6468                         break;
6469                 if (!((u32)cap->args[0] & KVM_X86_NOTIFY_VMEXIT_ENABLED))
6470                         break;
6471                 mutex_lock(&kvm->lock);
6472                 if (!kvm->created_vcpus) {
6473                         kvm->arch.notify_window = cap->args[0] >> 32;
6474                         kvm->arch.notify_vmexit_flags = (u32)cap->args[0];
6475                         r = 0;
6476                 }
6477                 mutex_unlock(&kvm->lock);
6478                 break;
6479         case KVM_CAP_VM_DISABLE_NX_HUGE_PAGES:
6480                 r = -EINVAL;
6481
6482                 /*
6483                  * Since the risk of disabling NX hugepages is a guest crashing
6484                  * the system, ensure the userspace process has permission to
6485                  * reboot the system.
6486                  *
6487                  * Note that unlike the reboot() syscall, the process must have
6488                  * this capability in the root namespace because exposing
6489                  * /dev/kvm into a container does not limit the scope of the
6490                  * iTLB multihit bug to that container. In other words,
6491                  * this must use capable(), not ns_capable().
6492                  */
6493                 if (!capable(CAP_SYS_BOOT)) {
6494                         r = -EPERM;
6495                         break;
6496                 }
6497
6498                 if (cap->args[0])
6499                         break;
6500
6501                 mutex_lock(&kvm->lock);
6502                 if (!kvm->created_vcpus) {
6503                         kvm->arch.disable_nx_huge_pages = true;
6504                         r = 0;
6505                 }
6506                 mutex_unlock(&kvm->lock);
6507                 break;
6508         default:
6509                 r = -EINVAL;
6510                 break;
6511         }
6512         return r;
6513 }
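
/*
 * Tying to the KVM_CAP_VM_DISABLE_NX_HUGE_PAGES comment above: a sketch of
 * how a VMM might request that capability.  vm_fd is an assumed descriptor
 * from KVM_CREATE_VM; the request must precede the first KVM_CREATE_VCPU,
 * cap->args[0] must be zero, and the caller needs CAP_SYS_BOOT in the init
 * user namespace.  Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int disable_nx_huge_pages(int vm_fd)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_VM_DISABLE_NX_HUGE_PAGES,
                /* args[] stays zero, as required by the handler above. */
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}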
6514
6515 static struct kvm_x86_msr_filter *kvm_alloc_msr_filter(bool default_allow)
6516 {
6517         struct kvm_x86_msr_filter *msr_filter;
6518
6519         msr_filter = kzalloc(sizeof(*msr_filter), GFP_KERNEL_ACCOUNT);
6520         if (!msr_filter)
6521                 return NULL;
6522
6523         msr_filter->default_allow = default_allow;
6524         return msr_filter;
6525 }
6526
6527 static void kvm_free_msr_filter(struct kvm_x86_msr_filter *msr_filter)
6528 {
6529         u32 i;
6530
6531         if (!msr_filter)
6532                 return;
6533
6534         for (i = 0; i < msr_filter->count; i++)
6535                 kfree(msr_filter->ranges[i].bitmap);
6536
6537         kfree(msr_filter);
6538 }
6539
6540 static int kvm_add_msr_filter(struct kvm_x86_msr_filter *msr_filter,
6541                               struct kvm_msr_filter_range *user_range)
6542 {
6543         unsigned long *bitmap;
6544         size_t bitmap_size;
6545
6546         if (!user_range->nmsrs)
6547                 return 0;
6548
6549         if (user_range->flags & ~KVM_MSR_FILTER_RANGE_VALID_MASK)
6550                 return -EINVAL;
6551
6552         if (!user_range->flags)
6553                 return -EINVAL;
6554
6555         bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long);
6556         if (!bitmap_size || bitmap_size > KVM_MSR_FILTER_MAX_BITMAP_SIZE)
6557                 return -EINVAL;
6558
6559         bitmap = memdup_user((u8 __user *)user_range->bitmap, bitmap_size);
6560         if (IS_ERR(bitmap))
6561                 return PTR_ERR(bitmap);
6562
6563         msr_filter->ranges[msr_filter->count] = (struct msr_bitmap_range) {
6564                 .flags = user_range->flags,
6565                 .base = user_range->base,
6566                 .nmsrs = user_range->nmsrs,
6567                 .bitmap = bitmap,
6568         };
6569
6570         msr_filter->count++;
6571         return 0;
6572 }
6573
6574 static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm,
6575                                        struct kvm_msr_filter *filter)
6576 {
6577         struct kvm_x86_msr_filter *new_filter, *old_filter;
6578         bool default_allow;
6579         bool empty = true;
6580         int r;
6581         u32 i;
6582
6583         if (filter->flags & ~KVM_MSR_FILTER_VALID_MASK)
6584                 return -EINVAL;
6585
6586         for (i = 0; i < ARRAY_SIZE(filter->ranges); i++)
6587                 empty &= !filter->ranges[i].nmsrs;
6588
6589         default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY);
6590         if (empty && !default_allow)
6591                 return -EINVAL;
6592
6593         new_filter = kvm_alloc_msr_filter(default_allow);
6594         if (!new_filter)
6595                 return -ENOMEM;
6596
6597         for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) {
6598                 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]);
6599                 if (r) {
6600                         kvm_free_msr_filter(new_filter);
6601                         return r;
6602                 }
6603         }
6604
6605         mutex_lock(&kvm->lock);
6606         old_filter = rcu_replace_pointer(kvm->arch.msr_filter, new_filter,
6607                                          mutex_is_locked(&kvm->lock));
6608         mutex_unlock(&kvm->lock);
6609         synchronize_srcu(&kvm->srcu);
6610
6611         kvm_free_msr_filter(old_filter);
6612
6613         kvm_make_all_cpus_request(kvm, KVM_REQ_MSR_FILTER_CHANGED);
6614
6615         return 0;
6616 }
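
/*
 * The replace path above installs up to KVM_MSR_FILTER_MAX_RANGES ranges,
 * each with a bitmap in which a set bit allows the operations named in the
 * range's flags and a clear bit denies them.  A sketch that keeps the
 * default-allow policy but denies guest access to one MSR; vm_fd is an
 * assumed VM descriptor and msr_index is an arbitrary placeholder value.
 * Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int deny_one_msr(int vm_fd, __u32 msr_index)
{
        __u8 bitmap = 0;        /* bit 0 clear: deny reads and writes */
        struct kvm_msr_filter filter = {
                .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
        };

        filter.ranges[0] = (struct kvm_msr_filter_range) {
                .flags  = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
                .base   = msr_index,
                .nmsrs  = 1,
                .bitmap = &bitmap,
        };

        /*
         * Denied accesses #GP in the guest unless KVM_CAP_X86_USER_SPACE_MSR
         * was enabled to forward them to userspace instead.
         */
        return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}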
6617
6618 #ifdef CONFIG_KVM_COMPAT
6619 /* for KVM_X86_SET_MSR_FILTER */
6620 struct kvm_msr_filter_range_compat {
6621         __u32 flags;
6622         __u32 nmsrs;
6623         __u32 base;
6624         __u32 bitmap;
6625 };
6626
6627 struct kvm_msr_filter_compat {
6628         __u32 flags;
6629         struct kvm_msr_filter_range_compat ranges[KVM_MSR_FILTER_MAX_RANGES];
6630 };
6631
6632 #define KVM_X86_SET_MSR_FILTER_COMPAT _IOW(KVMIO, 0xc6, struct kvm_msr_filter_compat)
6633
6634 long kvm_arch_vm_compat_ioctl(struct file *filp, unsigned int ioctl,
6635                               unsigned long arg)
6636 {
6637         void __user *argp = (void __user *)arg;
6638         struct kvm *kvm = filp->private_data;
6639         long r = -ENOTTY;
6640
6641         switch (ioctl) {
6642         case KVM_X86_SET_MSR_FILTER_COMPAT: {
6643                 struct kvm_msr_filter __user *user_msr_filter = argp;
6644                 struct kvm_msr_filter_compat filter_compat;
6645                 struct kvm_msr_filter filter;
6646                 int i;
6647
6648                 if (copy_from_user(&filter_compat, user_msr_filter,
6649                                    sizeof(filter_compat)))
6650                         return -EFAULT;
6651
6652                 filter.flags = filter_compat.flags;
6653                 for (i = 0; i < ARRAY_SIZE(filter.ranges); i++) {
6654                         struct kvm_msr_filter_range_compat *cr;
6655
6656                         cr = &filter_compat.ranges[i];
6657                         filter.ranges[i] = (struct kvm_msr_filter_range) {
6658                                 .flags = cr->flags,
6659                                 .nmsrs = cr->nmsrs,
6660                                 .base = cr->base,
6661                                 .bitmap = (__u8 *)(ulong)cr->bitmap,
6662                         };
6663                 }
6664
6665                 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
6666                 break;
6667         }
6668         }
6669
6670         return r;
6671 }
6672 #endif
6673
6674 #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
6675 static int kvm_arch_suspend_notifier(struct kvm *kvm)
6676 {
6677         struct kvm_vcpu *vcpu;
6678         unsigned long i;
6679         int ret = 0;
6680
6681         mutex_lock(&kvm->lock);
6682         kvm_for_each_vcpu(i, vcpu, kvm) {
6683                 if (!vcpu->arch.pv_time.active)
6684                         continue;
6685
6686                 ret = kvm_set_guest_paused(vcpu);
6687                 if (ret) {
6688                         kvm_err("Failed to pause guest VCPU%d: %d\n",
6689                                 vcpu->vcpu_id, ret);
6690                         break;
6691                 }
6692         }
6693         mutex_unlock(&kvm->lock);
6694
6695         return ret ? NOTIFY_BAD : NOTIFY_DONE;
6696 }
6697
6698 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
6699 {
6700         switch (state) {
6701         case PM_HIBERNATION_PREPARE:
6702         case PM_SUSPEND_PREPARE:
6703                 return kvm_arch_suspend_notifier(kvm);
6704         }
6705
6706         return NOTIFY_DONE;
6707 }
6708 #endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */
6709
6710 static int kvm_vm_ioctl_get_clock(struct kvm *kvm, void __user *argp)
6711 {
6712         struct kvm_clock_data data = { 0 };
6713
6714         get_kvmclock(kvm, &data);
6715         if (copy_to_user(argp, &data, sizeof(data)))
6716                 return -EFAULT;
6717
6718         return 0;
6719 }
6720
6721 static int kvm_vm_ioctl_set_clock(struct kvm *kvm, void __user *argp)
6722 {
6723         struct kvm_arch *ka = &kvm->arch;
6724         struct kvm_clock_data data;
6725         u64 now_raw_ns;
6726
6727         if (copy_from_user(&data, argp, sizeof(data)))
6728                 return -EFAULT;
6729
6730         /*
6731          * Only KVM_CLOCK_REALTIME is used, but allow passing the
6732          * result of KVM_GET_CLOCK back to KVM_SET_CLOCK.
6733          */
6734         if (data.flags & ~KVM_CLOCK_VALID_FLAGS)
6735                 return -EINVAL;
6736
6737         kvm_hv_request_tsc_page_update(kvm);
6738         kvm_start_pvclock_update(kvm);
6739         pvclock_update_vm_gtod_copy(kvm);
6740
6741         /*
6742          * This pairs with kvm_guest_time_update(): when the masterclock is
6743          * in use, master_kernel_ns + kvmclock_offset is used to set the
6744          * unsigned 'system_time'.  If get_kvmclock_ns() (which is slightly
6745          * ahead) were used here, the unsigned 'system_time' could underflow
6746          * when 'data.clock' is very small.
6747          */
6748         if (data.flags & KVM_CLOCK_REALTIME) {
6749                 u64 now_real_ns = ktime_get_real_ns();
6750
6751                 /*
6752                  * Avoid stepping the kvmclock backwards.
6753                  */
6754                 if (now_real_ns > data.realtime)
6755                         data.clock += now_real_ns - data.realtime;
6756         }
6757
6758         if (ka->use_master_clock)
6759                 now_raw_ns = ka->master_kernel_ns;
6760         else
6761                 now_raw_ns = get_kvmclock_base_ns();
6762         ka->kvmclock_offset = data.clock - now_raw_ns;
6763         kvm_end_pvclock_update(kvm);
6764         return 0;
6765 }
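
/*
 * Pairing with the comments above, a sketch of the save/restore flow a VMM
 * might use: capture kvmclock with KVM_GET_CLOCK and feed the same structure
 * back to KVM_SET_CLOCK on the destination.  When KVM_CLOCK_REALTIME is set
 * in the snapshot, KVM adds the wall-clock time elapsed since 'realtime' so
 * kvmclock never steps backwards.  src_vm_fd and dst_vm_fd are assumed VM
 * descriptors; a real VMM would serialize the structure between hosts.
 * Illustrative only, not part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int migrate_kvmclock(int src_vm_fd, int dst_vm_fd)
{
        struct kvm_clock_data data;

        if (ioctl(src_vm_fd, KVM_GET_CLOCK, &data) < 0)
                return -1;

        return ioctl(dst_vm_fd, KVM_SET_CLOCK, &data);
}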
6766
6767 int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
6768 {
6769         struct kvm *kvm = filp->private_data;
6770         void __user *argp = (void __user *)arg;
6771         int r = -ENOTTY;
6772         /*
6773          * This union makes it completely explicit to gcc-3.x
6774          * that these variables' stack usage should be
6775          * combined, not added together.
6776          */
6777         union {
6778                 struct kvm_pit_state ps;
6779                 struct kvm_pit_state2 ps2;
6780                 struct kvm_pit_config pit_config;
6781         } u;
6782
6783         switch (ioctl) {
6784         case KVM_SET_TSS_ADDR:
6785                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
6786                 break;
6787         case KVM_SET_IDENTITY_MAP_ADDR: {
6788                 u64 ident_addr;
6789
6790                 mutex_lock(&kvm->lock);
6791                 r = -EINVAL;
6792                 if (kvm->created_vcpus)
6793                         goto set_identity_unlock;
6794                 r = -EFAULT;
6795                 if (copy_from_user(&ident_addr, argp, sizeof(ident_addr)))
6796                         goto set_identity_unlock;
6797                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
6798 set_identity_unlock:
6799                 mutex_unlock(&kvm->lock);
6800                 break;
6801         }
6802         case KVM_SET_NR_MMU_PAGES:
6803                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
6804                 break;
6805         case KVM_CREATE_IRQCHIP: {
6806                 mutex_lock(&kvm->lock);
6807
6808                 r = -EEXIST;
6809                 if (irqchip_in_kernel(kvm))
6810                         goto create_irqchip_unlock;
6811
6812                 r = -EINVAL;
6813                 if (kvm->created_vcpus)
6814                         goto create_irqchip_unlock;
6815
6816                 r = kvm_pic_init(kvm);
6817                 if (r)
6818                         goto create_irqchip_unlock;
6819
6820                 r = kvm_ioapic_init(kvm);
6821                 if (r) {
6822                         kvm_pic_destroy(kvm);
6823                         goto create_irqchip_unlock;
6824                 }
6825
6826                 r = kvm_setup_default_irq_routing(kvm);
6827                 if (r) {
6828                         kvm_ioapic_destroy(kvm);
6829                         kvm_pic_destroy(kvm);
6830                         goto create_irqchip_unlock;
6831                 }
6832                 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */
6833                 smp_wmb();
6834                 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
6835                 kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_ABSENT);
6836         create_irqchip_unlock:
6837                 mutex_unlock(&kvm->lock);
6838                 break;
6839         }
6840         case KVM_CREATE_PIT:
6841                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
6842                 goto create_pit;
6843         case KVM_CREATE_PIT2:
6844                 r = -EFAULT;
6845                 if (copy_from_user(&u.pit_config, argp,
6846                                    sizeof(struct kvm_pit_config)))
6847                         goto out;
6848         create_pit:
6849                 mutex_lock(&kvm->lock);
6850                 r = -EEXIST;
6851                 if (kvm->arch.vpit)
6852                         goto create_pit_unlock;
6853                 r = -ENOMEM;
6854                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
6855                 if (kvm->arch.vpit)
6856                         r = 0;
6857         create_pit_unlock:
6858                 mutex_unlock(&kvm->lock);
6859                 break;
6860         case KVM_GET_IRQCHIP: {
6861                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
6862                 struct kvm_irqchip *chip;
6863
6864                 chip = memdup_user(argp, sizeof(*chip));
6865                 if (IS_ERR(chip)) {
6866                         r = PTR_ERR(chip);
6867                         goto out;
6868                 }
6869
6870                 r = -ENXIO;
6871                 if (!irqchip_kernel(kvm))
6872                         goto get_irqchip_out;
6873                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
6874                 if (r)
6875                         goto get_irqchip_out;
6876                 r = -EFAULT;
6877                 if (copy_to_user(argp, chip, sizeof(*chip)))
6878                         goto get_irqchip_out;
6879                 r = 0;
6880         get_irqchip_out:
6881                 kfree(chip);
6882                 break;
6883         }
6884         case KVM_SET_IRQCHIP: {
6885                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
6886                 struct kvm_irqchip *chip;
6887
6888                 chip = memdup_user(argp, sizeof(*chip));
6889                 if (IS_ERR(chip)) {
6890                         r = PTR_ERR(chip);
6891                         goto out;
6892                 }
6893
6894                 r = -ENXIO;
6895                 if (!irqchip_kernel(kvm))
6896                         goto set_irqchip_out;
6897                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
6898         set_irqchip_out:
6899                 kfree(chip);
6900                 break;
6901         }
6902         case KVM_GET_PIT: {
6903                 r = -EFAULT;
6904                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
6905                         goto out;
6906                 r = -ENXIO;
6907                 if (!kvm->arch.vpit)
6908                         goto out;
6909                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
6910                 if (r)
6911                         goto out;
6912                 r = -EFAULT;
6913                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
6914                         goto out;
6915                 r = 0;
6916                 break;
6917         }
6918         case KVM_SET_PIT: {
6919                 r = -EFAULT;
6920                 if (copy_from_user(&u.ps, argp, sizeof(u.ps)))
6921                         goto out;
6922                 mutex_lock(&kvm->lock);
6923                 r = -ENXIO;
6924                 if (!kvm->arch.vpit)
6925                         goto set_pit_out;
6926                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
6927 set_pit_out:
6928                 mutex_unlock(&kvm->lock);
6929                 break;
6930         }
6931         case KVM_GET_PIT2: {
6932                 r = -ENXIO;
6933                 if (!kvm->arch.vpit)
6934                         goto out;
6935                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
6936                 if (r)
6937                         goto out;
6938                 r = -EFAULT;
6939                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
6940                         goto out;
6941                 r = 0;
6942                 break;
6943         }
6944         case KVM_SET_PIT2: {
6945                 r = -EFAULT;
6946                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
6947                         goto out;
6948                 mutex_lock(&kvm->lock);
6949                 r = -ENXIO;
6950                 if (!kvm->arch.vpit)
6951                         goto set_pit2_out;
6952                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
6953 set_pit2_out:
6954                 mutex_unlock(&kvm->lock);
6955                 break;
6956         }
6957         case KVM_REINJECT_CONTROL: {
6958                 struct kvm_reinject_control control;
6959                 r = -EFAULT;
6960                 if (copy_from_user(&control, argp, sizeof(control)))
6961                         goto out;
6962                 r = -ENXIO;
6963                 if (!kvm->arch.vpit)
6964                         goto out;
6965                 r = kvm_vm_ioctl_reinject(kvm, &control);
6966                 break;
6967         }
6968         case KVM_SET_BOOT_CPU_ID:
6969                 r = 0;
6970                 mutex_lock(&kvm->lock);
6971                 if (kvm->created_vcpus)
6972                         r = -EBUSY;
6973                 else
6974                         kvm->arch.bsp_vcpu_id = arg;
6975                 mutex_unlock(&kvm->lock);
6976                 break;
6977 #ifdef CONFIG_KVM_XEN
6978         case KVM_XEN_HVM_CONFIG: {
6979                 struct kvm_xen_hvm_config xhc;
6980                 r = -EFAULT;
6981                 if (copy_from_user(&xhc, argp, sizeof(xhc)))
6982                         goto out;
6983                 r = kvm_xen_hvm_config(kvm, &xhc);
6984                 break;
6985         }
6986         case KVM_XEN_HVM_GET_ATTR: {
6987                 struct kvm_xen_hvm_attr xha;
6988
6989                 r = -EFAULT;
6990                 if (copy_from_user(&xha, argp, sizeof(xha)))
6991                         goto out;
6992                 r = kvm_xen_hvm_get_attr(kvm, &xha);
6993                 if (!r && copy_to_user(argp, &xha, sizeof(xha)))
6994                         r = -EFAULT;
6995                 break;
6996         }
6997         case KVM_XEN_HVM_SET_ATTR: {
6998                 struct kvm_xen_hvm_attr xha;
6999
7000                 r = -EFAULT;
7001                 if (copy_from_user(&xha, argp, sizeof(xha)))
7002                         goto out;
7003                 r = kvm_xen_hvm_set_attr(kvm, &xha);
7004                 break;
7005         }
7006         case KVM_XEN_HVM_EVTCHN_SEND: {
7007                 struct kvm_irq_routing_xen_evtchn uxe;
7008
7009                 r = -EFAULT;
7010                 if (copy_from_user(&uxe, argp, sizeof(uxe)))
7011                         goto out;
7012                 r = kvm_xen_hvm_evtchn_send(kvm, &uxe);
7013                 break;
7014         }
7015 #endif
7016         case KVM_SET_CLOCK:
7017                 r = kvm_vm_ioctl_set_clock(kvm, argp);
7018                 break;
7019         case KVM_GET_CLOCK:
7020                 r = kvm_vm_ioctl_get_clock(kvm, argp);
7021                 break;
7022         case KVM_SET_TSC_KHZ: {
7023                 u32 user_tsc_khz;
7024
7025                 r = -EINVAL;
7026                 user_tsc_khz = (u32)arg;
7027
7028                 if (kvm_caps.has_tsc_control &&
7029                     user_tsc_khz >= kvm_caps.max_guest_tsc_khz)
7030                         goto out;
7031
7032                 if (user_tsc_khz == 0)
7033                         user_tsc_khz = tsc_khz;
7034
7035                 WRITE_ONCE(kvm->arch.default_tsc_khz, user_tsc_khz);
7036                 r = 0;
7037
7038                 goto out;
7039         }
7040         case KVM_GET_TSC_KHZ: {
7041                 r = READ_ONCE(kvm->arch.default_tsc_khz);
7042                 goto out;
7043         }
7044         case KVM_MEMORY_ENCRYPT_OP: {
7045                 r = -ENOTTY;
7046                 if (!kvm_x86_ops.mem_enc_ioctl)
7047                         goto out;
7048
7049                 r = static_call(kvm_x86_mem_enc_ioctl)(kvm, argp);
7050                 break;
7051         }
7052         case KVM_MEMORY_ENCRYPT_REG_REGION: {
7053                 struct kvm_enc_region region;
7054
7055                 r = -EFAULT;
7056                 if (copy_from_user(&region, argp, sizeof(region)))
7057                         goto out;
7058
7059                 r = -ENOTTY;
7060                 if (!kvm_x86_ops.mem_enc_register_region)
7061                         goto out;
7062
7063                 r = static_call(kvm_x86_mem_enc_register_region)(kvm, &region);
7064                 break;
7065         }
7066         case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
7067                 struct kvm_enc_region region;
7068
7069                 r = -EFAULT;
7070                 if (copy_from_user(&region, argp, sizeof(region)))
7071                         goto out;
7072
7073                 r = -ENOTTY;
7074                 if (!kvm_x86_ops.mem_enc_unregister_region)
7075                         goto out;
7076
7077                 r = static_call(kvm_x86_mem_enc_unregister_region)(kvm, &region);
7078                 break;
7079         }
7080         case KVM_HYPERV_EVENTFD: {
7081                 struct kvm_hyperv_eventfd hvevfd;
7082
7083                 r = -EFAULT;
7084                 if (copy_from_user(&hvevfd, argp, sizeof(hvevfd)))
7085                         goto out;
7086                 r = kvm_vm_ioctl_hv_eventfd(kvm, &hvevfd);
7087                 break;
7088         }
7089         case KVM_SET_PMU_EVENT_FILTER:
7090                 r = kvm_vm_ioctl_set_pmu_event_filter(kvm, argp);
7091                 break;
7092         case KVM_X86_SET_MSR_FILTER: {
7093                 struct kvm_msr_filter __user *user_msr_filter = argp;
7094                 struct kvm_msr_filter filter;
7095
7096                 if (copy_from_user(&filter, user_msr_filter, sizeof(filter)))
7097                         return -EFAULT;
7098
7099                 r = kvm_vm_ioctl_set_msr_filter(kvm, &filter);
7100                 break;
7101         }
7102         default:
7103                 r = -ENOTTY;
7104         }
7105 out:
7106         return r;
7107 }
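
/*
 * Several of the VM ioctls handled above (identity map, in-kernel irqchip)
 * are only accepted before the first vCPU exists.  A sketch of a typical
 * x86 bring-up ordering; vm_fd is an assumed descriptor from KVM_CREATE_VM,
 * and the TSS/identity-map addresses are conventional values used by some
 * VMMs, not anything mandated by the code above.  Illustrative only, not
 * part of this file.
 */
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int setup_x86_vm(int vm_fd)
{
        __u64 ident_addr = 0xfffbc000;
        struct kvm_pit_config pit = { .flags = KVM_PIT_SPEAKER_DUMMY };

        if (ioctl(vm_fd, KVM_SET_TSS_ADDR, 0xfffbd000ul) < 0)
                return -1;
        if (ioctl(vm_fd, KVM_SET_IDENTITY_MAP_ADDR, &ident_addr) < 0)
                return -1;
        if (ioctl(vm_fd, KVM_CREATE_IRQCHIP, 0) < 0)    /* PIC + IOAPIC + LAPICs */
                return -1;
        /* The PIT is usually created after the in-kernel irqchip. */
        return ioctl(vm_fd, KVM_CREATE_PIT2, &pit);
}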
7108
7109 static void kvm_probe_feature_msr(u32 msr_index)
7110 {
7111         struct kvm_msr_entry msr = {
7112                 .index = msr_index,
7113         };
7114
7115         if (kvm_get_msr_feature(&msr))
7116                 return;
7117
7118         msr_based_features[num_msr_based_features++] = msr_index;
7119 }
7120
7121 static void kvm_probe_msr_to_save(u32 msr_index)
7122 {
7123         u32 dummy[2];
7124
7125         if (rdmsr_safe(msr_index, &dummy[0], &dummy[1]))
7126                 return;
7127
7128         /*
7129          * Even MSRs that are valid in the host may not be exposed to guests in
7130          * some cases.
7131          */
7132         switch (msr_index) {
7133         case MSR_IA32_BNDCFGS:
7134                 if (!kvm_mpx_supported())
7135                         return;
7136                 break;
7137         case MSR_TSC_AUX:
7138                 if (!kvm_cpu_cap_has(X86_FEATURE_RDTSCP) &&
7139                     !kvm_cpu_cap_has(X86_FEATURE_RDPID))
7140                         return;
7141                 break;
7142         case MSR_IA32_UMWAIT_CONTROL:
7143                 if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG))
7144                         return;
7145                 break;
7146         case MSR_IA32_RTIT_CTL:
7147         case MSR_IA32_RTIT_STATUS:
7148                 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT))
7149                         return;
7150                 break;
7151         case MSR_IA32_RTIT_CR3_MATCH:
7152                 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7153                     !intel_pt_validate_hw_cap(PT_CAP_cr3_filtering))
7154                         return;
7155                 break;
7156         case MSR_IA32_RTIT_OUTPUT_BASE:
7157         case MSR_IA32_RTIT_OUTPUT_MASK:
7158                 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7159                     (!intel_pt_validate_hw_cap(PT_CAP_topa_output) &&
7160                      !intel_pt_validate_hw_cap(PT_CAP_single_range_output)))
7161                         return;
7162                 break;
7163         case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
7164                 if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT) ||
7165                     (msr_index - MSR_IA32_RTIT_ADDR0_A >=
7166                      intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
7167                         return;
7168                 break;
7169         case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
7170                 if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
7171                     kvm_pmu_cap.num_counters_gp)
7172                         return;
7173                 break;
7174         case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
7175                 if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
7176                     kvm_pmu_cap.num_counters_gp)
7177                         return;
7178                 break;
7179         case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
7180                 if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
7181                     kvm_pmu_cap.num_counters_fixed)
7182                         return;
7183                 break;
7184         case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
7185         case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS:
7186         case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
7187                 if (!kvm_cpu_cap_has(X86_FEATURE_PERFMON_V2))
7188                         return;
7189                 break;
7190         case MSR_IA32_XFD:
7191         case MSR_IA32_XFD_ERR:
7192                 if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
7193                         return;
7194                 break;
7195         case MSR_IA32_TSX_CTRL:
7196                 if (!(kvm_get_arch_capabilities() & ARCH_CAP_TSX_CTRL_MSR))
7197                         return;
7198                 break;
7199         default:
7200                 break;
7201         }
7202
7203         msrs_to_save[num_msrs_to_save++] = msr_index;
7204 }
7205
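     /*
      * Rebuild the MSR lists reported to userspace: MSRs to save/restore,
      * MSRs that KVM emulates without requiring host support, and feature
      * MSRs, by probing every candidate against host and KVM capabilities.
      */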
7206 static void kvm_init_msr_lists(void)
7207 {
7208         unsigned i;
7209
7210         BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
7211                          "Please update the fixed PMCs in msrs_to_save_pmu[]");
7212
7213         num_msrs_to_save = 0;
7214         num_emulated_msrs = 0;
7215         num_msr_based_features = 0;
7216
7217         for (i = 0; i < ARRAY_SIZE(msrs_to_save_base); i++)
7218                 kvm_probe_msr_to_save(msrs_to_save_base[i]);
7219
7220         if (enable_pmu) {
7221                 for (i = 0; i < ARRAY_SIZE(msrs_to_save_pmu); i++)
7222                         kvm_probe_msr_to_save(msrs_to_save_pmu[i]);
7223         }
7224
7225         for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
7226                 if (!static_call(kvm_x86_has_emulated_msr)(NULL, emulated_msrs_all[i]))
7227                         continue;
7228
7229                 emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
7230         }
7231
7232         for (i = KVM_FIRST_EMULATED_VMX_MSR; i <= KVM_LAST_EMULATED_VMX_MSR; i++)
7233                 kvm_probe_feature_msr(i);
7234
7235         for (i = 0; i < ARRAY_SIZE(msr_based_features_all_except_vmx); i++)
7236                 kvm_probe_feature_msr(msr_based_features_all_except_vmx[i]);
7237 }
7238
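     /*
      * Write @len bytes of guest MMIO in chunks of at most 8 bytes, trying
      * the in-kernel local APIC first and then the KVM_MMIO_BUS.  Returns
      * the number of bytes handled in the kernel; the remainder must be
      * completed by userspace.
      */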
7239 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
7240                            const void *v)
7241 {
7242         int handled = 0;
7243         int n;
7244
7245         do {
7246                 n = min(len, 8);
7247                 if (!(lapic_in_kernel(vcpu) &&
7248                       !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
7249                     && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
7250                         break;
7251                 handled += n;
7252                 addr += n;
7253                 len -= n;
7254                 v += n;
7255         } while (len);
7256
7257         return handled;
7258 }
7259
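     /* Read counterpart of vcpu_mmio_write(); returns bytes handled in-kernel. */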
7260 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
7261 {
7262         int handled = 0;
7263         int n;
7264
7265         do {
7266                 n = min(len, 8);
7267                 if (!(lapic_in_kernel(vcpu) &&
7268                       !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
7269                                          addr, n, v))
7270                     && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
7271                         break;
7272                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
7273                 handled += n;
7274                 addr += n;
7275                 len -= n;
7276                 v += n;
7277         } while (len);
7278
7279         return handled;
7280 }
7281
7282 void kvm_set_segment(struct kvm_vcpu *vcpu,
7283                      struct kvm_segment *var, int seg)
7284 {
7285         static_call(kvm_x86_set_segment)(vcpu, var, seg);
7286 }
7287
7288 void kvm_get_segment(struct kvm_vcpu *vcpu,
7289                      struct kvm_segment *var, int seg)
7290 {
7291         static_call(kvm_x86_get_segment)(vcpu, var, seg);
7292 }
7293
7294 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
7295                            struct x86_exception *exception)
7296 {
7297         struct kvm_mmu *mmu = vcpu->arch.mmu;
7298         gpa_t t_gpa;
7299
7300         BUG_ON(!mmu_is_nested(vcpu));
7301
7302         /* NPT walks are always user-walks */
7303         access |= PFERR_USER_MASK;
7304         t_gpa  = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
7305
7306         return t_gpa;
7307 }
7308
7309 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
7310                               struct x86_exception *exception)
7311 {
7312         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7313
7314         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7315         return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7316 }
7317 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
7318
7319 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
7320                                struct x86_exception *exception)
7321 {
7322         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7323
7324         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7325         access |= PFERR_WRITE_MASK;
7326         return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7327 }
7328 EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
7329
7330 /* used to access any guest's mapped memory without checking CPL */
7331 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
7332                                 struct x86_exception *exception)
7333 {
7334         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7335
7336         return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
7337 }
7338
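     /*
      * Copy @bytes from guest virtual address @addr, translating and reading
      * one page at a time.  Returns X86EMUL_PROPAGATE_FAULT on a translation
      * fault and X86EMUL_IO_NEEDED if the backing memory cannot be read.
      */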
7339 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7340                                       struct kvm_vcpu *vcpu, u64 access,
7341                                       struct x86_exception *exception)
7342 {
7343         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7344         void *data = val;
7345         int r = X86EMUL_CONTINUE;
7346
7347         while (bytes) {
7348                 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7349                 unsigned offset = addr & (PAGE_SIZE-1);
7350                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
7351                 int ret;
7352
7353                 if (gpa == INVALID_GPA)
7354                         return X86EMUL_PROPAGATE_FAULT;
7355                 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data,
7356                                                offset, toread);
7357                 if (ret < 0) {
7358                         r = X86EMUL_IO_NEEDED;
7359                         goto out;
7360                 }
7361
7362                 bytes -= toread;
7363                 data += toread;
7364                 addr += toread;
7365         }
7366 out:
7367         return r;
7368 }
7369
7370 /* used for instruction fetching */
7371 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
7372                                 gva_t addr, void *val, unsigned int bytes,
7373                                 struct x86_exception *exception)
7374 {
7375         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7376         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7377         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7378         unsigned offset;
7379         int ret;
7380
7381         /* Inline kvm_read_guest_virt_helper for speed.  */
7382         gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
7383                                     exception);
7384         if (unlikely(gpa == INVALID_GPA))
7385                 return X86EMUL_PROPAGATE_FAULT;
7386
7387         offset = addr & (PAGE_SIZE-1);
7388         if (WARN_ON(offset + bytes > PAGE_SIZE))
7389                 bytes = (unsigned)PAGE_SIZE - offset;
7390         ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val,
7391                                        offset, bytes);
7392         if (unlikely(ret < 0))
7393                 return X86EMUL_IO_NEEDED;
7394
7395         return X86EMUL_CONTINUE;
7396 }
7397
7398 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
7399                                gva_t addr, void *val, unsigned int bytes,
7400                                struct x86_exception *exception)
7401 {
7402         u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
7403
7404         /*
7405          * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
7406          * is returned, but our callers are not ready for that and they blindly
7407          * call kvm_inject_page_fault.  Ensure that they at least do not leak
7408          * uninitialized kernel stack memory into cr2 and error code.
7409          */
7410         memset(exception, 0, sizeof(*exception));
7411         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
7412                                           exception);
7413 }
7414 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
7415
7416 static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
7417                              gva_t addr, void *val, unsigned int bytes,
7418                              struct x86_exception *exception, bool system)
7419 {
7420         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7421         u64 access = 0;
7422
7423         if (system)
7424                 access |= PFERR_IMPLICIT_ACCESS;
7425         else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
7426                 access |= PFERR_USER_MASK;
7427
7428         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
7429 }
7430
7431 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
7432                                       struct kvm_vcpu *vcpu, u64 access,
7433                                       struct x86_exception *exception)
7434 {
7435         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7436         void *data = val;
7437         int r = X86EMUL_CONTINUE;
7438
7439         while (bytes) {
7440                 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
7441                 unsigned offset = addr & (PAGE_SIZE-1);
7442                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
7443                 int ret;
7444
7445                 if (gpa == INVALID_GPA)
7446                         return X86EMUL_PROPAGATE_FAULT;
7447                 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite);
7448                 if (ret < 0) {
7449                         r = X86EMUL_IO_NEEDED;
7450                         goto out;
7451                 }
7452
7453                 bytes -= towrite;
7454                 data += towrite;
7455                 addr += towrite;
7456         }
7457 out:
7458         return r;
7459 }
7460
7461 static int emulator_write_std(struct x86_emulate_ctxt *ctxt, gva_t addr, void *val,
7462                               unsigned int bytes, struct x86_exception *exception,
7463                               bool system)
7464 {
7465         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7466         u64 access = PFERR_WRITE_MASK;
7467
7468         if (system)
7469                 access |= PFERR_IMPLICIT_ACCESS;
7470         else if (static_call(kvm_x86_get_cpl)(vcpu) == 3)
7471                 access |= PFERR_USER_MASK;
7472
7473         return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
7474                                            access, exception);
7475 }
7476
7477 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
7478                                 unsigned int bytes, struct x86_exception *exception)
7479 {
7480         /* kvm_write_guest_virt_system can pull in tons of pages. */
7481         vcpu->arch.l1tf_flush_l1d = true;
7482
7483         return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
7484                                            PFERR_WRITE_MASK, exception);
7485 }
7486 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
7487
7488 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
7489                                 void *insn, int insn_len)
7490 {
7491         return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
7492                                                             insn, insn_len);
7493 }
7494
7495 int handle_ud(struct kvm_vcpu *vcpu)
7496 {
7497         static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
7498         int fep_flags = READ_ONCE(force_emulation_prefix);
7499         int emul_type = EMULTYPE_TRAP_UD;
7500         char sig[5]; /* ud2; .ascii "kvm" */
7501         struct x86_exception e;
7502
7503         if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
7504                 return 1;
7505
7506         if (fep_flags &&
7507             kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
7508                                 sig, sizeof(sig), &e) == 0 &&
7509             memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
7510                 if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
7511                         kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
7512                 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
7513                 emul_type = EMULTYPE_TRAP_UD_FORCED;
7514         }
7515
7516         return kvm_emulate_instruction(vcpu, emul_type);
7517 }
7518 EXPORT_SYMBOL_GPL(handle_ud);
7519
7520 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
7521                             gpa_t gpa, bool write)
7522 {
7523         /* For APIC access vmexit */
7524         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
7525                 return 1;
7526
7527         if (vcpu_match_mmio_gpa(vcpu, gpa)) {
7528                 trace_vcpu_match_mmio(gva, gpa, write, true);
7529                 return 1;
7530         }
7531
7532         return 0;
7533 }
7534
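     /*
      * Translate a GVA for an emulated MMIO access.  Returns 1 if the address
      * is MMIO (and fills *gpa), 0 if it maps ordinary guest memory, or -1 on
      * a translation fault.
      */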
7535 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
7536                                 gpa_t *gpa, struct x86_exception *exception,
7537                                 bool write)
7538 {
7539         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
7540         u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
7541                 | (write ? PFERR_WRITE_MASK : 0);
7542
7543         /*
7544          * Currently, PKRU is only applied to EPT-enabled guests, so
7545          * there is no pkey in the EPT page tables for an L1 guest or
7546          * in the EPT shadow page tables for an L2 guest.
7547          */
7548         if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) ||
7549             !permission_fault(vcpu, vcpu->arch.walk_mmu,
7550                               vcpu->arch.mmio_access, 0, access))) {
7551                 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
7552                                         (gva & (PAGE_SIZE - 1));
7553                 trace_vcpu_match_mmio(gva, *gpa, write, false);
7554                 return 1;
7555         }
7556
7557         *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
7558
7559         if (*gpa == INVALID_GPA)
7560                 return -1;
7561
7562         return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write);
7563 }
7564
7565 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
7566                         const void *val, int bytes)
7567 {
7568         int ret;
7569
7570         ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
7571         if (ret < 0)
7572                 return 0;
7573         kvm_page_track_write(vcpu, gpa, val, bytes);
7574         return 1;
7575 }
7576
7577 struct read_write_emulator_ops {
7578         int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
7579                                   int bytes);
7580         int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
7581                                   void *val, int bytes);
7582         int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7583                                int bytes, void *val);
7584         int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7585                                     void *val, int bytes);
7586         bool write;
7587 };
7588
7589 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
7590 {
7591         if (vcpu->mmio_read_completed) {
7592                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
7593                                vcpu->mmio_fragments[0].gpa, val);
7594                 vcpu->mmio_read_completed = 0;
7595                 return 1;
7596         }
7597
7598         return 0;
7599 }
7600
7601 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7602                         void *val, int bytes)
7603 {
7604         return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes);
7605 }
7606
7607 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
7608                          void *val, int bytes)
7609 {
7610         return emulator_write_phys(vcpu, gpa, val, bytes);
7611 }
7612
7613 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
7614 {
7615         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
7616         return vcpu_mmio_write(vcpu, gpa, bytes, val);
7617 }
7618
7619 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7620                           void *val, int bytes)
7621 {
7622         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
7623         return X86EMUL_IO_NEEDED;
7624 }
7625
7626 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
7627                            void *val, int bytes)
7628 {
7629         struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
7630
7631         memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
7632         return X86EMUL_CONTINUE;
7633 }
7634
7635 static const struct read_write_emulator_ops read_emulator = {
7636         .read_write_prepare = read_prepare,
7637         .read_write_emulate = read_emulate,
7638         .read_write_mmio = vcpu_mmio_read,
7639         .read_write_exit_mmio = read_exit_mmio,
7640 };
7641
7642 static const struct read_write_emulator_ops write_emulator = {
7643         .read_write_emulate = write_emulate,
7644         .read_write_mmio = write_mmio,
7645         .read_write_exit_mmio = write_exit_mmio,
7646         .write = true,
7647 };
7648
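     /*
      * Handle an emulated read/write that is contained within a single page.
      * Accesses to ordinary memory are completed directly; MMIO that the
      * in-kernel devices cannot fully handle is recorded as an mmio_fragment
      * for userspace to complete.
      */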
7649 static int emulator_read_write_onepage(unsigned long addr, void *val,
7650                                        unsigned int bytes,
7651                                        struct x86_exception *exception,
7652                                        struct kvm_vcpu *vcpu,
7653                                        const struct read_write_emulator_ops *ops)
7654 {
7655         gpa_t gpa;
7656         int handled, ret;
7657         bool write = ops->write;
7658         struct kvm_mmio_fragment *frag;
7659         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
7660
7661         /*
7662          * If the exit was due to an NPF we may already have a GPA.
7663          * If the GPA is present, use it to avoid the GVA to GPA table walk.
7664          * Note, this cannot be used on string operations since a rep string
7665          * operation will only have the initial GPA from when the NPF
7666          * occurred.
7667          */
7668         if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) &&
7669             (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) {
7670                 gpa = ctxt->gpa_val;
7671                 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
7672         } else {
7673                 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
7674                 if (ret < 0)
7675                         return X86EMUL_PROPAGATE_FAULT;
7676         }
7677
7678         if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
7679                 return X86EMUL_CONTINUE;
7680
7681         /*
7682          * Is this MMIO handled locally?
7683          */
7684         handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
7685         if (handled == bytes)
7686                 return X86EMUL_CONTINUE;
7687
7688         gpa += handled;
7689         bytes -= handled;
7690         val += handled;
7691
7692         WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
7693         frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
7694         frag->gpa = gpa;
7695         frag->data = val;
7696         frag->len = bytes;
7697         return X86EMUL_CONTINUE;
7698 }
7699
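     /*
      * Top-level helper for emulated memory accesses: splits accesses that
      * cross a page boundary and, if any MMIO fragments were queued, sets up
      * a KVM_EXIT_MMIO exit so userspace can finish the access.
      */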
7700 static int emulator_read_write(struct x86_emulate_ctxt *ctxt,
7701                         unsigned long addr,
7702                         void *val, unsigned int bytes,
7703                         struct x86_exception *exception,
7704                         const struct read_write_emulator_ops *ops)
7705 {
7706         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7707         gpa_t gpa;
7708         int rc;
7709
7710         if (ops->read_write_prepare &&
7711                   ops->read_write_prepare(vcpu, val, bytes))
7712                 return X86EMUL_CONTINUE;
7713
7714         vcpu->mmio_nr_fragments = 0;
7715
7716         /* Crossing a page boundary? */
7717         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
7718                 int now;
7719
7720                 now = -addr & ~PAGE_MASK;
7721                 rc = emulator_read_write_onepage(addr, val, now, exception,
7722                                                  vcpu, ops);
7723
7724                 if (rc != X86EMUL_CONTINUE)
7725                         return rc;
7726                 addr += now;
7727                 if (ctxt->mode != X86EMUL_MODE_PROT64)
7728                         addr = (u32)addr;
7729                 val += now;
7730                 bytes -= now;
7731         }
7732
7733         rc = emulator_read_write_onepage(addr, val, bytes, exception,
7734                                          vcpu, ops);
7735         if (rc != X86EMUL_CONTINUE)
7736                 return rc;
7737
7738         if (!vcpu->mmio_nr_fragments)
7739                 return rc;
7740
7741         gpa = vcpu->mmio_fragments[0].gpa;
7742
7743         vcpu->mmio_needed = 1;
7744         vcpu->mmio_cur_fragment = 0;
7745
7746         vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
7747         vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
7748         vcpu->run->exit_reason = KVM_EXIT_MMIO;
7749         vcpu->run->mmio.phys_addr = gpa;
7750
7751         return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
7752 }
7753
7754 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
7755                                   unsigned long addr,
7756                                   void *val,
7757                                   unsigned int bytes,
7758                                   struct x86_exception *exception)
7759 {
7760         return emulator_read_write(ctxt, addr, val, bytes,
7761                                    exception, &read_emulator);
7762 }
7763
7764 static int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
7765                             unsigned long addr,
7766                             const void *val,
7767                             unsigned int bytes,
7768                             struct x86_exception *exception)
7769 {
7770         return emulator_read_write(ctxt, addr, (void *)val, bytes,
7771                                    exception, &write_emulator);
7772 }
7773
7774 #define emulator_try_cmpxchg_user(t, ptr, old, new) \
7775         (__try_cmpxchg_user((t __user *)(ptr), (t *)(old), *(t *)(new), efault ## t))
7776
7777 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
7778                                      unsigned long addr,
7779                                      const void *old,
7780                                      const void *new,
7781                                      unsigned int bytes,
7782                                      struct x86_exception *exception)
7783 {
7784         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7785         u64 page_line_mask;
7786         unsigned long hva;
7787         gpa_t gpa;
7788         int r;
7789
7790         /* a guest's cmpxchg8b has to be emulated atomically */
7791         if (bytes > 8 || (bytes & (bytes - 1)))
7792                 goto emul_write;
7793
7794         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
7795
7796         if (gpa == INVALID_GPA ||
7797             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
7798                 goto emul_write;
7799
7800         /*
7801          * Emulate the atomic as a straight write to avoid #AC if SLD is
7802          * enabled in the host and the access splits a cache line.
7803          */
7804         if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
7805                 page_line_mask = ~(cache_line_size() - 1);
7806         else
7807                 page_line_mask = PAGE_MASK;
7808
7809         if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask))
7810                 goto emul_write;
7811
7812         hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa));
7813         if (kvm_is_error_hva(hva))
7814                 goto emul_write;
7815
7816         hva += offset_in_page(gpa);
7817
7818         switch (bytes) {
7819         case 1:
7820                 r = emulator_try_cmpxchg_user(u8, hva, old, new);
7821                 break;
7822         case 2:
7823                 r = emulator_try_cmpxchg_user(u16, hva, old, new);
7824                 break;
7825         case 4:
7826                 r = emulator_try_cmpxchg_user(u32, hva, old, new);
7827                 break;
7828         case 8:
7829                 r = emulator_try_cmpxchg_user(u64, hva, old, new);
7830                 break;
7831         default:
7832                 BUG();
7833         }
7834
7835         if (r < 0)
7836                 return X86EMUL_UNHANDLEABLE;
7837         if (r)
7838                 return X86EMUL_CMPXCHG_FAILED;
7839
7840         kvm_page_track_write(vcpu, gpa, new, bytes);
7841
7842         return X86EMUL_CONTINUE;
7843
7844 emul_write:
7845         pr_warn_once("emulating exchange as write\n");
7846
7847         return emulator_write_emulated(ctxt, addr, new, bytes, exception);
7848 }
7849
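     /*
      * Perform @count port I/O accesses of @size bytes on the in-kernel
      * KVM_PIO_BUS.  Returns 1 if the I/O was handled entirely in the kernel;
      * if the very first access needs userspace, the PIO state and a
      * KVM_EXIT_IO exit are set up and 0 is returned.
      */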
7850 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
7851                                unsigned short port, void *data,
7852                                unsigned int count, bool in)
7853 {
7854         unsigned i;
7855         int r;
7856
7857         WARN_ON_ONCE(vcpu->arch.pio.count);
7858         for (i = 0; i < count; i++) {
7859                 if (in)
7860                         r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data);
7861                 else
7862                         r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data);
7863
7864                 if (r) {
7865                         if (i == 0)
7866                                 goto userspace_io;
7867
7868                         /*
7869                          * Userspace must have unregistered the device while PIO
7870                          * was running.  Drop writes; reads return 0.
7871                          */
7872                         if (in)
7873                                 memset(data, 0, size * (count - i));
7874                         break;
7875                 }
7876
7877                 data += size;
7878         }
7879         return 1;
7880
7881 userspace_io:
7882         vcpu->arch.pio.port = port;
7883         vcpu->arch.pio.in = in;
7884         vcpu->arch.pio.count = count;
7885         vcpu->arch.pio.size = size;
7886
7887         if (in)
7888                 memset(vcpu->arch.pio_data, 0, size * count);
7889         else
7890                 memcpy(vcpu->arch.pio_data, data, size * count);
7891
7892         vcpu->run->exit_reason = KVM_EXIT_IO;
7893         vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
7894         vcpu->run->io.size = size;
7895         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
7896         vcpu->run->io.count = count;
7897         vcpu->run->io.port = port;
7898         return 0;
7899 }
7900
7901 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size,
7902                            unsigned short port, void *val, unsigned int count)
7903 {
7904         int r = emulator_pio_in_out(vcpu, size, port, val, count, true);
7905         if (r)
7906                 trace_kvm_pio(KVM_PIO_IN, port, size, count, val);
7907
7908         return r;
7909 }
7910
7911 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val)
7912 {
7913         int size = vcpu->arch.pio.size;
7914         unsigned int count = vcpu->arch.pio.count;
7915         memcpy(val, vcpu->arch.pio_data, size * count);
7916         trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data);
7917         vcpu->arch.pio.count = 0;
7918 }
7919
7920 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
7921                                     int size, unsigned short port, void *val,
7922                                     unsigned int count)
7923 {
7924         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
7925         if (vcpu->arch.pio.count) {
7926                 /*
7927                  * Complete a previous iteration that required userspace I/O.
7928                  * Note, @count isn't guaranteed to match pio.count as userspace
7929                  * can modify ECX before rerunning the vCPU.  Ignore any such
7930                  * shenanigans as KVM doesn't support modifying the rep count,
7931                  * and the emulator ensures @count doesn't overflow the buffer.
7932                  */
7933                 complete_emulator_pio_in(vcpu, val);
7934                 return 1;
7935         }
7936
7937         return emulator_pio_in(vcpu, size, port, val, count);
7938 }
7939
7940 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size,
7941                             unsigned short port, const void *val,
7942                             unsigned int count)
7943 {
7944         trace_kvm_pio(KVM_PIO_OUT, port, size, count, val);
7945         return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
7946 }
7947
7948 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
7949                                      int size, unsigned short port,
7950                                      const void *val, unsigned int count)
7951 {
7952         return emulator_pio_out(emul_to_vcpu(ctxt), size, port, val, count);
7953 }
7954
7955 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
7956 {
7957         return static_call(kvm_x86_get_segment_base)(vcpu, seg);
7958 }
7959
7960 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
7961 {
7962         kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
7963 }
7964
7965 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu)
7966 {
7967         if (!need_emulate_wbinvd(vcpu))
7968                 return X86EMUL_CONTINUE;
7969
7970         if (static_call(kvm_x86_has_wbinvd_exit)()) {
7971                 int cpu = get_cpu();
7972
7973                 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
7974                 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask,
7975                                 wbinvd_ipi, NULL, 1);
7976                 put_cpu();
7977                 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
7978         } else
7979                 wbinvd();
7980         return X86EMUL_CONTINUE;
7981 }
7982
7983 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
7984 {
7985         kvm_emulate_wbinvd_noskip(vcpu);
7986         return kvm_skip_emulated_instruction(vcpu);
7987 }
7988 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
7989
7990
7991
7992 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
7993 {
7994         kvm_emulate_wbinvd_noskip(emul_to_vcpu(ctxt));
7995 }
7996
7997 static void emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
7998                             unsigned long *dest)
7999 {
8000         kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
8001 }
8002
8003 static int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
8004                            unsigned long value)
8005 {
8006
8007         return kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
8008 }
8009
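     /* Replace the low 32 bits of a control register, preserving the upper bits. */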
8010 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
8011 {
8012         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
8013 }
8014
8015 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
8016 {
8017         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8018         unsigned long value;
8019
8020         switch (cr) {
8021         case 0:
8022                 value = kvm_read_cr0(vcpu);
8023                 break;
8024         case 2:
8025                 value = vcpu->arch.cr2;
8026                 break;
8027         case 3:
8028                 value = kvm_read_cr3(vcpu);
8029                 break;
8030         case 4:
8031                 value = kvm_read_cr4(vcpu);
8032                 break;
8033         case 8:
8034                 value = kvm_get_cr8(vcpu);
8035                 break;
8036         default:
8037                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8038                 return 0;
8039         }
8040
8041         return value;
8042 }
8043
8044 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
8045 {
8046         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8047         int res = 0;
8048
8049         switch (cr) {
8050         case 0:
8051                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
8052                 break;
8053         case 2:
8054                 vcpu->arch.cr2 = val;
8055                 break;
8056         case 3:
8057                 res = kvm_set_cr3(vcpu, val);
8058                 break;
8059         case 4:
8060                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
8061                 break;
8062         case 8:
8063                 res = kvm_set_cr8(vcpu, val);
8064                 break;
8065         default:
8066                 kvm_err("%s: unexpected cr %u\n", __func__, cr);
8067                 res = -1;
8068         }
8069
8070         return res;
8071 }
8072
8073 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
8074 {
8075         return static_call(kvm_x86_get_cpl)(emul_to_vcpu(ctxt));
8076 }
8077
8078 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8079 {
8080         static_call(kvm_x86_get_gdt)(emul_to_vcpu(ctxt), dt);
8081 }
8082
8083 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8084 {
8085         static_call(kvm_x86_get_idt)(emul_to_vcpu(ctxt), dt);
8086 }
8087
8088 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8089 {
8090         static_call(kvm_x86_set_gdt)(emul_to_vcpu(ctxt), dt);
8091 }
8092
8093 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
8094 {
8095         static_call(kvm_x86_set_idt)(emul_to_vcpu(ctxt), dt);
8096 }
8097
8098 static unsigned long emulator_get_cached_segment_base(
8099         struct x86_emulate_ctxt *ctxt, int seg)
8100 {
8101         return get_segment_base(emul_to_vcpu(ctxt), seg);
8102 }
8103
8104 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
8105                                  struct desc_struct *desc, u32 *base3,
8106                                  int seg)
8107 {
8108         struct kvm_segment var;
8109
8110         kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
8111         *selector = var.selector;
8112
8113         if (var.unusable) {
8114                 memset(desc, 0, sizeof(*desc));
8115                 if (base3)
8116                         *base3 = 0;
8117                 return false;
8118         }
8119
8120         if (var.g)
8121                 var.limit >>= 12;
8122         set_desc_limit(desc, var.limit);
8123         set_desc_base(desc, (unsigned long)var.base);
8124 #ifdef CONFIG_X86_64
8125         if (base3)
8126                 *base3 = var.base >> 32;
8127 #endif
8128         desc->type = var.type;
8129         desc->s = var.s;
8130         desc->dpl = var.dpl;
8131         desc->p = var.present;
8132         desc->avl = var.avl;
8133         desc->l = var.l;
8134         desc->d = var.db;
8135         desc->g = var.g;
8136
8137         return true;
8138 }
8139
8140 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
8141                                  struct desc_struct *desc, u32 base3,
8142                                  int seg)
8143 {
8144         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8145         struct kvm_segment var;
8146
8147         var.selector = selector;
8148         var.base = get_desc_base(desc);
8149 #ifdef CONFIG_X86_64
8150         var.base |= ((u64)base3) << 32;
8151 #endif
8152         var.limit = get_desc_limit(desc);
8153         if (desc->g)
8154                 var.limit = (var.limit << 12) | 0xfff;
8155         var.type = desc->type;
8156         var.dpl = desc->dpl;
8157         var.db = desc->d;
8158         var.s = desc->s;
8159         var.l = desc->l;
8160         var.g = desc->g;
8161         var.avl = desc->avl;
8162         var.present = desc->p;
8163         var.unusable = !var.present;
8164         var.padding = 0;
8165
8166         kvm_set_segment(vcpu, &var, seg);
8167         return;
8168 }
8169
8170 static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8171                                         u32 msr_index, u64 *pdata)
8172 {
8173         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8174         int r;
8175
8176         r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
8177         if (r < 0)
8178                 return X86EMUL_UNHANDLEABLE;
8179
8180         if (r) {
8181                 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
8182                                        complete_emulated_rdmsr, r))
8183                         return X86EMUL_IO_NEEDED;
8184
8185                 trace_kvm_msr_read_ex(msr_index);
8186                 return X86EMUL_PROPAGATE_FAULT;
8187         }
8188
8189         trace_kvm_msr_read(msr_index, *pdata);
8190         return X86EMUL_CONTINUE;
8191 }
8192
8193 static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
8194                                         u32 msr_index, u64 data)
8195 {
8196         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8197         int r;
8198
8199         r = kvm_set_msr_with_filter(vcpu, msr_index, data);
8200         if (r < 0)
8201                 return X86EMUL_UNHANDLEABLE;
8202
8203         if (r) {
8204                 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
8205                                        complete_emulated_msr_access, r))
8206                         return X86EMUL_IO_NEEDED;
8207
8208                 trace_kvm_msr_write_ex(msr_index, data);
8209                 return X86EMUL_PROPAGATE_FAULT;
8210         }
8211
8212         trace_kvm_msr_write(msr_index, data);
8213         return X86EMUL_CONTINUE;
8214 }
8215
8216 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
8217                             u32 msr_index, u64 *pdata)
8218 {
8219         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
8220 }
8221
8222 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
8223                               u32 pmc)
8224 {
8225         if (kvm_pmu_is_valid_rdpmc_ecx(emul_to_vcpu(ctxt), pmc))
8226                 return 0;
8227         return -EINVAL;
8228 }
8229
8230 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
8231                              u32 pmc, u64 *pdata)
8232 {
8233         return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
8234 }
8235
8236 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
8237 {
8238         emul_to_vcpu(ctxt)->arch.halt_request = 1;
8239 }
8240
8241 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
8242                               struct x86_instruction_info *info,
8243                               enum x86_intercept_stage stage)
8244 {
8245         return static_call(kvm_x86_check_intercept)(emul_to_vcpu(ctxt), info, stage,
8246                                             &ctxt->exception);
8247 }
8248
8249 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
8250                               u32 *eax, u32 *ebx, u32 *ecx, u32 *edx,
8251                               bool exact_only)
8252 {
8253         return kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx, exact_only);
8254 }
8255
8256 static bool emulator_guest_has_movbe(struct x86_emulate_ctxt *ctxt)
8257 {
8258         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_MOVBE);
8259 }
8260
8261 static bool emulator_guest_has_fxsr(struct x86_emulate_ctxt *ctxt)
8262 {
8263         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_FXSR);
8264 }
8265
8266 static bool emulator_guest_has_rdpid(struct x86_emulate_ctxt *ctxt)
8267 {
8268         return guest_cpuid_has(emul_to_vcpu(ctxt), X86_FEATURE_RDPID);
8269 }
8270
8271 static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
8272 {
8273         return kvm_register_read_raw(emul_to_vcpu(ctxt), reg);
8274 }
8275
8276 static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
8277 {
8278         kvm_register_write_raw(emul_to_vcpu(ctxt), reg, val);
8279 }
8280
8281 static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
8282 {
8283         static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
8284 }
8285
8286 static bool emulator_is_smm(struct x86_emulate_ctxt *ctxt)
8287 {
8288         return is_smm(emul_to_vcpu(ctxt));
8289 }
8290
8291 static bool emulator_is_guest_mode(struct x86_emulate_ctxt *ctxt)
8292 {
8293         return is_guest_mode(emul_to_vcpu(ctxt));
8294 }
8295
8296 #ifndef CONFIG_KVM_SMM
8297 static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
8298 {
8299         WARN_ON_ONCE(1);
8300         return X86EMUL_UNHANDLEABLE;
8301 }
8302 #endif
8303
8304 static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
8305 {
8306         kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
8307 }
8308
8309 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
8310 {
8311         return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
8312 }
8313
8314 static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt)
8315 {
8316         struct kvm *kvm = emul_to_vcpu(ctxt)->kvm;
8317
8318         if (!kvm->vm_bugged)
8319                 kvm_vm_bugged(kvm);
8320 }
8321
8322 static const struct x86_emulate_ops emulate_ops = {
8323         .vm_bugged           = emulator_vm_bugged,
8324         .read_gpr            = emulator_read_gpr,
8325         .write_gpr           = emulator_write_gpr,
8326         .read_std            = emulator_read_std,
8327         .write_std           = emulator_write_std,
8328         .fetch               = kvm_fetch_guest_virt,
8329         .read_emulated       = emulator_read_emulated,
8330         .write_emulated      = emulator_write_emulated,
8331         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
8332         .invlpg              = emulator_invlpg,
8333         .pio_in_emulated     = emulator_pio_in_emulated,
8334         .pio_out_emulated    = emulator_pio_out_emulated,
8335         .get_segment         = emulator_get_segment,
8336         .set_segment         = emulator_set_segment,
8337         .get_cached_segment_base = emulator_get_cached_segment_base,
8338         .get_gdt             = emulator_get_gdt,
8339         .get_idt             = emulator_get_idt,
8340         .set_gdt             = emulator_set_gdt,
8341         .set_idt             = emulator_set_idt,
8342         .get_cr              = emulator_get_cr,
8343         .set_cr              = emulator_set_cr,
8344         .cpl                 = emulator_get_cpl,
8345         .get_dr              = emulator_get_dr,
8346         .set_dr              = emulator_set_dr,
8347         .set_msr_with_filter = emulator_set_msr_with_filter,
8348         .get_msr_with_filter = emulator_get_msr_with_filter,
8349         .get_msr             = emulator_get_msr,
8350         .check_pmc           = emulator_check_pmc,
8351         .read_pmc            = emulator_read_pmc,
8352         .halt                = emulator_halt,
8353         .wbinvd              = emulator_wbinvd,
8354         .fix_hypercall       = emulator_fix_hypercall,
8355         .intercept           = emulator_intercept,
8356         .get_cpuid           = emulator_get_cpuid,
8357         .guest_has_movbe     = emulator_guest_has_movbe,
8358         .guest_has_fxsr      = emulator_guest_has_fxsr,
8359         .guest_has_rdpid     = emulator_guest_has_rdpid,
8360         .set_nmi_mask        = emulator_set_nmi_mask,
8361         .is_smm              = emulator_is_smm,
8362         .is_guest_mode       = emulator_is_guest_mode,
8363         .leave_smm           = emulator_leave_smm,
8364         .triple_fault        = emulator_triple_fault,
8365         .set_xcr             = emulator_set_xcr,
8366 };
8367
8368 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
8369 {
8370         u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
8371         /*
8372          * An sti; sti sequence only disables interrupts for the first
8373          * instruction. So, if the last instruction, be it emulated or
8374          * not, left the system with the INT_STI flag enabled, it
8375          * means that the last instruction was an sti. We should not
8376          * leave the flag on in this case. The same goes for mov ss.
8377          */
8378         if (int_shadow & mask)
8379                 mask = 0;
8380         if (unlikely(int_shadow || mask)) {
8381                 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask);
8382                 if (!mask)
8383                         kvm_make_request(KVM_REQ_EVENT, vcpu);
8384         }
8385 }
8386
8387 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
8388 {
8389         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8390
8391         if (ctxt->exception.vector == PF_VECTOR)
8392                 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
8393         else if (ctxt->exception.error_code_valid)
8394                 kvm_queue_exception_e(vcpu, ctxt->exception.vector,
8395                                       ctxt->exception.error_code);
8396         else
8397                 kvm_queue_exception(vcpu, ctxt->exception.vector);
8398 }
8399
8400 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
8401 {
8402         struct x86_emulate_ctxt *ctxt;
8403
8404         ctxt = kmem_cache_zalloc(x86_emulator_cache, GFP_KERNEL_ACCOUNT);
8405         if (!ctxt) {
8406                 pr_err("failed to allocate vcpu's emulator\n");
8407                 return NULL;
8408         }
8409
8410         ctxt->vcpu = vcpu;
8411         ctxt->ops = &emulate_ops;
8412         vcpu->arch.emulate_ctxt = ctxt;
8413
8414         return ctxt;
8415 }
8416
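     /*
      * Reset the emulation context from current vCPU state: RFLAGS, RIP, and
      * the execution mode (real, vm86, 16/32-bit protected, or 64-bit).
      */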
8417 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
8418 {
8419         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8420         int cs_db, cs_l;
8421
8422         static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l);
8423
8424         ctxt->gpa_available = false;
8425         ctxt->eflags = kvm_get_rflags(vcpu);
8426         ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0;
8427
8428         ctxt->eip = kvm_rip_read(vcpu);
8429         ctxt->mode = (!is_protmode(vcpu))               ? X86EMUL_MODE_REAL :
8430                      (ctxt->eflags & X86_EFLAGS_VM)     ? X86EMUL_MODE_VM86 :
8431                      (cs_l && is_long_mode(vcpu))       ? X86EMUL_MODE_PROT64 :
8432                      cs_db                              ? X86EMUL_MODE_PROT32 :
8433                                                           X86EMUL_MODE_PROT16;
8434         ctxt->interruptibility = 0;
8435         ctxt->have_exception = false;
8436         ctxt->exception.vector = -1;
8437         ctxt->perm_ok = false;
8438
8439         init_decode_cache(ctxt);
8440         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8441 }
8442
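     /*
      * Inject interrupt @irq through the emulator using real-mode (IVT based)
      * delivery; a triple fault is requested if emulation fails.
      */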
8443 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
8444 {
8445         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8446         int ret;
8447
8448         init_emulate_ctxt(vcpu);
8449
8450         ctxt->op_bytes = 2;
8451         ctxt->ad_bytes = 2;
8452         ctxt->_eip = ctxt->eip + inc_eip;
8453         ret = emulate_int_real(ctxt, irq);
8454
8455         if (ret != X86EMUL_CONTINUE) {
8456                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8457         } else {
8458                 ctxt->eip = ctxt->_eip;
8459                 kvm_rip_write(vcpu, ctxt->eip);
8460                 kvm_set_rflags(vcpu, ctxt->eflags);
8461         }
8462 }
8463 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
8464
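     /*
      * Fill vcpu->run with a KVM_EXIT_INTERNAL_ERROR exit (suberror
      * KVM_INTERNAL_ERROR_EMULATION), packing the vendor exit info, the
      * caller's optional data, and, if provided, the bytes of the instruction
      * that failed to emulate.
      */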
8465 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8466                                            u8 ndata, u8 *insn_bytes, u8 insn_size)
8467 {
8468         struct kvm_run *run = vcpu->run;
8469         u64 info[5];
8470         u8 info_start;
8471
8472         /*
8473          * Zero the whole array used to retrieve the exit info, as casting to
8474          * u32 for select entries will leave some chunks uninitialized.
8475          */
8476         memset(&info, 0, sizeof(info));
8477
8478         static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1],
8479                                            &info[2], (u32 *)&info[3],
8480                                            (u32 *)&info[4]);
8481
8482         run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
8483         run->emulation_failure.suberror = KVM_INTERNAL_ERROR_EMULATION;
8484
8485         /*
8486          * There's currently space for 13 entries, but 5 are used for the exit
8487          * reason and info.  Restrict to 4 to reduce the maintenance burden
8488          * when expanding kvm_run.emulation_failure in the future.
8489          */
8490         if (WARN_ON_ONCE(ndata > 4))
8491                 ndata = 4;
8492
8493         /* Always include the flags as a 'data' entry. */
8494         info_start = 1;
8495         run->emulation_failure.flags = 0;
8496
8497         if (insn_size) {
8498                 BUILD_BUG_ON((sizeof(run->emulation_failure.insn_size) +
8499                               sizeof(run->emulation_failure.insn_bytes) != 16));
8500                 info_start += 2;
8501                 run->emulation_failure.flags |=
8502                         KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES;
8503                 run->emulation_failure.insn_size = insn_size;
8504                 memset(run->emulation_failure.insn_bytes, 0x90,
8505                        sizeof(run->emulation_failure.insn_bytes));
8506                 memcpy(run->emulation_failure.insn_bytes, insn_bytes, insn_size);
8507         }
8508
8509         memcpy(&run->internal.data[info_start], info, sizeof(info));
8510         memcpy(&run->internal.data[info_start + ARRAY_SIZE(info)], data,
8511                ndata * sizeof(data[0]));
8512
8513         run->emulation_failure.ndata = info_start + ARRAY_SIZE(info) + ndata;
8514 }
8515
8516 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu)
8517 {
8518         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8519
8520         prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data,
8521                                        ctxt->fetch.end - ctxt->fetch.data);
8522 }
8523
8524 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data,
8525                                           u8 ndata)
8526 {
8527         prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0);
8528 }
8529 EXPORT_SYMBOL_GPL(__kvm_prepare_emulation_failure_exit);
8530
8531 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu)
8532 {
8533         __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0);
8534 }
8535 EXPORT_SYMBOL_GPL(kvm_prepare_emulation_failure_exit);
8536
8537 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type)
8538 {
8539         struct kvm *kvm = vcpu->kvm;
8540
8541         ++vcpu->stat.insn_emulation_fail;
8542         trace_kvm_emulate_insn_failed(vcpu);
8543
8544         if (emulation_type & EMULTYPE_VMWARE_GP) {
8545                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8546                 return 1;
8547         }
8548
8549         if (kvm->arch.exit_on_emulation_error ||
8550             (emulation_type & EMULTYPE_SKIP)) {
8551                 prepare_emulation_ctxt_failure_exit(vcpu);
8552                 return 0;
8553         }
8554
8555         kvm_queue_exception(vcpu, UD_VECTOR);
8556
8557         if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) {
8558                 prepare_emulation_ctxt_failure_exit(vcpu);
8559                 return 0;
8560         }
8561
8562         return 1;
8563 }
8564
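     /*
      * Decide whether a failed emulation that was triggered by a write #PF
      * can be resolved by unprotecting the shadowed guest page and letting
      * the vCPU re-execute the instruction instead of reporting an error.
      */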
8565 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
8566                                   int emulation_type)
8567 {
8568         gpa_t gpa = cr2_or_gpa;
8569         kvm_pfn_t pfn;
8570
8571         if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
8572                 return false;
8573
8574         if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
8575             WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
8576                 return false;
8577
8578         if (!vcpu->arch.mmu->root_role.direct) {
8579                 /*
8580                  * Write permission should be allowed since only
8581                  * write accesses need to be emulated.
8582                  */
8583                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
8584
8585                 /*
8586                  * If the mapping is invalid in the guest, let the CPU
8587                  * retry it to generate the fault.
8588                  */
8589                 if (gpa == INVALID_GPA)
8590                         return true;
8591         }
8592
8593         /*
8594          * Do not retry the unhandleable instruction if it faults on the
8595          * readonly host memory, otherwise it will go into an infinite loop:
8596          * retry instruction -> write #PF -> emulation fail -> retry
8597          * instruction -> ...
8598          */
8599         pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
8600
8601         /*
8602          * If the instruction failed on the error pfn, it cannot be fixed;
8603          * report the error to userspace.
8604          */
8605         if (is_error_noslot_pfn(pfn))
8606                 return false;
8607
8608         kvm_release_pfn_clean(pfn);
8609
8610         /* The instructions are well-emulated on direct mmu. */
8611         if (vcpu->arch.mmu->root_role.direct) {
8612                 unsigned int indirect_shadow_pages;
8613
8614                 write_lock(&vcpu->kvm->mmu_lock);
8615                 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
8616                 write_unlock(&vcpu->kvm->mmu_lock);
8617
8618                 if (indirect_shadow_pages)
8619                         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8620
8621                 return true;
8622         }
8623
8624         /*
8625          * If emulation was due to an access to a shadowed page table
8626          * and it failed, try to unshadow the page and re-enter the
8627          * guest to let the CPU execute the instruction.
8628          */
8629         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8630
8631         /*
8632          * If the access faults on its own page tables, it cannot
8633          * be fixed by unprotecting the shadow page and it should
8634          * be reported to userspace.
8635          */
8636         return !(emulation_type & EMULTYPE_WRITE_PF_TO_SP);
8637 }
8638
8639 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
8640                               gpa_t cr2_or_gpa,  int emulation_type)
8641 {
8642         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
8643         unsigned long last_retry_eip, last_retry_addr, gpa = cr2_or_gpa;
8644
8645         last_retry_eip = vcpu->arch.last_retry_eip;
8646         last_retry_addr = vcpu->arch.last_retry_addr;
8647
8648         /*
8649          * If the emulation was caused by a #PF on an instruction that does
8650          * not write page tables, then the VM-Exit was caused by a write to a
8651          * protected shadow page; zap the shadow page and retry the
8652          * instruction directly.
8653          *
8654          * Note: if the guest uses a non-page-table-modifying instruction
8655          * on the PDE that points to the instruction, then we will unmap
8656          * the instruction and go into an infinite loop. So, we cache the
8657          * last retried eip and the last fault address; if we encounter the
8658          * same eip and address again, we can break out of the potential
8659          * infinite loop.
8660          */
8661         vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
8662
8663         if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
8664                 return false;
8665
8666         if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
8667             WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
8668                 return false;
8669
8670         if (x86_page_table_writing_insn(ctxt))
8671                 return false;
8672
8673         if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa)
8674                 return false;
8675
8676         vcpu->arch.last_retry_eip = ctxt->eip;
8677         vcpu->arch.last_retry_addr = cr2_or_gpa;
8678
8679         if (!vcpu->arch.mmu->root_role.direct)
8680                 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL);
8681
8682         kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
8683
8684         return true;
8685 }
8686
8687 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
8688 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
8689
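/*
 * Scan the four DR7 breakpoint slots and build a DR6 value with a bit set
 * for every enabled breakpoint whose R/W+LEN field matches @type and whose
 * address in @db matches @addr.  Returns 0 if no breakpoint matches.
 */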
8690 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
8691                                 unsigned long *db)
8692 {
8693         u32 dr6 = 0;
8694         int i;
8695         u32 enable, rwlen;
8696
8697         enable = dr7;
8698         rwlen = dr7 >> 16;
8699         for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
8700                 if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
8701                         dr6 |= (1 << i);
8702         return dr6;
8703 }
8704
8705 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
8706 {
8707         struct kvm_run *kvm_run = vcpu->run;
8708
8709         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
8710                 kvm_run->debug.arch.dr6 = DR6_BS | DR6_ACTIVE_LOW;
8711                 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
8712                 kvm_run->debug.arch.exception = DB_VECTOR;
8713                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
8714                 return 0;
8715         }
8716         kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
8717         return 1;
8718 }
8719
8720 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
8721 {
8722         unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
8723         int r;
8724
8725         r = static_call(kvm_x86_skip_emulated_instruction)(vcpu);
8726         if (unlikely(!r))
8727                 return 0;
8728
8729         kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
8730
8731         /*
8732          * rflags is the old, "raw" value of the flags.  The new value has
8733          * not been saved yet.
8734          *
8735          * This is correct even for TF set by the guest, because "the
8736          * processor will not generate this exception after the instruction
8737          * that sets the TF flag".
8738          */
8739         if (unlikely(rflags & X86_EFLAGS_TF))
8740                 r = kvm_vcpu_do_singlestep(vcpu);
8741         return r;
8742 }
8743 EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
8744
8745 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
8746 {
8747         u32 shadow;
8748
8749         if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
8750                 return true;
8751
8752         /*
8753          * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active,
8754          * but AMD CPUs do not.  MOV/POP SS blocking is rare, check that first
8755          * to avoid the relatively expensive CPUID lookup.
8756          */
8757         shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
8758         return (shadow & KVM_X86_SHADOW_INT_MOV_SS) &&
8759                guest_cpuid_is_intel(vcpu);
8760 }
8761
8762 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
8763                                            int emulation_type, int *r)
8764 {
8765         WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);
8766
8767         /*
8768          * Do not check for code breakpoints if hardware has already done the
8769          * checks, as inferred from the emulation type.  On NO_DECODE and SKIP,
8770          * the instruction has passed all exception checks, and all intercepted
8771          * exceptions that trigger emulation have lower priority than code
8772          * breakpoints, i.e. the fact that the intercepted exception occurred
8773          * means any code breakpoints have already been serviced.
8774          *
8775          * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
8776          * hardware has checked the RIP of the magic prefix, but not the RIP of
8777          * the instruction being emulated.  The intent of forced emulation is
8778          * to behave as if KVM intercepted the instruction without an exception
8779          * and without a prefix.
8780          */
8781         if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
8782                               EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
8783                 return false;
8784
8785         if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
8786             (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
8787                 struct kvm_run *kvm_run = vcpu->run;
8788                 unsigned long eip = kvm_get_linear_rip(vcpu);
8789                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
8790                                            vcpu->arch.guest_debug_dr7,
8791                                            vcpu->arch.eff_db);
8792
8793                 if (dr6 != 0) {
8794                         kvm_run->debug.arch.dr6 = dr6 | DR6_ACTIVE_LOW;
8795                         kvm_run->debug.arch.pc = eip;
8796                         kvm_run->debug.arch.exception = DB_VECTOR;
8797                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
8798                         *r = 0;
8799                         return true;
8800                 }
8801         }
8802
8803         if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
8804             !kvm_is_code_breakpoint_inhibited(vcpu)) {
8805                 unsigned long eip = kvm_get_linear_rip(vcpu);
8806                 u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
8807                                            vcpu->arch.dr7,
8808                                            vcpu->arch.db);
8809
8810                 if (dr6 != 0) {
8811                         kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
8812                         *r = 1;
8813                         return true;
8814                 }
8815         }
8816
8817         return false;
8818 }
8819
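/*
 * Only the one-byte I/O opcodes (IN/OUT/INS/OUTS) and RDPMC (0x0f 0x33) are
 * used by the VMware backdoor; anything else that arrives with
 * EMULTYPE_VMWARE_GP is reflected back to the guest as a #GP.
 */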
8820 static bool is_vmware_backdoor_opcode(struct x86_emulate_ctxt *ctxt)
8821 {
8822         switch (ctxt->opcode_len) {
8823         case 1:
8824                 switch (ctxt->b) {
8825                 case 0xe4:      /* IN */
8826                 case 0xe5:
8827                 case 0xec:
8828                 case 0xed:
8829                 case 0xe6:      /* OUT */
8830                 case 0xe7:
8831                 case 0xee:
8832                 case 0xef:
8833                 case 0x6c:      /* INS */
8834                 case 0x6d:
8835                 case 0x6e:      /* OUTS */
8836                 case 0x6f:
8837                         return true;
8838                 }
8839                 break;
8840         case 2:
8841                 switch (ctxt->b) {
8842                 case 0x33:      /* RDPMC */
8843                         return true;
8844                 }
8845                 break;
8846         }
8847
8848         return false;
8849 }
8850
8851 /*
8852  * Decode an instruction for emulation.  The caller is responsible for handling
8853  * code breakpoints.  Note, manually detecting code breakpoints is unnecessary
8854  * (and wrong) when emulating on an intercepted fault-like exception[*], as
8855  * code breakpoints have higher priority and thus have already been done by
8856  * code breakpoints have higher priority and thus have already been checked by
8857  *
8858  * [*] Except #MC, which is higher priority, but KVM should never emulate in
8859  *     response to a machine check.
8860  */
8861 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type,
8862                                     void *insn, int insn_len)
8863 {
8864         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8865         int r;
8866
8867         init_emulate_ctxt(vcpu);
8868
8869         r = x86_decode_insn(ctxt, insn, insn_len, emulation_type);
8870
8871         trace_kvm_emulate_insn_start(vcpu);
8872         ++vcpu->stat.insn_emulation;
8873
8874         return r;
8875 }
8876 EXPORT_SYMBOL_GPL(x86_decode_emulated_instruction);
8877
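/*
 * Emulate a (possibly pre-decoded) instruction.  Returns 1 if the vCPU can
 * immediately resume the guest, 0 if KVM must exit to userspace first, e.g.
 * to complete MMIO or PIO; exceptions generated during emulation are queued
 * for injection rather than returned to the caller.
 */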
8878 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
8879                             int emulation_type, void *insn, int insn_len)
8880 {
8881         int r;
8882         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
8883         bool writeback = true;
8884
8885         if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
8886                 return 1;
8887
8888         vcpu->arch.l1tf_flush_l1d = true;
8889
8890         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
8891                 kvm_clear_exception_queue(vcpu);
8892
8893                 /*
8894                  * Return immediately if RIP hits a code breakpoint, such #DBs
8895                  * are fault-like and are higher priority than any faults on
8896                  * the code fetch itself.
8897                  */
8898                 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
8899                         return r;
8900
8901                 r = x86_decode_emulated_instruction(vcpu, emulation_type,
8902                                                     insn, insn_len);
8903                 if (r != EMULATION_OK)  {
8904                         if ((emulation_type & EMULTYPE_TRAP_UD) ||
8905                             (emulation_type & EMULTYPE_TRAP_UD_FORCED)) {
8906                                 kvm_queue_exception(vcpu, UD_VECTOR);
8907                                 return 1;
8908                         }
8909                         if (reexecute_instruction(vcpu, cr2_or_gpa,
8910                                                   emulation_type))
8911                                 return 1;
8912
8913                         if (ctxt->have_exception &&
8914                             !(emulation_type & EMULTYPE_SKIP)) {
8915                                 /*
8916                                  * #UD should result in just EMULATION_FAILED, and trap-like
8917                                  * exceptions should not be encountered during decode.
8918                                  */
8919                                 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
8920                                              exception_type(ctxt->exception.vector) == EXCPT_TRAP);
8921                                 inject_emulated_exception(vcpu);
8922                                 return 1;
8923                         }
8924                         return handle_emulation_failure(vcpu, emulation_type);
8925                 }
8926         }
8927
8928         if ((emulation_type & EMULTYPE_VMWARE_GP) &&
8929             !is_vmware_backdoor_opcode(ctxt)) {
8930                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
8931                 return 1;
8932         }
8933
8934         /*
8935          * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
8936          * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
8937          * The caller is responsible for updating interruptibility state and
8938          * injecting single-step #DBs.
8939          */
8940         if (emulation_type & EMULTYPE_SKIP) {
8941                 if (ctxt->mode != X86EMUL_MODE_PROT64)
8942                         ctxt->eip = (u32)ctxt->_eip;
8943                 else
8944                         ctxt->eip = ctxt->_eip;
8945
8946                 if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
8947                         r = 1;
8948                         goto writeback;
8949                 }
8950
8951                 kvm_rip_write(vcpu, ctxt->eip);
8952                 if (ctxt->eflags & X86_EFLAGS_RF)
8953                         kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
8954                 return 1;
8955         }
8956
8957         if (retry_instruction(ctxt, cr2_or_gpa, emulation_type))
8958                 return 1;
8959
8960         /* This is needed for the VMware backdoor interface to work, since it
8961          * changes register values during the I/O operation. */
8962         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
8963                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
8964                 emulator_invalidate_register_cache(ctxt);
8965         }
8966
8967 restart:
8968         if (emulation_type & EMULTYPE_PF) {
8969                 /* Save the faulting GPA (cr2) in the address field */
8970                 ctxt->exception.address = cr2_or_gpa;
8971
8972                 /* With shadow page tables, cr2 contains a GVA or nGPA. */
8973                 if (vcpu->arch.mmu->root_role.direct) {
8974                         ctxt->gpa_available = true;
8975                         ctxt->gpa_val = cr2_or_gpa;
8976                 }
8977         } else {
8978                 /* Sanitize the address out of an abundance of paranoia. */
8979                 ctxt->exception.address = 0;
8980         }
8981
8982         r = x86_emulate_insn(ctxt);
8983
8984         if (r == EMULATION_INTERCEPTED)
8985                 return 1;
8986
8987         if (r == EMULATION_FAILED) {
8988                 if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type))
8989                         return 1;
8990
8991                 return handle_emulation_failure(vcpu, emulation_type);
8992         }
8993
8994         if (ctxt->have_exception) {
8995                 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write);
8996                 vcpu->mmio_needed = false;
8997                 r = 1;
8998                 inject_emulated_exception(vcpu);
8999         } else if (vcpu->arch.pio.count) {
9000                 if (!vcpu->arch.pio.in) {
9001                         /* FIXME: return into emulator if single-stepping.  */
9002                         vcpu->arch.pio.count = 0;
9003                 } else {
9004                         writeback = false;
9005                         vcpu->arch.complete_userspace_io = complete_emulated_pio;
9006                 }
9007                 r = 0;
9008         } else if (vcpu->mmio_needed) {
9009                 ++vcpu->stat.mmio_exits;
9010
9011                 if (!vcpu->mmio_is_write)
9012                         writeback = false;
9013                 r = 0;
9014                 vcpu->arch.complete_userspace_io = complete_emulated_mmio;
9015         } else if (vcpu->arch.complete_userspace_io) {
9016                 writeback = false;
9017                 r = 0;
9018         } else if (r == EMULATION_RESTART)
9019                 goto restart;
9020         else
9021                 r = 1;
9022
9023 writeback:
9024         if (writeback) {
9025                 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
9026                 toggle_interruptibility(vcpu, ctxt->interruptibility);
9027                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
9028
9029                 /*
9030                  * Note, EXCPT_DB is assumed to be fault-like as the emulator
9031                  * only supports code breakpoints and general detect #DB, both
9032                  * of which are fault-like.
9033                  */
9034                 if (!ctxt->have_exception ||
9035                     exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
9036                         kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
9037                         if (ctxt->is_branch)
9038                                 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
9039                         kvm_rip_write(vcpu, ctxt->eip);
9040                         if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
9041                                 r = kvm_vcpu_do_singlestep(vcpu);
9042                         static_call_cond(kvm_x86_update_emulated_instruction)(vcpu);
9043                         __kvm_set_rflags(vcpu, ctxt->eflags);
9044                 }
9045
9046                 /*
9047                  * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
9048                  * do nothing, and it will be requested again as soon as
9049                  * the shadow expires.  But we still need to check here,
9050                  * because POPF has no interrupt shadow.
9051                  */
9052                 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
9053                         kvm_make_request(KVM_REQ_EVENT, vcpu);
9054         } else
9055                 vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
9056
9057         return r;
9058 }
9059
9060 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type)
9061 {
9062         return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
9063 }
9064 EXPORT_SYMBOL_GPL(kvm_emulate_instruction);
9065
9066 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
9067                                         void *insn, int insn_len)
9068 {
9069         return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
9070 }
9071 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
9072
9073 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu)
9074 {
9075         vcpu->arch.pio.count = 0;
9076         return 1;
9077 }
9078
9079 static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
9080 {
9081         vcpu->arch.pio.count = 0;
9082
9083         if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
9084                 return 1;
9085
9086         return kvm_skip_emulated_instruction(vcpu);
9087 }
9088
9089 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
9090                             unsigned short port)
9091 {
9092         unsigned long val = kvm_rax_read(vcpu);
9093         int ret = emulator_pio_out(vcpu, size, port, &val, 1);
9094
9095         if (ret)
9096                 return ret;
9097
9098         /*
9099          * Work around userspace that relies on the old KVM behavior of %rip being
9100          * incremented prior to exiting to userspace to handle "OUT 0x7e".
9101          */
9102         if (port == 0x7e &&
9103             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) {
9104                 vcpu->arch.complete_userspace_io =
9105                         complete_fast_pio_out_port_0x7e;
9106                 kvm_skip_emulated_instruction(vcpu);
9107         } else {
9108                 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
9109                 vcpu->arch.complete_userspace_io = complete_fast_pio_out;
9110         }
9111         return 0;
9112 }
9113
9114 static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
9115 {
9116         unsigned long val;
9117
9118         /* We should only ever be called with arch.pio.count equal to 1 */
9119         BUG_ON(vcpu->arch.pio.count != 1);
9120
9121         if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
9122                 vcpu->arch.pio.count = 0;
9123                 return 1;
9124         }
9125
9126         /* For size less than 4 we merge, else we zero extend */
9127         val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0;
9128
9129         complete_emulator_pio_in(vcpu, &val);
9130         kvm_rax_write(vcpu, val);
9131
9132         return kvm_skip_emulated_instruction(vcpu);
9133 }
9134
9135 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
9136                            unsigned short port)
9137 {
9138         unsigned long val;
9139         int ret;
9140
9141         /* For size less than 4 we merge, else we zero extend */
9142         val = (size < 4) ? kvm_rax_read(vcpu) : 0;
9143
9144         ret = emulator_pio_in(vcpu, size, port, &val, 1);
9145         if (ret) {
9146                 kvm_rax_write(vcpu, val);
9147                 return ret;
9148         }
9149
9150         vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
9151         vcpu->arch.complete_userspace_io = complete_fast_pio_in;
9152
9153         return 0;
9154 }
9155
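/*
 * Fast-path emulation of single IN/OUT instructions using RAX.  Returns
 * non-zero if the I/O was handled in the kernel and the instruction was
 * skipped, 0 if KVM must exit to userspace first; in that case the skip is
 * completed later via the complete_userspace_io callback.
 */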
9156 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
9157 {
9158         int ret;
9159
9160         if (in)
9161                 ret = kvm_fast_pio_in(vcpu, size, port);
9162         else
9163                 ret = kvm_fast_pio_out(vcpu, size, port);
9164         return ret && kvm_skip_emulated_instruction(vcpu);
9165 }
9166 EXPORT_SYMBOL_GPL(kvm_fast_pio);
9167
9168 static int kvmclock_cpu_down_prep(unsigned int cpu)
9169 {
9170         __this_cpu_write(cpu_tsc_khz, 0);
9171         return 0;
9172 }
9173
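/*
 * Refresh this CPU's cached TSC frequency.  @data is a cpufreq_freqs pointer
 * when invoked (via IPI) from the cpufreq notifier, or NULL when a CPU comes
 * online, in which case the current cpufreq value (or tsc_khz) is used.
 */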
9174 static void tsc_khz_changed(void *data)
9175 {
9176         struct cpufreq_freqs *freq = data;
9177         unsigned long khz;
9178
9179         WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_CONSTANT_TSC));
9180
9181         if (data)
9182                 khz = freq->new;
9183         else
9184                 khz = cpufreq_quick_get(raw_smp_processor_id());
9185         if (!khz)
9186                 khz = tsc_khz;
9187         __this_cpu_write(cpu_tsc_khz, khz);
9188 }
9189
9190 #ifdef CONFIG_X86_64
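/*
 * Reenlightenment callback registered via set_hv_tscchange_cb() when running
 * on Hyper-V; it fires when the hypervisor signals a TSC frequency change
 * (e.g. after a live migration).  Stop Hyper-V's TSC emulation and refresh
 * kvmclock for every VM so guests observe consistent time.
 */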
9191 static void kvm_hyperv_tsc_notifier(void)
9192 {
9193         struct kvm *kvm;
9194         int cpu;
9195
9196         mutex_lock(&kvm_lock);
9197         list_for_each_entry(kvm, &vm_list, vm_list)
9198                 kvm_make_mclock_inprogress_request(kvm);
9199
9200         /* no guest entries from this point */
9201         hyperv_stop_tsc_emulation();
9202
9203         /* TSC frequency always matches when on Hyper-V */
9204         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9205                 for_each_present_cpu(cpu)
9206                         per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
9207         }
9208         kvm_caps.max_guest_tsc_khz = tsc_khz;
9209
9210         list_for_each_entry(kvm, &vm_list, vm_list) {
9211                 __kvm_start_pvclock_update(kvm);
9212                 pvclock_update_vm_gtod_copy(kvm);
9213                 kvm_end_pvclock_update(kvm);
9214         }
9215
9216         mutex_unlock(&kvm_lock);
9217 }
9218 #endif
9219
9220 static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
9221 {
9222         struct kvm *kvm;
9223         struct kvm_vcpu *vcpu;
9224         int send_ipi = 0;
9225         unsigned long i;
9226
9227         /*
9228          * We allow guests to temporarily run on slowing clocks,
9229          * provided we notify them after, or to run on accelerating
9230          * clocks, provided we notify them before.  Thus time never
9231          * goes backwards.
9232          *
9233          * However, we have a problem.  We can't atomically update
9234          * the frequency of a given CPU from this function; it is
9235          * merely a notifier, which can be called from any CPU.
9236          * Changing the TSC frequency at arbitrary points in time
9237          * requires a recomputation of local variables related to
9238          * the TSC for each VCPU.  We must flag these local variables
9239          * to be updated and be sure the update takes place with the
9240          * new frequency before any guests proceed.
9241          *
9242          * Unfortunately, the combination of hotplug CPU and frequency
9243          * change creates an intractable locking scenario; the order
9244          * of when these callouts happen is undefined with respect to
9245          * CPU hotplug, and they can race with each other.  As such,
9246          * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
9247          * undefined; you can actually have a CPU frequency change take
9248          * place in between the computation of X and the setting of the
9249          * variable.  To protect against this problem, all updates of
9250          * the per_cpu tsc_khz variable are done in an interrupt
9251          * protected IPI, and all callers wishing to update the value
9252          * must wait for a synchronous IPI to complete (which is trivial
9253          * if the caller is on the CPU already).  This establishes the
9254          * necessary total order on variable updates.
9255          *
9256          * Note that because a guest time update may take place
9257          * anytime after the setting of the VCPU's request bit, the
9258          * correct TSC value must be set before the request.  However,
9259          * to ensure the update actually makes it to any guest which
9260          * starts running in hardware virtualization between the set
9261          * and the acquisition of the spinlock, we must also ping the
9262          * CPU after setting the request bit.
9263          *
9264          */
9265
9266         smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9267
9268         mutex_lock(&kvm_lock);
9269         list_for_each_entry(kvm, &vm_list, vm_list) {
9270                 kvm_for_each_vcpu(i, vcpu, kvm) {
9271                         if (vcpu->cpu != cpu)
9272                                 continue;
9273                         kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
9274                         if (vcpu->cpu != raw_smp_processor_id())
9275                                 send_ipi = 1;
9276                 }
9277         }
9278         mutex_unlock(&kvm_lock);
9279
9280         if (freq->old < freq->new && send_ipi) {
9281                 /*
9282                  * We are raising the frequency.  We must make sure the
9283                  * guest doesn't see old kvmclock values while running with
9284                  * the new frequency, otherwise we risk the guest seeing
9285                  * time go backwards.
9286                  *
9287                  * In case we update the frequency for another CPU
9288                  * (which might be in guest context), send an interrupt
9289                  * to kick that CPU out of guest context.  The next time
9290                  * guest context is entered, kvmclock will be updated,
9291                  * so the guest will not see stale values.
9292                  */
9293                 smp_call_function_single(cpu, tsc_khz_changed, freq, 1);
9294         }
9295 }
9296
9297 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
9298                                      void *data)
9299 {
9300         struct cpufreq_freqs *freq = data;
9301         int cpu;
9302
9303         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
9304                 return 0;
9305         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
9306                 return 0;
9307
9308         for_each_cpu(cpu, freq->policy->cpus)
9309                 __kvmclock_cpufreq_notifier(freq, cpu);
9310
9311         return 0;
9312 }
9313
9314 static struct notifier_block kvmclock_cpufreq_notifier_block = {
9315         .notifier_call  = kvmclock_cpufreq_notifier
9316 };
9317
9318 static int kvmclock_cpu_online(unsigned int cpu)
9319 {
9320         tsc_khz_changed(NULL);
9321         return 0;
9322 }
9323
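/*
 * Hook TSC frequency tracking for hosts without a constant TSC: seed
 * max_tsc_khz from cpufreq (if available), and register the cpufreq
 * transition notifier and CPU hotplug callbacks that keep the per-CPU
 * cpu_tsc_khz values up to date.
 */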
9324 static void kvm_timer_init(void)
9325 {
9326         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9327                 max_tsc_khz = tsc_khz;
9328
9329                 if (IS_ENABLED(CONFIG_CPU_FREQ)) {
9330                         struct cpufreq_policy *policy;
9331                         int cpu;
9332
9333                         cpu = get_cpu();
9334                         policy = cpufreq_cpu_get(cpu);
9335                         if (policy) {
9336                                 if (policy->cpuinfo.max_freq)
9337                                         max_tsc_khz = policy->cpuinfo.max_freq;
9338                                 cpufreq_cpu_put(policy);
9339                         }
9340                         put_cpu();
9341                 }
9342                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
9343                                           CPUFREQ_TRANSITION_NOTIFIER);
9344
9345                 cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "x86/kvm/clk:online",
9346                                   kvmclock_cpu_online, kvmclock_cpu_down_prep);
9347         }
9348 }
9349
9350 #ifdef CONFIG_X86_64
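/*
 * Deferred work run when the host timekeeper moves away from a TSC-based
 * clocksource: request a masterclock update on every vCPU and note that no
 * guest is using the master clock anymore.
 */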
9351 static void pvclock_gtod_update_fn(struct work_struct *work)
9352 {
9353         struct kvm *kvm;
9354         struct kvm_vcpu *vcpu;
9355         unsigned long i;
9356
9357         mutex_lock(&kvm_lock);
9358         list_for_each_entry(kvm, &vm_list, vm_list)
9359                 kvm_for_each_vcpu(i, vcpu, kvm)
9360                         kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
9361         atomic_set(&kvm_guest_has_master_clock, 0);
9362         mutex_unlock(&kvm_lock);
9363 }
9364
9365 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
9366
9367 /*
9368  * Indirection to move queue_work() out of the tk_core.seq write held
9369  * region to prevent possible deadlocks against time accessors which
9370  * are invoked with work related locks held.
9371  */
9372 static void pvclock_irq_work_fn(struct irq_work *w)
9373 {
9374         queue_work(system_long_wq, &pvclock_gtod_work);
9375 }
9376
9377 static DEFINE_IRQ_WORK(pvclock_irq_work, pvclock_irq_work_fn);
9378
9379 /*
9380  * Notification about pvclock gtod data update.
9381  */
9382 static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
9383                                void *priv)
9384 {
9385         struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
9386         struct timekeeper *tk = priv;
9387
9388         update_pvclock_gtod(tk);
9389
9390         /*
9391          * Disable master clock if host does not trust, or does not use,
9392          * TSC based clocksource. Delegate queue_work() to irq_work as
9393          * this is invoked with tk_core.seq write held.
9394          */
9395         if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) &&
9396             atomic_read(&kvm_guest_has_master_clock) != 0)
9397                 irq_work_queue(&pvclock_irq_work);
9398         return 0;
9399 }
9400
9401 static struct notifier_block pvclock_gtod_notifier = {
9402         .notifier_call = pvclock_gtod_notify,
9403 };
9404 #endif
9405
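/*
 * Copy the vendor module's runtime ops into kvm_x86_ops and retarget the
 * per-op static calls.  Mandatory ops are sanity-checked for non-NULL,
 * optional ops may be NULL, and OPTIONAL_RET0 ops fall back to a stub that
 * returns 0.
 */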
9406 static inline void kvm_ops_update(struct kvm_x86_init_ops *ops)
9407 {
9408         memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
9409
9410 #define __KVM_X86_OP(func) \
9411         static_call_update(kvm_x86_##func, kvm_x86_ops.func);
9412 #define KVM_X86_OP(func) \
9413         WARN_ON(!kvm_x86_ops.func); __KVM_X86_OP(func)
9414 #define KVM_X86_OP_OPTIONAL __KVM_X86_OP
9415 #define KVM_X86_OP_OPTIONAL_RET0(func) \
9416         static_call_update(kvm_x86_##func, (void *)kvm_x86_ops.func ? : \
9417                                            (void *)__static_call_return0);
9418 #include <asm/kvm-x86-ops.h>
9419 #undef __KVM_X86_OP
9420
9421         kvm_pmu_ops_update(ops->pmu_ops);
9422 }
9423
9424 static int kvm_x86_check_processor_compatibility(void)
9425 {
9426         int cpu = smp_processor_id();
9427         struct cpuinfo_x86 *c = &cpu_data(cpu);
9428
9429         /*
9430          * Compatibility checks are done when loading KVM and when enabling
9431          * hardware, e.g. during CPU hotplug, to ensure all online CPUs are
9432          * compatible, i.e. KVM should never perform a compatibility check on
9433          * an offline CPU.
9434          */
9435         WARN_ON(!cpu_online(cpu));
9436
9437         if (__cr4_reserved_bits(cpu_has, c) !=
9438             __cr4_reserved_bits(cpu_has, &boot_cpu_data))
9439                 return -EIO;
9440
9441         return static_call(kvm_x86_check_processor_compatibility)();
9442 }
9443
9444 static void kvm_x86_check_cpu_compat(void *ret)
9445 {
9446         *(int *)ret = kvm_x86_check_processor_compatibility();
9447 }
9448
9449 static int __kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
9450 {
9451         u64 host_pat;
9452         int r, cpu;
9453
9454         if (kvm_x86_ops.hardware_enable) {
9455                 pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
9456                 return -EEXIST;
9457         }
9458
9459         /*
9460          * KVM explicitly assumes that the guest has an FPU and
9461          * FXSAVE/FXRSTOR. For example, KVM_GET_FPU explicitly casts the
9462          * vCPU's FPU state to an fxregs_state struct.
9463          */
9464         if (!boot_cpu_has(X86_FEATURE_FPU) || !boot_cpu_has(X86_FEATURE_FXSR)) {
9465                 pr_err("inadequate fpu\n");
9466                 return -EOPNOTSUPP;
9467         }
9468
9469         if (IS_ENABLED(CONFIG_PREEMPT_RT) && !boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9470                 pr_err("RT requires X86_FEATURE_CONSTANT_TSC\n");
9471                 return -EOPNOTSUPP;
9472         }
9473
9474         /*
9475          * KVM assumes that PAT entry '0' encodes WB memtype and simply zeroes
9476          * the PAT bits in SPTEs.  Bail if PAT[0] is programmed to something
9477          * other than WB.  Note, EPT doesn't utilize the PAT, but don't bother
9478          * with an exception.  PAT[0] is set to WB on RESET and also by the
9479          * kernel, i.e. failure indicates a kernel bug or broken firmware.
9480          */
9481         if (rdmsrl_safe(MSR_IA32_CR_PAT, &host_pat) ||
9482             (host_pat & GENMASK(2, 0)) != 6) {
9483                 pr_err("host PAT[0] is not WB\n");
9484                 return -EIO;
9485         }
9486
9487         x86_emulator_cache = kvm_alloc_emulator_cache();
9488         if (!x86_emulator_cache) {
9489                 pr_err("failed to allocate cache for x86 emulator\n");
9490                 return -ENOMEM;
9491         }
9492
9493         user_return_msrs = alloc_percpu(struct kvm_user_return_msrs);
9494         if (!user_return_msrs) {
9495                 pr_err("failed to allocate percpu kvm_user_return_msrs\n");
9496                 r = -ENOMEM;
9497                 goto out_free_x86_emulator_cache;
9498         }
9499         kvm_nr_uret_msrs = 0;
9500
9501         r = kvm_mmu_vendor_module_init();
9502         if (r)
9503                 goto out_free_percpu;
9504
9505         if (boot_cpu_has(X86_FEATURE_XSAVE)) {
9506                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
9507                 kvm_caps.supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
9508         }
9509
9510         rdmsrl_safe(MSR_EFER, &host_efer);
9511
9512         if (boot_cpu_has(X86_FEATURE_XSAVES))
9513                 rdmsrl(MSR_IA32_XSS, host_xss);
9514
9515         kvm_init_pmu_capability(ops->pmu_ops);
9516
9517         if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
9518                 rdmsrl(MSR_IA32_ARCH_CAPABILITIES, host_arch_capabilities);
9519
9520         r = ops->hardware_setup();
9521         if (r != 0)
9522                 goto out_mmu_exit;
9523
9524         kvm_ops_update(ops);
9525
9526         for_each_online_cpu(cpu) {
9527                 smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
9528                 if (r < 0)
9529                         goto out_unwind_ops;
9530         }
9531
9532         /*
9533          * Point of no return!  DO NOT add error paths below this point unless
9534          * absolutely necessary, as most operations from this point forward
9535          * require unwinding.
9536          */
9537         kvm_timer_init();
9538
9539         if (pi_inject_timer == -1)
9540                 pi_inject_timer = housekeeping_enabled(HK_TYPE_TIMER);
9541 #ifdef CONFIG_X86_64
9542         pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
9543
9544         if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9545                 set_hv_tscchange_cb(kvm_hyperv_tsc_notifier);
9546 #endif
9547
9548         kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
9549
9550         if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
9551                 kvm_caps.supported_xss = 0;
9552
9553 #define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
9554         cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
9555 #undef __kvm_cpu_cap_has
9556
9557         if (kvm_caps.has_tsc_control) {
9558                 /*
9559                  * Make sure the user can only configure tsc_khz values that
9560                  * fit into a signed integer.
9561                  * A min value is not calculated because it will always
9562                  * be 1 on all machines.
9563                  */
9564                 u64 max = min(0x7fffffffULL,
9565                               __scale_tsc(kvm_caps.max_tsc_scaling_ratio, tsc_khz));
9566                 kvm_caps.max_guest_tsc_khz = max;
9567         }
9568         kvm_caps.default_tsc_scaling_ratio = 1ULL << kvm_caps.tsc_scaling_ratio_frac_bits;
9569         kvm_init_msr_lists();
9570         return 0;
9571
9572 out_unwind_ops:
9573         kvm_x86_ops.hardware_enable = NULL;
9574         static_call(kvm_x86_hardware_unsetup)();
9575 out_mmu_exit:
9576         kvm_mmu_vendor_module_exit();
9577 out_free_percpu:
9578         free_percpu(user_return_msrs);
9579 out_free_x86_emulator_cache:
9580         kmem_cache_destroy(x86_emulator_cache);
9581         return r;
9582 }
9583
9584 int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
9585 {
9586         int r;
9587
9588         mutex_lock(&vendor_module_lock);
9589         r = __kvm_x86_vendor_init(ops);
9590         mutex_unlock(&vendor_module_lock);
9591
9592         return r;
9593 }
9594 EXPORT_SYMBOL_GPL(kvm_x86_vendor_init);
9595
9596 void kvm_x86_vendor_exit(void)
9597 {
9598         kvm_unregister_perf_callbacks();
9599
9600 #ifdef CONFIG_X86_64
9601         if (hypervisor_is_type(X86_HYPER_MS_HYPERV))
9602                 clear_hv_tscchange_cb();
9603 #endif
9604         kvm_lapic_exit();
9605
9606         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
9607                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
9608                                             CPUFREQ_TRANSITION_NOTIFIER);
9609                 cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
9610         }
9611 #ifdef CONFIG_X86_64
9612         pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
9613         irq_work_sync(&pvclock_irq_work);
9614         cancel_work_sync(&pvclock_gtod_work);
9615 #endif
9616         static_call(kvm_x86_hardware_unsetup)();
9617         kvm_mmu_vendor_module_exit();
9618         free_percpu(user_return_msrs);
9619         kmem_cache_destroy(x86_emulator_cache);
9620 #ifdef CONFIG_KVM_XEN
9621         static_key_deferred_flush(&kvm_xen_enabled);
9622         WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
9623 #endif
9624         mutex_lock(&vendor_module_lock);
9625         kvm_x86_ops.hardware_enable = NULL;
9626         mutex_unlock(&vendor_module_lock);
9627 }
9628 EXPORT_SYMBOL_GPL(kvm_x86_vendor_exit);
9629
9630 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
9631 {
9632         /*
9633          * The vCPU has halted, e.g. executed HLT.  Update the run state if the
9634          * local APIC is in-kernel; the run loop will detect the non-runnable
9635          * state and halt the vCPU.  Exit to userspace if the local APIC is
9636          * managed by userspace, in which case userspace is responsible for
9637          * handling wake events.
9638          */
9639         ++vcpu->stat.halt_exits;
9640         if (lapic_in_kernel(vcpu)) {
9641                 vcpu->arch.mp_state = state;
9642                 return 1;
9643         } else {
9644                 vcpu->run->exit_reason = reason;
9645                 return 0;
9646         }
9647 }
9648
9649 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
9650 {
9651         return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
9652 }
9653 EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
9654
9655 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
9656 {
9657         int ret = kvm_skip_emulated_instruction(vcpu);
9658         /*
9659          * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
9660          * KVM_EXIT_DEBUG here.
9661          */
9662         return kvm_emulate_halt_noskip(vcpu) && ret;
9663 }
9664 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
9665
9666 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
9667 {
9668         int ret = kvm_skip_emulated_instruction(vcpu);
9669
9670         return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
9671                                         KVM_EXIT_AP_RESET_HOLD) && ret;
9672 }
9673 EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
9674
9675 #ifdef CONFIG_X86_64
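/*
 * KVM_HC_CLOCK_PAIRING: hand the guest a (wallclock time, guest TSC) sample
 * captured at the same instant, so it can correlate its TSC with host real
 * time.
 */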
9676 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr,
9677                                 unsigned long clock_type)
9678 {
9679         struct kvm_clock_pairing clock_pairing;
9680         struct timespec64 ts;
9681         u64 cycle;
9682         int ret;
9683
9684         if (clock_type != KVM_CLOCK_PAIRING_WALLCLOCK)
9685                 return -KVM_EOPNOTSUPP;
9686
9687         /*
9688          * When the TSC is in permanent catchup mode, guests won't be able to
9689          * use the pvclock_read_retry loop to get a consistent view of pvclock.
9690          */
9691         if (vcpu->arch.tsc_always_catchup)
9692                 return -KVM_EOPNOTSUPP;
9693
9694         if (!kvm_get_walltime_and_clockread(&ts, &cycle))
9695                 return -KVM_EOPNOTSUPP;
9696
9697         clock_pairing.sec = ts.tv_sec;
9698         clock_pairing.nsec = ts.tv_nsec;
9699         clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle);
9700         clock_pairing.flags = 0;
9701         memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad));
9702
9703         ret = 0;
9704         if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing,
9705                             sizeof(struct kvm_clock_pairing)))
9706                 ret = -KVM_EFAULT;
9707
9708         return ret;
9709 }
9710 #endif
9711
9712 /*
9713  * kvm_pv_kick_cpu_op:  Kick a vcpu.
9714  *
9715  * @apicid - apicid of vcpu to be kicked.
9716  */
9717 static void kvm_pv_kick_cpu_op(struct kvm *kvm, int apicid)
9718 {
9719         /*
9720          * All other fields are unused for APIC_DM_REMRD, but may be consumed by
9721          * common code, e.g. for tracing. Defer initialization to the compiler.
9722          */
9723         struct kvm_lapic_irq lapic_irq = {
9724                 .delivery_mode = APIC_DM_REMRD,
9725                 .dest_mode = APIC_DEST_PHYSICAL,
9726                 .shorthand = APIC_DEST_NOSHORT,
9727                 .dest_id = apicid,
9728         };
9729
9730         kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
9731 }
9732
9733 bool kvm_apicv_activated(struct kvm *kvm)
9734 {
9735         return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0);
9736 }
9737 EXPORT_SYMBOL_GPL(kvm_apicv_activated);
9738
9739 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu)
9740 {
9741         ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons);
9742         ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu);
9743
9744         return (vm_reasons | vcpu_reasons) == 0;
9745 }
9746 EXPORT_SYMBOL_GPL(kvm_vcpu_apicv_activated);
9747
9748 static void set_or_clear_apicv_inhibit(unsigned long *inhibits,
9749                                        enum kvm_apicv_inhibit reason, bool set)
9750 {
9751         if (set)
9752                 __set_bit(reason, inhibits);
9753         else
9754                 __clear_bit(reason, inhibits);
9755
9756         trace_kvm_apicv_inhibit_changed(reason, set, *inhibits);
9757 }
9758
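/*
 * New VMs start with APICv inhibited: the ABSENT inhibit remains until an
 * in-kernel local APIC exists, and the DISABLE inhibit is set (and stays set)
 * when the module-level enable_apicv knob is off.
 */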
9759 static void kvm_apicv_init(struct kvm *kvm)
9760 {
9761         unsigned long *inhibits = &kvm->arch.apicv_inhibit_reasons;
9762
9763         init_rwsem(&kvm->arch.apicv_update_lock);
9764
9765         set_or_clear_apicv_inhibit(inhibits, APICV_INHIBIT_REASON_ABSENT, true);
9766
9767         if (!enable_apicv)
9768                 set_or_clear_apicv_inhibit(inhibits,
9769                                            APICV_INHIBIT_REASON_DISABLE, true);
9770 }
9771
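/*
 * Best-effort directed yield: when the guest kicks or yields to the vCPU
 * with APIC ID @dest_id, try to boost that vCPU rather than spin.  All
 * failure cases fall through silently; only the attempt/success stats are
 * updated.
 */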
9772 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id)
9773 {
9774         struct kvm_vcpu *target = NULL;
9775         struct kvm_apic_map *map;
9776
9777         vcpu->stat.directed_yield_attempted++;
9778
9779         if (single_task_running())
9780                 goto no_yield;
9781
9782         rcu_read_lock();
9783         map = rcu_dereference(vcpu->kvm->arch.apic_map);
9784
9785         if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id])
9786                 target = map->phys_map[dest_id]->vcpu;
9787
9788         rcu_read_unlock();
9789
9790         if (!target || !READ_ONCE(target->ready))
9791                 goto no_yield;
9792
9793         /* Ignore requests to yield to self */
9794         if (vcpu == target)
9795                 goto no_yield;
9796
9797         if (kvm_vcpu_yield_to(target) <= 0)
9798                 goto no_yield;
9799
9800         vcpu->stat.directed_yield_successful++;
9801
9802 no_yield:
9803         return;
9804 }
9805
9806 static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
9807 {
9808         u64 ret = vcpu->run->hypercall.ret;
9809
9810         if (!is_64_bit_mode(vcpu))
9811                 ret = (u32)ret;
9812         kvm_rax_write(vcpu, ret);
9813         ++vcpu->stat.hypercalls;
9814         return kvm_skip_emulated_instruction(vcpu);
9815 }
9816
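/*
 * KVM's native hypercall ABI: the hypercall number is passed in RAX and up
 * to four arguments in RBX, RCX, RDX and RSI; the return value is written
 * back to RAX.  For 32-bit callers the inputs and the result are truncated
 * to 32 bits, and hypercalls made from CPL > 0 fail with -KVM_EPERM.
 */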
9817 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
9818 {
9819         unsigned long nr, a0, a1, a2, a3, ret;
9820         int op_64_bit;
9821
9822         if (kvm_xen_hypercall_enabled(vcpu->kvm))
9823                 return kvm_xen_hypercall(vcpu);
9824
9825         if (kvm_hv_hypercall_enabled(vcpu))
9826                 return kvm_hv_hypercall(vcpu);
9827
9828         nr = kvm_rax_read(vcpu);
9829         a0 = kvm_rbx_read(vcpu);
9830         a1 = kvm_rcx_read(vcpu);
9831         a2 = kvm_rdx_read(vcpu);
9832         a3 = kvm_rsi_read(vcpu);
9833
9834         trace_kvm_hypercall(nr, a0, a1, a2, a3);
9835
9836         op_64_bit = is_64_bit_hypercall(vcpu);
9837         if (!op_64_bit) {
9838                 nr &= 0xFFFFFFFF;
9839                 a0 &= 0xFFFFFFFF;
9840                 a1 &= 0xFFFFFFFF;
9841                 a2 &= 0xFFFFFFFF;
9842                 a3 &= 0xFFFFFFFF;
9843         }
9844
9845         if (static_call(kvm_x86_get_cpl)(vcpu) != 0) {
9846                 ret = -KVM_EPERM;
9847                 goto out;
9848         }
9849
9850         ret = -KVM_ENOSYS;
9851
9852         switch (nr) {
9853         case KVM_HC_VAPIC_POLL_IRQ:
9854                 ret = 0;
9855                 break;
9856         case KVM_HC_KICK_CPU:
9857                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT))
9858                         break;
9859
9860                 kvm_pv_kick_cpu_op(vcpu->kvm, a1);
9861                 kvm_sched_yield(vcpu, a1);
9862                 ret = 0;
9863                 break;
9864 #ifdef CONFIG_X86_64
9865         case KVM_HC_CLOCK_PAIRING:
9866                 ret = kvm_pv_clock_pairing(vcpu, a0, a1);
9867                 break;
9868 #endif
9869         case KVM_HC_SEND_IPI:
9870                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI))
9871                         break;
9872
9873                 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit);
9874                 break;
9875         case KVM_HC_SCHED_YIELD:
9876                 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD))
9877                         break;
9878
9879                 kvm_sched_yield(vcpu, a0);
9880                 ret = 0;
9881                 break;
9882         case KVM_HC_MAP_GPA_RANGE: {
9883                 u64 gpa = a0, npages = a1, attrs = a2;
9884
9885                 ret = -KVM_ENOSYS;
9886                 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
9887                         break;
9888
9889                 if (!PAGE_ALIGNED(gpa) || !npages ||
9890                     gpa_to_gfn(gpa) + npages <= gpa_to_gfn(gpa)) {
9891                         ret = -KVM_EINVAL;
9892                         break;
9893                 }
9894
9895                 vcpu->run->exit_reason        = KVM_EXIT_HYPERCALL;
9896                 vcpu->run->hypercall.nr       = KVM_HC_MAP_GPA_RANGE;
9897                 vcpu->run->hypercall.args[0]  = gpa;
9898                 vcpu->run->hypercall.args[1]  = npages;
9899                 vcpu->run->hypercall.args[2]  = attrs;
9900                 vcpu->run->hypercall.flags    = 0;
9901                 if (op_64_bit)
9902                         vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
9903
9904                 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
9905                 vcpu->arch.complete_userspace_io = complete_hypercall_exit;
9906                 return 0;
9907         }
9908         default:
9909                 ret = -KVM_ENOSYS;
9910                 break;
9911         }
9912 out:
9913         if (!op_64_bit)
9914                 ret = (u32)ret;
9915         kvm_rax_write(vcpu, ret);
9916
9917         ++vcpu->stat.hypercalls;
9918         return kvm_skip_emulated_instruction(vcpu);
9919 }
9920 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
9921
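/*
 * Patch the guest's hypercall instruction at RIP with the vendor's native
 * opcode (VMCALL vs. VMMCALL), so a guest image built for the other vendor
 * keeps working.  If the FIX_HYPERCALL_INSN quirk is disabled, inject #UD
 * instead of silently rewriting guest code.
 */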
9922 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
9923 {
9924         struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
9925         char instruction[3];
9926         unsigned long rip = kvm_rip_read(vcpu);
9927
9928         /*
9929          * If the quirk is disabled, synthesize a #UD and let the guest pick up
9930          * the pieces.
9931          */
9932         if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) {
9933                 ctxt->exception.error_code_valid = false;
9934                 ctxt->exception.vector = UD_VECTOR;
9935                 ctxt->have_exception = true;
9936                 return X86EMUL_PROPAGATE_FAULT;
9937         }
9938
9939         static_call(kvm_x86_patch_hypercall)(vcpu, instruction);
9940
9941         return emulator_write_emulated(ctxt, rip, instruction, 3,
9942                 &ctxt->exception);
9943 }
9944
9945 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
9946 {
9947         return vcpu->run->request_interrupt_window &&
9948                 likely(!pic_in_kernel(vcpu->kvm));
9949 }
9950
9951 /* Called within kvm->srcu read side.  */
9952 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
9953 {
9954         struct kvm_run *kvm_run = vcpu->run;
9955
9956         kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu);
9957         kvm_run->cr8 = kvm_get_cr8(vcpu);
9958         kvm_run->apic_base = kvm_get_apic_base(vcpu);
9959
9960         kvm_run->ready_for_interrupt_injection =
9961                 pic_in_kernel(vcpu->kvm) ||
9962                 kvm_vcpu_ready_for_interrupt_injection(vcpu);
9963
9964         if (is_smm(vcpu))
9965                 kvm_run->flags |= KVM_RUN_X86_SMM;
9966 }
9967
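/*
 * Let vendor code adjust CR8/TPR interception based on the current TPR and
 * the highest pending vector in the IRR.  Skipped when the local APIC is
 * emulated in userspace or when APICv handles TPR virtualization.
 */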
9968 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
9969 {
9970         int max_irr, tpr;
9971
9972         if (!kvm_x86_ops.update_cr8_intercept)
9973                 return;
9974
9975         if (!lapic_in_kernel(vcpu))
9976                 return;
9977
9978         if (vcpu->arch.apic->apicv_active)
9979                 return;
9980
9981         if (!vcpu->arch.apic->vapic_addr)
9982                 max_irr = kvm_lapic_find_highest_irr(vcpu);
9983         else
9984                 max_irr = -1;
9985
9986         if (max_irr != -1)
9987                 max_irr >>= 4;
9988
9989         tpr = kvm_lapic_get_cr8(vcpu);
9990
9991         static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr);
9992 }
9993
9994
9995 int kvm_check_nested_events(struct kvm_vcpu *vcpu)
9996 {
9997         if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
9998                 kvm_x86_ops.nested_ops->triple_fault(vcpu);
9999                 return 1;
10000         }
10001
10002         return kvm_x86_ops.nested_ops->check_events(vcpu);
10003 }
10004
10005 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
10006 {
10007         /*
10008          * Suppress the error code if the vCPU is in Real Mode, as Real Mode
10009          * exceptions don't report error codes.  The presence of an error code
10010          * is carried with the exception and only stripped when the exception
10011          * is injected, as intercepted #PF VM-Exits for AMD's Paged Real Mode do
10012          * report an error code despite the CPU being in Real Mode.
10013          */
10014         vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
10015
10016         trace_kvm_inj_exception(vcpu->arch.exception.vector,
10017                                 vcpu->arch.exception.has_error_code,
10018                                 vcpu->arch.exception.error_code,
10019                                 vcpu->arch.exception.injected);
10020
10021         static_call(kvm_x86_inject_exception)(vcpu);
10022 }
10023
10024 /*
10025  * Check for any event (interrupt or exception) that is ready to be injected,
10026  * and if there is at least one event, inject the event with the highest
10027  * priority.  This handles both "pending" events, i.e. events that have never
10028  * been injected into the guest, and "injected" events, i.e. events that were
10029  * injected as part of a previous VM-Enter, but weren't successfully delivered
10030  * and need to be re-injected.
10031  *
10032  * Note, this is not guaranteed to be invoked on a guest instruction boundary,
10033  * i.e. doesn't guarantee that there's an event window in the guest.  KVM must
10034  * be able to inject exceptions in the "middle" of an instruction, and so must
10035  * also be able to re-inject NMIs and IRQs in the middle of an instruction.
10036  * I.e. for exceptions and re-injected events, NOT invoking this on instruction
10037  * boundaries is necessary and correct.
10038  *
10039  * For simplicity, KVM uses a single path to inject all events (except events
10040  * that are injected directly from L1 to L2) and doesn't explicitly track
10041  * instruction boundaries for asynchronous events.  However, because VM-Exits
10042  * that can occur during instruction execution typically result in KVM skipping
10043  * the instruction or injecting an exception, e.g. instruction and exception
10044  * intercepts, and because pending exceptions have higher priority than pending
10045  * interrupts, KVM still honors instruction boundaries in most scenarios.
10046  *
10047  * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
10048  * the instruction or inject an exception, then KVM can incorrectly inject a new
10049  * asynchronous event if the event became pending after the CPU fetched the
10050  * instruction (in the guest).  E.g. if a page fault (#PF, #NPF, EPT violation)
10051  * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
10052  * injected on the restarted instruction instead of being deferred until the
10053  * instruction completes.
10054  *
10055  * In practice, this virtualization hole is unlikely to be observed by the
10056  * guest, and even less likely to cause functional problems.  To detect the
10057  * hole, the guest would have to trigger an event on a side effect of an early
10058  * phase of instruction execution, e.g. on the instruction fetch from memory.
10059  * And for it to be a functional problem, the guest would need to depend on the
10060  * ordering between that side effect, the instruction completing, _and_ the
10061  * delivery of the asynchronous event.
10062  */
10063 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
10064                                        bool *req_immediate_exit)
10065 {
10066         bool can_inject;
10067         int r;
10068
10069         /*
10070          * Process nested events first, as nested VM-Exit supersedes event
10071          * re-injection.  If there's an event queued for re-injection, it will
10072          * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
10073          */
10074         if (is_guest_mode(vcpu))
10075                 r = kvm_check_nested_events(vcpu);
10076         else
10077                 r = 0;
10078
10079         /*
10080          * Re-inject exceptions and events *especially* if immediate entry+exit
10081          * to/from L2 is needed, as any event that has already been injected
10082          * into L2 needs to complete its lifecycle before injecting a new event.
10083          *
10084          * Don't re-inject an NMI or interrupt if there is a pending exception.
10085          * This collision arises if an exception occurred while vectoring the
10086          * injected event, KVM intercepted said exception, and KVM ultimately
10087          * determined the fault belongs to the guest and queues the exception
10088          * for injection back into the guest.
10089          *
10090          * "Injected" interrupts can also collide with pending exceptions if
10091          * userspace ignores the "ready for injection" flag and blindly queues
10092          * an interrupt.  In that case, prioritizing the exception is correct,
10093          * as the exception "occurred" before the exit to userspace.  Trap-like
10094          * exceptions, e.g. most #DBs, have higher priority than interrupts.
10095          * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
10096          * priority, they're only generated (pended) during instruction
10097          * execution, and interrupts are recognized at instruction boundaries.
10098          * Thus a pending fault-like exception means the fault occurred on the
10099          * *previous* instruction and must be serviced prior to recognizing any
10100          * new events in order to fully complete the previous instruction.
10101          */
10102         if (vcpu->arch.exception.injected)
10103                 kvm_inject_exception(vcpu);
10104         else if (kvm_is_exception_pending(vcpu))
10105                 ; /* see above */
10106         else if (vcpu->arch.nmi_injected)
10107                 static_call(kvm_x86_inject_nmi)(vcpu);
10108         else if (vcpu->arch.interrupt.injected)
10109                 static_call(kvm_x86_inject_irq)(vcpu, true);
10110
10111         /*
10112          * Exceptions that morph to VM-Exits are handled above, and pending
10113          * exceptions on top of injected exceptions that do not VM-Exit should
10114          * either morph to #DF or, sadly, override the injected exception.
10115          */
10116         WARN_ON_ONCE(vcpu->arch.exception.injected &&
10117                      vcpu->arch.exception.pending);
10118
10119         /*
10120          * Bail if immediate entry+exit to/from the guest is needed to complete
10121          * nested VM-Enter or event re-injection so that a different pending
10122          * event can be serviced (or if KVM needs to exit to userspace).
10123          *
10124          * Otherwise, continue processing events even if VM-Exit occurred.  The
10125          * VM-Exit will have cleared exceptions that were meant for L2, but
10126          * there may now be events that can be injected into L1.
10127          */
10128         if (r < 0)
10129                 goto out;
10130
10131         /*
10132          * A pending exception VM-Exit should either result in nested VM-Exit
10133          * or force an immediate re-entry and exit to/from L2, and exception
10134          * VM-Exits cannot be injected (flag should _never_ be set).
10135          */
10136         WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
10137                      vcpu->arch.exception_vmexit.pending);
10138
10139         /*
10140          * New events, other than exceptions, cannot be injected if KVM needs
10141          * to re-inject a previous event.  See above comments on re-injecting
10142          * for why pending exceptions get priority.
10143          */
10144         can_inject = !kvm_event_needs_reinjection(vcpu);
10145
10146         if (vcpu->arch.exception.pending) {
10147                 /*
10148                  * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
10149                  * value pushed on the stack.  Trap-like exceptions and all #DBs
10150                  * leave RF as-is (KVM follows Intel's behavior in this regard;
10151                  * AMD states that code breakpoint #DBs explicitly clear RF=0).
10152                  *
10153                  * Note, most versions of Intel's SDM and AMD's APM incorrectly
10154                  * describe the behavior of General Detect #DBs, which are
10155                  * fault-like.  They do _not_ set RF, a la code breakpoints.
10156                  */
10157                 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
10158                         __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
10159                                              X86_EFLAGS_RF);
10160
10161                 if (vcpu->arch.exception.vector == DB_VECTOR) {
10162                         kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
10163                         if (vcpu->arch.dr7 & DR7_GD) {
10164                                 vcpu->arch.dr7 &= ~DR7_GD;
10165                                 kvm_update_dr7(vcpu);
10166                         }
10167                 }
10168
10169                 kvm_inject_exception(vcpu);
10170
10171                 vcpu->arch.exception.pending = false;
10172                 vcpu->arch.exception.injected = true;
10173
10174                 can_inject = false;
10175         }
10176
10177         /* Don't inject interrupts if the user asked to avoid doing so */
10178         if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ)
10179                 return 0;
10180
10181         /*
10182          * Finally, inject interrupt events.  If an event cannot be injected
10183          * due to architectural conditions (e.g. IF=0) a window-open exit
10184          * will re-request KVM_REQ_EVENT.  Sometimes, however, an event is pending
10185          * and can architecturally be injected, but we cannot do it right now:
10186          * an interrupt could have arrived just now and we have to inject it
10187          * as a vmexit, or there could already be an event in the queue, which is
10188          * indicated by can_inject.  In that case we request an immediate exit
10189          * in order to make progress and get back here for another iteration.
10190          * The kvm_x86_ops hooks communicate this by returning -EBUSY.
10191          */
10192 #ifdef CONFIG_KVM_SMM
10193         if (vcpu->arch.smi_pending) {
10194                 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY;
10195                 if (r < 0)
10196                         goto out;
10197                 if (r) {
10198                         vcpu->arch.smi_pending = false;
10199                         ++vcpu->arch.smi_count;
10200                         enter_smm(vcpu);
10201                         can_inject = false;
10202                 } else
10203                         static_call(kvm_x86_enable_smi_window)(vcpu);
10204         }
10205 #endif
10206
10207         if (vcpu->arch.nmi_pending) {
10208                 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY;
10209                 if (r < 0)
10210                         goto out;
10211                 if (r) {
10212                         --vcpu->arch.nmi_pending;
10213                         vcpu->arch.nmi_injected = true;
10214                         static_call(kvm_x86_inject_nmi)(vcpu);
10215                         can_inject = false;
10216                         WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0);
10217                 }
10218                 if (vcpu->arch.nmi_pending)
10219                         static_call(kvm_x86_enable_nmi_window)(vcpu);
10220         }
10221
10222         if (kvm_cpu_has_injectable_intr(vcpu)) {
10223                 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY;
10224                 if (r < 0)
10225                         goto out;
10226                 if (r) {
10227                         int irq = kvm_cpu_get_interrupt(vcpu);
10228
10229                         if (!WARN_ON_ONCE(irq == -1)) {
10230                                 kvm_queue_interrupt(vcpu, irq, false);
10231                                 static_call(kvm_x86_inject_irq)(vcpu, false);
10232                                 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0);
10233                         }
10234                 }
10235                 if (kvm_cpu_has_injectable_intr(vcpu))
10236                         static_call(kvm_x86_enable_irq_window)(vcpu);
10237         }
10238
10239         if (is_guest_mode(vcpu) &&
10240             kvm_x86_ops.nested_ops->has_events &&
10241             kvm_x86_ops.nested_ops->has_events(vcpu))
10242                 *req_immediate_exit = true;
10243
10244         /*
10245          * KVM must never queue a new exception while injecting an event; KVM
10246          * is done emulating and should only propagate the to-be-injected event
10247          * to the VMCS/VMCB.  Queueing a new exception can put the vCPU into an
10248          * infinite loop as KVM will bail from VM-Enter to inject the pending
10249          * exception and start the cycle all over.
10250          *
10251          * Exempt triple faults as they have special handling and won't put the
10252          * vCPU into an infinite loop.  Triple fault can be queued when running
10253          * VMX without unrestricted guest, as that requires KVM to emulate Real
10254          * Mode events (see kvm_inject_realmode_interrupt()).
10255          */
10256         WARN_ON_ONCE(vcpu->arch.exception.pending ||
10257                      vcpu->arch.exception_vmexit.pending);
10258         return 0;
10259
10260 out:
10261         if (r == -EBUSY) {
10262                 *req_immediate_exit = true;
10263                 r = 0;
10264         }
10265         return r;
10266 }
10267
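/*
 * Userspace view of the "pending" vs. "injected" distinction described above:
 * a minimal sketch (illustration only, not compiled as part of this file) that
 * reads the event state via the documented KVM_GET_VCPU_EVENTS ioctl.  The
 * vcpu_fd parameter and the printf-based reporting are assumptions made for
 * the example.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int dump_event_state(int vcpu_fd)
{
        struct kvm_vcpu_events events;

        if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                return -1;

        /*
         * "injected" = already written to the VMCS/VMCB and awaiting delivery;
         * "pending" = recognized by KVM but not yet injected.  Note that
         * exception.pending is only reported separately from exception.injected
         * when KVM_CAP_EXCEPTION_PAYLOAD is enabled.
         */
        printf("exc: injected=%u pending=%u vector=%u\n",
               events.exception.injected, events.exception.pending,
               events.exception.nr);
        printf("nmi: injected=%u pending=%u\n",
               events.nmi.injected, events.nmi.pending);
        printf("irq: injected=%u nr=%u\n",
               events.interrupt.injected, events.interrupt.nr);
        return 0;
}
#endif
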
10268 static void process_nmi(struct kvm_vcpu *vcpu)
10269 {
10270         unsigned int limit;
10271
10272         /*
10273          * x86 is limited to one NMI pending, but because KVM can't react to
10274          * incoming NMIs as quickly as bare metal, e.g. if the vCPU is
10275          * scheduled out, KVM needs to play nice with two queued NMIs showing
10276          * up at the same time.  To handle this scenario, allow two NMIs to be
10277          * (temporarily) pending so long as NMIs are not blocked and KVM is not
10278          * waiting for a previous NMI injection to complete (which effectively
10279          * blocks NMIs).  KVM will immediately inject one of the two NMIs, and
10280          * will request an NMI window to handle the second NMI.
10281          */
10282         if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected)
10283                 limit = 1;
10284         else
10285                 limit = 2;
10286
10287         /*
10288          * Adjust the limit to account for pending virtual NMIs, which aren't
10289          * tracked in vcpu->arch.nmi_pending.
10290          */
10291         if (static_call(kvm_x86_is_vnmi_pending)(vcpu))
10292                 limit--;
10293
10294         vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
10295         vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
10296
10297         if (vcpu->arch.nmi_pending &&
10298             (static_call(kvm_x86_set_vnmi_pending)(vcpu)))
10299                 vcpu->arch.nmi_pending--;
10300
10301         if (vcpu->arch.nmi_pending)
10302                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10303 }
10304
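/*
 * For reference, nmi_queued above is fed via kvm_inject_nmi(), e.g. from the
 * in-kernel local APIC or from userspace.  Below is a minimal userspace sketch
 * using the documented KVM_NMI ioctl (illustration only, not compiled here);
 * vcpu_fd is an assumed vCPU file descriptor.  Two back-to-back calls while
 * the vCPU is descheduled can both be latched, matching the limit of two
 * computed in process_nmi().
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int queue_nmi(int vcpu_fd)
{
        /* KVM_NMI takes no argument; it bumps vcpu->arch.nmi_queued. */
        return ioctl(vcpu_fd, KVM_NMI, 0);
}
#endif
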
10305 /* Return total number of NMIs pending injection to the VM */
10306 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu)
10307 {
10308         return vcpu->arch.nmi_pending +
10309                static_call(kvm_x86_is_vnmi_pending)(vcpu);
10310 }
10311
10312 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
10313                                        unsigned long *vcpu_bitmap)
10314 {
10315         kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC, vcpu_bitmap);
10316 }
10317
10318 void kvm_make_scan_ioapic_request(struct kvm *kvm)
10319 {
10320         kvm_make_all_cpus_request(kvm, KVM_REQ_SCAN_IOAPIC);
10321 }
10322
10323 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10324 {
10325         struct kvm_lapic *apic = vcpu->arch.apic;
10326         bool activate;
10327
10328         if (!lapic_in_kernel(vcpu))
10329                 return;
10330
10331         down_read(&vcpu->kvm->arch.apicv_update_lock);
10332         preempt_disable();
10333
10334         /* Do not activate APICv when the APIC is disabled */
10335         activate = kvm_vcpu_apicv_activated(vcpu) &&
10336                    (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED);
10337
10338         if (apic->apicv_active == activate)
10339                 goto out;
10340
10341         apic->apicv_active = activate;
10342         kvm_apic_update_apicv(vcpu);
10343         static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu);
10344
10345         /*
10346          * When APICv gets disabled, we may still have injected interrupts
10347          * pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
10348          * still active when the interrupt got accepted. Make sure
10349          * kvm_check_and_inject_events() is called to check for that.
10350          */
10351         if (!apic->apicv_active)
10352                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10353
10354 out:
10355         preempt_enable();
10356         up_read(&vcpu->kvm->arch.apicv_update_lock);
10357 }
10358 EXPORT_SYMBOL_GPL(__kvm_vcpu_update_apicv);
10359
10360 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
10361 {
10362         if (!lapic_in_kernel(vcpu))
10363                 return;
10364
10365         /*
10366          * Due to sharing page tables across vCPUs, the xAPIC memslot must be
10367          * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but
10368          * the hardware doesn't support x2APIC virtualization.  E.g. some AMD
10369          * CPUs support AVIC but not x2APIC.  KVM still allows enabling AVIC in
10370          * this case so that KVM can use the AVIC doorbell to inject interrupts to
10371          * running vCPUs, but KVM must not create SPTEs for the APIC base as
10372          * the vCPU would incorrectly be able to access the vAPIC page via MMIO
10373          * despite being in x2APIC mode.  For simplicity, inhibiting the APIC
10374          * access page is sticky.
10375          */
10376         if (apic_x2apic_mode(vcpu->arch.apic) &&
10377             kvm_x86_ops.allow_apicv_in_x2apic_without_x2apic_virtualization)
10378                 kvm_inhibit_apic_access_page(vcpu);
10379
10380         __kvm_vcpu_update_apicv(vcpu);
10381 }
10382
10383 void __kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10384                                       enum kvm_apicv_inhibit reason, bool set)
10385 {
10386         unsigned long old, new;
10387
10388         lockdep_assert_held_write(&kvm->arch.apicv_update_lock);
10389
10390         if (!(kvm_x86_ops.required_apicv_inhibits & BIT(reason)))
10391                 return;
10392
10393         old = new = kvm->arch.apicv_inhibit_reasons;
10394
10395         set_or_clear_apicv_inhibit(&new, reason, set);
10396
10397         if (!!old != !!new) {
10398                 /*
10399                  * Kick all vCPUs before setting apicv_inhibit_reasons to avoid
10400                  * false positives in the sanity check WARN in svm_vcpu_run().
10401                  * This task will wait for all vCPUs to ack the kick IRQ before
10402                  * updating apicv_inhibit_reasons, and all other vCPUs will
10403                  * block on acquiring apicv_update_lock so that vCPUs can't
10404                  * redo svm_vcpu_run() without seeing the new inhibit state.
10405                  *
10406                  * Note, holding apicv_update_lock and taking it in the read
10407                  * side (handling the request) also prevents other vCPUs from
10408                  * servicing the request with a stale apicv_inhibit_reasons.
10409                  */
10410                 kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
10411                 kvm->arch.apicv_inhibit_reasons = new;
10412                 if (new) {
10413                         unsigned long gfn = gpa_to_gfn(APIC_DEFAULT_PHYS_BASE);
10414                         int idx = srcu_read_lock(&kvm->srcu);
10415
10416                         kvm_zap_gfn_range(kvm, gfn, gfn+1);
10417                         srcu_read_unlock(&kvm->srcu, idx);
10418                 }
10419         } else {
10420                 kvm->arch.apicv_inhibit_reasons = new;
10421         }
10422 }
10423
10424 void kvm_set_or_clear_apicv_inhibit(struct kvm *kvm,
10425                                     enum kvm_apicv_inhibit reason, bool set)
10426 {
10427         if (!enable_apicv)
10428                 return;
10429
10430         down_write(&kvm->arch.apicv_update_lock);
10431         __kvm_set_or_clear_apicv_inhibit(kvm, reason, set);
10432         up_write(&kvm->arch.apicv_update_lock);
10433 }
10434 EXPORT_SYMBOL_GPL(kvm_set_or_clear_apicv_inhibit);
10435
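/*
 * Sketch of a typical caller (illustration only; see e.g. i8254.c for a real
 * call site): a device model that is incompatible with APICv sets its inhibit
 * bit while the conflicting feature is in use and clears it again afterwards.
 * example_toggle_pit_inhibit() is a made-up wrapper for the example.
 */
#if 0
static void example_toggle_pit_inhibit(struct kvm *kvm, bool reinject)
{
        kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PIT_REINJ,
                                       reinject);
}
#endif
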
10436 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
10437 {
10438         if (!kvm_apic_present(vcpu))
10439                 return;
10440
10441         bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256);
10442
10443         if (irqchip_split(vcpu->kvm))
10444                 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
10445         else {
10446                 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10447                 if (ioapic_in_kernel(vcpu->kvm))
10448                         kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
10449         }
10450
10451         if (is_guest_mode(vcpu))
10452                 vcpu->arch.load_eoi_exitmap_pending = true;
10453         else
10454                 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
10455 }
10456
10457 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu)
10458 {
10459         u64 eoi_exit_bitmap[4];
10460
10461         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
10462                 return;
10463
10464         if (to_hv_vcpu(vcpu)) {
10465                 bitmap_or((ulong *)eoi_exit_bitmap,
10466                           vcpu->arch.ioapic_handled_vectors,
10467                           to_hv_synic(vcpu)->vec_bitmap, 256);
10468                 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap);
10469                 return;
10470         }
10471
10472         static_call_cond(kvm_x86_load_eoi_exitmap)(
10473                 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors);
10474 }
10475
10476 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
10477 {
10478         static_call_cond(kvm_x86_guest_memory_reclaimed)(kvm);
10479 }
10480
10481 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
10482 {
10483         if (!lapic_in_kernel(vcpu))
10484                 return;
10485
10486         static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu);
10487 }
10488
10489 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu)
10490 {
10491         smp_send_reschedule(vcpu->cpu);
10492 }
10493 EXPORT_SYMBOL_GPL(__kvm_request_immediate_exit);
10494
10495 /*
10496  * Called within kvm->srcu read side.
10497  * Returns 1 to let vcpu_run() continue the guest execution loop without
10498  * exiting to the userspace.  Otherwise, the value will be returned to the
10499  * userspace.
10500  */
10501 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
10502 {
10503         int r;
10504         bool req_int_win =
10505                 dm_request_for_irq_injection(vcpu) &&
10506                 kvm_cpu_accept_dm_intr(vcpu);
10507         fastpath_t exit_fastpath;
10508
10509         bool req_immediate_exit = false;
10510
10511         if (kvm_request_pending(vcpu)) {
10512                 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) {
10513                         r = -EIO;
10514                         goto out;
10515                 }
10516
10517                 if (kvm_dirty_ring_check_request(vcpu)) {
10518                         r = 0;
10519                         goto out;
10520                 }
10521
10522                 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
10523                         if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
10524                                 r = 0;
10525                                 goto out;
10526                         }
10527                 }
10528                 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu))
10529                         kvm_mmu_free_obsolete_roots(vcpu);
10530                 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
10531                         __kvm_migrate_timers(vcpu);
10532                 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
10533                         kvm_update_masterclock(vcpu->kvm);
10534                 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
10535                         kvm_gen_kvmclock_update(vcpu);
10536                 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
10537                         r = kvm_guest_time_update(vcpu);
10538                         if (unlikely(r))
10539                                 goto out;
10540                 }
10541                 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
10542                         kvm_mmu_sync_roots(vcpu);
10543                 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu))
10544                         kvm_mmu_load_pgd(vcpu);
10545
10546                 /*
10547                  * Note, the order matters here, as flushing "all" TLB entries
10548                  * also flushes the "current" TLB entries, i.e. servicing the
10549                  * flush "all" will clear any request to flush "current".
10550                  */
10551                 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
10552                         kvm_vcpu_flush_tlb_all(vcpu);
10553
10554                 kvm_service_local_tlb_flush_requests(vcpu);
10555
10556                 /*
10557                  * Fall back to a "full" guest flush if Hyper-V's precise
10558                  * flushing fails.  Note, Hyper-V's flushing is per-vCPU, but
10559                  * the flushes are considered "remote" and not "local" because
10560                  * the requests can be initiated from other vCPUs.
10561                  */
10562                 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) &&
10563                     kvm_hv_vcpu_flush_tlb(vcpu))
10564                         kvm_vcpu_flush_tlb_guest(vcpu);
10565
10566                 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
10567                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
10568                         r = 0;
10569                         goto out;
10570                 }
10571                 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10572                         if (is_guest_mode(vcpu))
10573                                 kvm_x86_ops.nested_ops->triple_fault(vcpu);
10574
10575                         if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
10576                                 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
10577                                 vcpu->mmio_needed = 0;
10578                                 r = 0;
10579                                 goto out;
10580                         }
10581                 }
10582                 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
10583                         /* Page is swapped out. Do synthetic halt */
10584                         vcpu->arch.apf.halted = true;
10585                         r = 1;
10586                         goto out;
10587                 }
10588                 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
10589                         record_steal_time(vcpu);
10590 #ifdef CONFIG_KVM_SMM
10591                 if (kvm_check_request(KVM_REQ_SMI, vcpu))
10592                         process_smi(vcpu);
10593 #endif
10594                 if (kvm_check_request(KVM_REQ_NMI, vcpu))
10595                         process_nmi(vcpu);
10596                 if (kvm_check_request(KVM_REQ_PMU, vcpu))
10597                         kvm_pmu_handle_event(vcpu);
10598                 if (kvm_check_request(KVM_REQ_PMI, vcpu))
10599                         kvm_pmu_deliver_pmi(vcpu);
10600                 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) {
10601                         BUG_ON(vcpu->arch.pending_ioapic_eoi > 255);
10602                         if (test_bit(vcpu->arch.pending_ioapic_eoi,
10603                                      vcpu->arch.ioapic_handled_vectors)) {
10604                                 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI;
10605                                 vcpu->run->eoi.vector =
10606                                                 vcpu->arch.pending_ioapic_eoi;
10607                                 r = 0;
10608                                 goto out;
10609                         }
10610                 }
10611                 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
10612                         vcpu_scan_ioapic(vcpu);
10613                 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu))
10614                         vcpu_load_eoi_exitmap(vcpu);
10615                 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
10616                         kvm_vcpu_reload_apic_access_page(vcpu);
10617                 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) {
10618                         vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10619                         vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH;
10620                         vcpu->run->system_event.ndata = 0;
10621                         r = 0;
10622                         goto out;
10623                 }
10624                 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) {
10625                         vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
10626                         vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET;
10627                         vcpu->run->system_event.ndata = 0;
10628                         r = 0;
10629                         goto out;
10630                 }
10631                 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) {
10632                         struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
10633
10634                         vcpu->run->exit_reason = KVM_EXIT_HYPERV;
10635                         vcpu->run->hyperv = hv_vcpu->exit;
10636                         r = 0;
10637                         goto out;
10638                 }
10639
10640                 /*
10641                  * KVM_REQ_HV_STIMER has to be processed after
10642                  * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers
10643                  * depend on the guest clock being up-to-date
10644                  */
10645                 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu))
10646                         kvm_hv_process_stimers(vcpu);
10647                 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
10648                         kvm_vcpu_update_apicv(vcpu);
10649                 if (kvm_check_request(KVM_REQ_APF_READY, vcpu))
10650                         kvm_check_async_pf_completion(vcpu);
10651                 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu))
10652                         static_call(kvm_x86_msr_filter_changed)(vcpu);
10653
10654                 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu))
10655                         static_call(kvm_x86_update_cpu_dirty_logging)(vcpu);
10656         }
10657
10658         if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win ||
10659             kvm_xen_has_interrupt(vcpu)) {
10660                 ++vcpu->stat.req_event;
10661                 r = kvm_apic_accept_events(vcpu);
10662                 if (r < 0) {
10663                         r = 0;
10664                         goto out;
10665                 }
10666                 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
10667                         r = 1;
10668                         goto out;
10669                 }
10670
10671                 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
10672                 if (r < 0) {
10673                         r = 0;
10674                         goto out;
10675                 }
10676                 if (req_int_win)
10677                         static_call(kvm_x86_enable_irq_window)(vcpu);
10678
10679                 if (kvm_lapic_enabled(vcpu)) {
10680                         update_cr8_intercept(vcpu);
10681                         kvm_lapic_sync_to_vapic(vcpu);
10682                 }
10683         }
10684
10685         r = kvm_mmu_reload(vcpu);
10686         if (unlikely(r)) {
10687                 goto cancel_injection;
10688         }
10689
10690         preempt_disable();
10691
10692         static_call(kvm_x86_prepare_switch_to_guest)(vcpu);
10693
10694         /*
10695          * Disable IRQs before setting IN_GUEST_MODE.  Posted interrupt
10696          * IPIs are then delayed until after guest entry, which ensures that they
10697          * result in virtual interrupt delivery.
10698          */
10699         local_irq_disable();
10700
10701         /* Store vcpu->apicv_active before vcpu->mode.  */
10702         smp_store_release(&vcpu->mode, IN_GUEST_MODE);
10703
10704         kvm_vcpu_srcu_read_unlock(vcpu);
10705
10706         /*
10707          * 1) We should set ->mode before checking ->requests.  Please see
10708          * the comment in kvm_vcpu_exiting_guest_mode().
10709          *
10710          * 2) For APICv, we should set ->mode before checking PID.ON. This
10711          * pairs with the memory barrier implicit in pi_test_and_set_on
10712          * (see vmx_deliver_posted_interrupt).
10713          *
10714          * 3) This also orders the write to mode from any reads to the page
10715          * tables done while the VCPU is running.  Please see the comment
10716          * in kvm_flush_remote_tlbs.
10717          */
10718         smp_mb__after_srcu_read_unlock();
10719
10720         /*
10721          * Process pending posted interrupts to handle the case where the
10722          * notification IRQ arrived in the host, or was never sent (because the
10723          * target vCPU wasn't running).  Do this regardless of the vCPU's APICv
10724          * status, as KVM doesn't update assigned devices when APICv is inhibited,
10725          * i.e. they can post interrupts even if APICv is temporarily disabled.
10726          */
10727         if (kvm_lapic_enabled(vcpu))
10728                 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10729
10730         if (kvm_vcpu_exit_request(vcpu)) {
10731                 vcpu->mode = OUTSIDE_GUEST_MODE;
10732                 smp_wmb();
10733                 local_irq_enable();
10734                 preempt_enable();
10735                 kvm_vcpu_srcu_read_lock(vcpu);
10736                 r = 1;
10737                 goto cancel_injection;
10738         }
10739
10740         if (req_immediate_exit) {
10741                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10742                 static_call(kvm_x86_request_immediate_exit)(vcpu);
10743         }
10744
10745         fpregs_assert_state_consistent();
10746         if (test_thread_flag(TIF_NEED_FPU_LOAD))
10747                 switch_fpu_return();
10748
10749         if (vcpu->arch.guest_fpu.xfd_err)
10750                 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
10751
10752         if (unlikely(vcpu->arch.switch_db_regs)) {
10753                 set_debugreg(0, 7);
10754                 set_debugreg(vcpu->arch.eff_db[0], 0);
10755                 set_debugreg(vcpu->arch.eff_db[1], 1);
10756                 set_debugreg(vcpu->arch.eff_db[2], 2);
10757                 set_debugreg(vcpu->arch.eff_db[3], 3);
10758         } else if (unlikely(hw_breakpoint_active())) {
10759                 set_debugreg(0, 7);
10760         }
10761
10762         guest_timing_enter_irqoff();
10763
10764         for (;;) {
10765                 /*
10766                  * Assert that vCPU vs. VM APICv state is consistent.  An APICv
10767                  * update must kick and wait for all vCPUs before toggling the
10768                  * per-VM state, and responding vCPUs must wait for the update
10769                  * to complete before servicing KVM_REQ_APICV_UPDATE.
10770                  */
10771                 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) &&
10772                              (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED));
10773
10774                 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu);
10775                 if (likely(exit_fastpath != EXIT_FASTPATH_REENTER_GUEST))
10776                         break;
10777
10778                 if (kvm_lapic_enabled(vcpu))
10779                         static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
10780
10781                 if (unlikely(kvm_vcpu_exit_request(vcpu))) {
10782                         exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
10783                         break;
10784                 }
10785
10786                 /* Note, VM-Exits that go down the "slow" path are accounted below. */
10787                 ++vcpu->stat.exits;
10788         }
10789
10790         /*
10791          * Do this here before restoring debug registers on the host.  And
10792          * since we do this before handling the vmexit, a DR access vmexit
10793          * can (a) read the correct value of the debug registers and (b) set
10794          * KVM_DEBUGREG_WONT_EXIT again.
10795          */
10796         if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
10797                 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
10798                 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu);
10799                 kvm_update_dr0123(vcpu);
10800                 kvm_update_dr7(vcpu);
10801         }
10802
10803         /*
10804          * If the guest has used debug registers, at least dr7
10805          * will be disabled while returning to the host.
10806          * If we don't have active breakpoints in the host, we don't
10807          * care about the messed up debug address registers. But if
10808          * we have some of them active, restore the old state.
10809          */
10810         if (hw_breakpoint_active())
10811                 hw_breakpoint_restore();
10812
10813         vcpu->arch.last_vmentry_cpu = vcpu->cpu;
10814         vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
10815
10816         vcpu->mode = OUTSIDE_GUEST_MODE;
10817         smp_wmb();
10818
10819         /*
10820          * Sync xfd before calling handle_exit_irqoff() which may
10821          * rely on the fact that guest_fpu::xfd is up-to-date (e.g.
10822          * in #NM irqoff handler).
10823          */
10824         if (vcpu->arch.xfd_no_write_intercept)
10825                 fpu_sync_guest_vmexit_xfd_state();
10826
10827         static_call(kvm_x86_handle_exit_irqoff)(vcpu);
10828
10829         if (vcpu->arch.guest_fpu.xfd_err)
10830                 wrmsrl(MSR_IA32_XFD_ERR, 0);
10831
10832         /*
10833          * Consume any pending interrupts, including the possible source of
10834          * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
10835          * An instruction is required after local_irq_enable() to fully unblock
10836          * interrupts on processors that implement an interrupt shadow; the
10837          * stat.exits increment will do nicely.
10838          */
10839         kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
10840         local_irq_enable();
10841         ++vcpu->stat.exits;
10842         local_irq_disable();
10843         kvm_after_interrupt(vcpu);
10844
10845         /*
10846          * Wait until after servicing IRQs to account guest time so that any
10847          * ticks that occurred while running the guest are properly accounted
10848          * to the guest.  Waiting until IRQs are enabled degrades the accuracy
10849          * of accounting via context tracking, but the loss of accuracy is
10850          * acceptable for all known use cases.
10851          */
10852         guest_timing_exit_irqoff();
10853
10854         local_irq_enable();
10855         preempt_enable();
10856
10857         kvm_vcpu_srcu_read_lock(vcpu);
10858
10859         /*
10860          * Profile KVM exit RIPs:
10861          */
10862         if (unlikely(prof_on == KVM_PROFILING)) {
10863                 unsigned long rip = kvm_rip_read(vcpu);
10864                 profile_hit(KVM_PROFILING, (void *)rip);
10865         }
10866
10867         if (unlikely(vcpu->arch.tsc_always_catchup))
10868                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
10869
10870         if (vcpu->arch.apic_attention)
10871                 kvm_lapic_sync_from_vapic(vcpu);
10872
10873         r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath);
10874         return r;
10875
10876 cancel_injection:
10877         if (req_immediate_exit)
10878                 kvm_make_request(KVM_REQ_EVENT, vcpu);
10879         static_call(kvm_x86_cancel_injection)(vcpu);
10880         if (unlikely(vcpu->arch.apic_attention))
10881                 kvm_lapic_sync_from_vapic(vcpu);
10882 out:
10883         return r;
10884 }
10885
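/*
 * Userspace counterpart of the return-value convention documented above
 * vcpu_enter_guest(): whenever KVM decides not to continue the in-kernel run
 * loop, KVM_RUN returns to the VMM with the reason in the shared kvm_run
 * structure.  A minimal sketch (illustration only, not compiled here); vcpu_fd
 * and the mmap'ed "run" pointer are assumed to be set up already.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static void run_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        break;                          /* -EINTR, -EFAULT, ... */

                switch (run->exit_reason) {
                case KVM_EXIT_HLT:
                        return;                         /* guest halted */
                case KVM_EXIT_IO:
                case KVM_EXIT_MMIO:
                        /* emulate the access, then re-enter the guest */
                        break;
                default:
                        fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
                        return;
                }
        }
}
#endif
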
10886 /* Called within kvm->srcu read side.  */
10887 static inline int vcpu_block(struct kvm_vcpu *vcpu)
10888 {
10889         bool hv_timer;
10890
10891         if (!kvm_arch_vcpu_runnable(vcpu)) {
10892                 /*
10893                  * Switch to the software timer before halt-polling/blocking as
10894                  * the guest's timer may be a break event for the vCPU, and the
10895                  * hypervisor timer runs only when the CPU is in guest mode.
10896                  * Switch before halt-polling so that KVM recognizes an expired
10897                  * timer before blocking.
10898                  */
10899                 hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
10900                 if (hv_timer)
10901                         kvm_lapic_switch_to_sw_timer(vcpu);
10902
10903                 kvm_vcpu_srcu_read_unlock(vcpu);
10904                 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
10905                         kvm_vcpu_halt(vcpu);
10906                 else
10907                         kvm_vcpu_block(vcpu);
10908                 kvm_vcpu_srcu_read_lock(vcpu);
10909
10910                 if (hv_timer)
10911                         kvm_lapic_switch_to_hv_timer(vcpu);
10912
10913                 /*
10914                  * If the vCPU is not runnable, a signal or another host event
10915                  * of some kind is pending; service it without changing the
10916                  * vCPU's activity state.
10917                  */
10918                 if (!kvm_arch_vcpu_runnable(vcpu))
10919                         return 1;
10920         }
10921
10922         /*
10923          * Evaluate nested events before exiting the halted state.  This allows
10924          * the halt state to be recorded properly in the VMCS12's activity
10925          * state field (AMD does not have a similar field and a VM-Exit always
10926          * causes a spurious wakeup from HLT).
10927          */
10928         if (is_guest_mode(vcpu)) {
10929                 if (kvm_check_nested_events(vcpu) < 0)
10930                         return 0;
10931         }
10932
10933         if (kvm_apic_accept_events(vcpu) < 0)
10934                 return 0;
10935         switch (vcpu->arch.mp_state) {
10936         case KVM_MP_STATE_HALTED:
10937         case KVM_MP_STATE_AP_RESET_HOLD:
10938                 vcpu->arch.pv.pv_unhalted = false;
10939                 vcpu->arch.mp_state =
10940                         KVM_MP_STATE_RUNNABLE;
10941                 fallthrough;
10942         case KVM_MP_STATE_RUNNABLE:
10943                 vcpu->arch.apf.halted = false;
10944                 break;
10945         case KVM_MP_STATE_INIT_RECEIVED:
10946                 break;
10947         default:
10948                 WARN_ON_ONCE(1);
10949                 break;
10950         }
10951         return 1;
10952 }
10953
10954 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
10955 {
10956         return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
10957                 !vcpu->arch.apf.halted);
10958 }
10959
10960 /* Called within kvm->srcu read side.  */
10961 static int vcpu_run(struct kvm_vcpu *vcpu)
10962 {
10963         int r;
10964
10965         vcpu->arch.l1tf_flush_l1d = true;
10966
10967         for (;;) {
10968                 /*
10969                  * If another guest vCPU requests a PV TLB flush in the middle
10970                  * of instruction emulation, the rest of the emulation could
10971                  * use a stale page translation. Assume that any code after
10972                  * this point can start executing an instruction.
10973                  */
10974                 vcpu->arch.at_instruction_boundary = false;
10975                 if (kvm_vcpu_running(vcpu)) {
10976                         r = vcpu_enter_guest(vcpu);
10977                 } else {
10978                         r = vcpu_block(vcpu);
10979                 }
10980
10981                 if (r <= 0)
10982                         break;
10983
10984                 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu);
10985                 if (kvm_xen_has_pending_events(vcpu))
10986                         kvm_xen_inject_pending_events(vcpu);
10987
10988                 if (kvm_cpu_has_pending_timer(vcpu))
10989                         kvm_inject_pending_timer_irqs(vcpu);
10990
10991                 if (dm_request_for_irq_injection(vcpu) &&
10992                         kvm_vcpu_ready_for_interrupt_injection(vcpu)) {
10993                         r = 0;
10994                         vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
10995                         ++vcpu->stat.request_irq_exits;
10996                         break;
10997                 }
10998
10999                 if (__xfer_to_guest_mode_work_pending()) {
11000                         kvm_vcpu_srcu_read_unlock(vcpu);
11001                         r = xfer_to_guest_mode_handle_work(vcpu);
11002                         kvm_vcpu_srcu_read_lock(vcpu);
11003                         if (r)
11004                                 return r;
11005                 }
11006         }
11007
11008         return r;
11009 }
11010
11011 static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
11012 {
11013         return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
11014 }
11015
11016 static int complete_emulated_pio(struct kvm_vcpu *vcpu)
11017 {
11018         BUG_ON(!vcpu->arch.pio.count);
11019
11020         return complete_emulated_io(vcpu);
11021 }
11022
11023 /*
11024  * Implements the following, as a state machine:
11025  *
11026  * read:
11027  *   for each fragment
11028  *     for each mmio piece in the fragment
11029  *       write gpa, len
11030  *       exit
11031  *       copy data
11032  *   execute insn
11033  *
11034  * write:
11035  *   for each fragment
11036  *     for each mmio piece in the fragment
11037  *       write gpa, len
11038  *       copy data
11039  *       exit
11040  */
11041 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
11042 {
11043         struct kvm_run *run = vcpu->run;
11044         struct kvm_mmio_fragment *frag;
11045         unsigned len;
11046
11047         BUG_ON(!vcpu->mmio_needed);
11048
11049         /* Complete previous fragment */
11050         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
11051         len = min(8u, frag->len);
11052         if (!vcpu->mmio_is_write)
11053                 memcpy(frag->data, run->mmio.data, len);
11054
11055         if (frag->len <= 8) {
11056                 /* Switch to the next fragment. */
11057                 frag++;
11058                 vcpu->mmio_cur_fragment++;
11059         } else {
11060                 /* Go forward to the next mmio piece. */
11061                 frag->data += len;
11062                 frag->gpa += len;
11063                 frag->len -= len;
11064         }
11065
11066         if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
11067                 vcpu->mmio_needed = 0;
11068
11069                 /* FIXME: return into emulator if single-stepping.  */
11070                 if (vcpu->mmio_is_write)
11071                         return 1;
11072                 vcpu->mmio_read_completed = 1;
11073                 return complete_emulated_io(vcpu);
11074         }
11075
11076         run->exit_reason = KVM_EXIT_MMIO;
11077         run->mmio.phys_addr = frag->gpa;
11078         if (vcpu->mmio_is_write)
11079                 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
11080         run->mmio.len = min(8u, frag->len);
11081         run->mmio.is_write = vcpu->mmio_is_write;
11082         vcpu->arch.complete_userspace_io = complete_emulated_mmio;
11083         return 0;
11084 }
11085
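/*
 * Userspace side of the state machine above (illustration only, not compiled
 * here): each KVM_EXIT_MMIO carries at most 8 bytes, so a wide or
 * multi-fragment access simply shows up as several consecutive exits.
 * device_read() and device_write() are assumed helpers that model the
 * emulated device; "run" is the vCPU's mmap'ed kvm_run structure.
 */
#if 0
#include <linux/kvm.h>

static void handle_mmio_exit(struct kvm_run *run)
{
        if (run->mmio.is_write)
                device_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
        else
                device_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);

        /*
         * On the next KVM_RUN, complete_emulated_mmio() consumes
         * run->mmio.data (for reads) and advances to the next fragment.
         */
}
#endif
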
11086 /* Swap (qemu) user FPU context for the guest FPU context. */
11087 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
11088 {
11089         /* Exclude PKRU, it's restored separately immediately after VM-Exit. */
11090         fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true);
11091         trace_kvm_fpu(1);
11092 }
11093
11094 /* When vcpu_run ends, restore user space FPU context. */
11095 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
11096 {
11097         fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false);
11098         ++vcpu->stat.fpu_reload;
11099         trace_kvm_fpu(0);
11100 }
11101
11102 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
11103 {
11104         struct kvm_queued_exception *ex = &vcpu->arch.exception;
11105         struct kvm_run *kvm_run = vcpu->run;
11106         int r;
11107
11108         vcpu_load(vcpu);
11109         kvm_sigset_activate(vcpu);
11110         kvm_run->flags = 0;
11111         kvm_load_guest_fpu(vcpu);
11112
11113         kvm_vcpu_srcu_read_lock(vcpu);
11114         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
11115                 if (kvm_run->immediate_exit) {
11116                         r = -EINTR;
11117                         goto out;
11118                 }
11119
11120                 /*
11121                  * Don't bother switching APIC timer emulation from the
11122                  * hypervisor timer to the software timer; the only way for the
11123                  * APIC timer to be active is if userspace stuffed vCPU state,
11124                  * i.e. put the vCPU into a nonsensical state.  Only an INIT
11125                  * will transition the vCPU out of UNINITIALIZED (without more
11126                  * state stuffing from userspace), which will reset the local
11127                  * APIC and thus cancel the timer or drop the IRQ (if the timer
11128                  * already expired).
11129                  */
11130                 kvm_vcpu_srcu_read_unlock(vcpu);
11131                 kvm_vcpu_block(vcpu);
11132                 kvm_vcpu_srcu_read_lock(vcpu);
11133
11134                 if (kvm_apic_accept_events(vcpu) < 0) {
11135                         r = 0;
11136                         goto out;
11137                 }
11138                 r = -EAGAIN;
11139                 if (signal_pending(current)) {
11140                         r = -EINTR;
11141                         kvm_run->exit_reason = KVM_EXIT_INTR;
11142                         ++vcpu->stat.signal_exits;
11143                 }
11144                 goto out;
11145         }
11146
11147         if ((kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) ||
11148             (kvm_run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS)) {
11149                 r = -EINVAL;
11150                 goto out;
11151         }
11152
11153         if (kvm_run->kvm_dirty_regs) {
11154                 r = sync_regs(vcpu);
11155                 if (r != 0)
11156                         goto out;
11157         }
11158
11159         /* re-sync apic's tpr */
11160         if (!lapic_in_kernel(vcpu)) {
11161                 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
11162                         r = -EINVAL;
11163                         goto out;
11164                 }
11165         }
11166
11167         /*
11168          * If userspace set a pending exception and L2 is active, convert it to
11169          * a pending VM-Exit if L1 wants to intercept the exception.
11170          */
11171         if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
11172             kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
11173                                                         ex->error_code)) {
11174                 kvm_queue_exception_vmexit(vcpu, ex->vector,
11175                                            ex->has_error_code, ex->error_code,
11176                                            ex->has_payload, ex->payload);
11177                 ex->injected = false;
11178                 ex->pending = false;
11179         }
11180         vcpu->arch.exception_from_userspace = false;
11181
11182         if (unlikely(vcpu->arch.complete_userspace_io)) {
11183                 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
11184                 vcpu->arch.complete_userspace_io = NULL;
11185                 r = cui(vcpu);
11186                 if (r <= 0)
11187                         goto out;
11188         } else {
11189                 WARN_ON_ONCE(vcpu->arch.pio.count);
11190                 WARN_ON_ONCE(vcpu->mmio_needed);
11191         }
11192
11193         if (kvm_run->immediate_exit) {
11194                 r = -EINTR;
11195                 goto out;
11196         }
11197
11198         r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
11199         if (r <= 0)
11200                 goto out;
11201
11202         r = vcpu_run(vcpu);
11203
11204 out:
11205         kvm_put_guest_fpu(vcpu);
11206         if (kvm_run->kvm_valid_regs)
11207                 store_regs(vcpu);
11208         post_kvm_run_save(vcpu);
11209         kvm_vcpu_srcu_read_unlock(vcpu);
11210
11211         kvm_sigset_deactivate(vcpu);
11212         vcpu_put(vcpu);
11213         return r;
11214 }
11215
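/*
 * The kvm_run->immediate_exit handling above is what lets a VMM kick a vCPU
 * thread out of (or keep it from entering) KVM_RUN without races.  A minimal
 * sketch of the documented pattern (illustration only, not compiled here):
 * the signal handler sets immediate_exit so that an in-flight or imminent
 * KVM_RUN returns -EINTR; the VMM clears the flag again before resuming.
 * "run" is assumed to be the vCPU thread's mmap'ed kvm_run structure.
 */
#if 0
#include <signal.h>
#include <linux/kvm.h>

static struct kvm_run *run;     /* set up elsewhere in the VMM */

static void kick_vcpu_handler(int sig)
{
        run->immediate_exit = 1;
}
#endif
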
11216 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11217 {
11218         if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
11219                 /*
11220                  * We are here if userspace calls get_regs() in the middle of
11221                  * instruction emulation. Register state needs to be copied
11222                  * back from the emulation context to the vcpu. Userspace shouldn't do
11223                  * that usually, but some badly designed PV devices (vmware
11224                  * backdoor interface) need this to work.
11225                  */
11226                 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt);
11227                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11228         }
11229         regs->rax = kvm_rax_read(vcpu);
11230         regs->rbx = kvm_rbx_read(vcpu);
11231         regs->rcx = kvm_rcx_read(vcpu);
11232         regs->rdx = kvm_rdx_read(vcpu);
11233         regs->rsi = kvm_rsi_read(vcpu);
11234         regs->rdi = kvm_rdi_read(vcpu);
11235         regs->rsp = kvm_rsp_read(vcpu);
11236         regs->rbp = kvm_rbp_read(vcpu);
11237 #ifdef CONFIG_X86_64
11238         regs->r8 = kvm_r8_read(vcpu);
11239         regs->r9 = kvm_r9_read(vcpu);
11240         regs->r10 = kvm_r10_read(vcpu);
11241         regs->r11 = kvm_r11_read(vcpu);
11242         regs->r12 = kvm_r12_read(vcpu);
11243         regs->r13 = kvm_r13_read(vcpu);
11244         regs->r14 = kvm_r14_read(vcpu);
11245         regs->r15 = kvm_r15_read(vcpu);
11246 #endif
11247
11248         regs->rip = kvm_rip_read(vcpu);
11249         regs->rflags = kvm_get_rflags(vcpu);
11250 }
11251
11252 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11253 {
11254         vcpu_load(vcpu);
11255         __get_regs(vcpu, regs);
11256         vcpu_put(vcpu);
11257         return 0;
11258 }
11259
11260 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11261 {
11262         vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
11263         vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
11264
11265         kvm_rax_write(vcpu, regs->rax);
11266         kvm_rbx_write(vcpu, regs->rbx);
11267         kvm_rcx_write(vcpu, regs->rcx);
11268         kvm_rdx_write(vcpu, regs->rdx);
11269         kvm_rsi_write(vcpu, regs->rsi);
11270         kvm_rdi_write(vcpu, regs->rdi);
11271         kvm_rsp_write(vcpu, regs->rsp);
11272         kvm_rbp_write(vcpu, regs->rbp);
11273 #ifdef CONFIG_X86_64
11274         kvm_r8_write(vcpu, regs->r8);
11275         kvm_r9_write(vcpu, regs->r9);
11276         kvm_r10_write(vcpu, regs->r10);
11277         kvm_r11_write(vcpu, regs->r11);
11278         kvm_r12_write(vcpu, regs->r12);
11279         kvm_r13_write(vcpu, regs->r13);
11280         kvm_r14_write(vcpu, regs->r14);
11281         kvm_r15_write(vcpu, regs->r15);
11282 #endif
11283
11284         kvm_rip_write(vcpu, regs->rip);
11285         kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
11286
11287         vcpu->arch.exception.pending = false;
11288         vcpu->arch.exception_vmexit.pending = false;
11289
11290         kvm_make_request(KVM_REQ_EVENT, vcpu);
11291 }
11292
11293 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
11294 {
11295         vcpu_load(vcpu);
11296         __set_regs(vcpu, regs);
11297         vcpu_put(vcpu);
11298         return 0;
11299 }
11300
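/*
 * A minimal userspace sketch of the KVM_GET_REGS/KVM_SET_REGS pair serviced
 * above (illustration only, not compiled here), e.g. to advance RIP after
 * emulating an instruction in the VMM.  Note that __set_regs() clears any
 * pending exception as a side effect.  vcpu_fd and insn_len are assumptions
 * made for the example.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int skip_insn(int vcpu_fd, unsigned int insn_len)
{
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
                return -1;
        regs.rip += insn_len;
        return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
}
#endif
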
11301 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11302 {
11303         struct desc_ptr dt;
11304
11305         if (vcpu->arch.guest_state_protected)
11306                 goto skip_protected_regs;
11307
11308         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11309         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11310         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11311         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11312         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11313         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11314
11315         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11316         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11317
11318         static_call(kvm_x86_get_idt)(vcpu, &dt);
11319         sregs->idt.limit = dt.size;
11320         sregs->idt.base = dt.address;
11321         static_call(kvm_x86_get_gdt)(vcpu, &dt);
11322         sregs->gdt.limit = dt.size;
11323         sregs->gdt.base = dt.address;
11324
11325         sregs->cr2 = vcpu->arch.cr2;
11326         sregs->cr3 = kvm_read_cr3(vcpu);
11327
11328 skip_protected_regs:
11329         sregs->cr0 = kvm_read_cr0(vcpu);
11330         sregs->cr4 = kvm_read_cr4(vcpu);
11331         sregs->cr8 = kvm_get_cr8(vcpu);
11332         sregs->efer = vcpu->arch.efer;
11333         sregs->apic_base = kvm_get_apic_base(vcpu);
11334 }
11335
11336 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11337 {
11338         __get_sregs_common(vcpu, sregs);
11339
11340         if (vcpu->arch.guest_state_protected)
11341                 return;
11342
11343         if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft)
11344                 set_bit(vcpu->arch.interrupt.nr,
11345                         (unsigned long *)sregs->interrupt_bitmap);
11346 }
11347
11348 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11349 {
11350         int i;
11351
11352         __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2);
11353
11354         if (vcpu->arch.guest_state_protected)
11355                 return;
11356
11357         if (is_pae_paging(vcpu)) {
11358                 for (i = 0; i < 4; i++)
11359                         sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i);
11360                 sregs2->flags |= KVM_SREGS2_FLAGS_PDPTRS_VALID;
11361         }
11362 }
11363
11364 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
11365                                   struct kvm_sregs *sregs)
11366 {
11367         vcpu_load(vcpu);
11368         __get_sregs(vcpu, sregs);
11369         vcpu_put(vcpu);
11370         return 0;
11371 }
11372
11373 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
11374                                     struct kvm_mp_state *mp_state)
11375 {
11376         int r;
11377
11378         vcpu_load(vcpu);
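        /*
         * kvm_apic_accept_events() can emulate INIT, and the INIT flow puts
         * and reloads the guest FPU while clearing MPX state; pre-load the
         * guest FPU so that the put/load pair in the reset path is balanced.
         */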
11379         if (kvm_mpx_supported())
11380                 kvm_load_guest_fpu(vcpu);
11381
11382         r = kvm_apic_accept_events(vcpu);
11383         if (r < 0)
11384                 goto out;
11385         r = 0;
11386
11387         if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED ||
11388              vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) &&
11389             vcpu->arch.pv.pv_unhalted)
11390                 mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
11391         else
11392                 mp_state->mp_state = vcpu->arch.mp_state;
11393
11394 out:
11395         if (kvm_mpx_supported())
11396                 kvm_put_guest_fpu(vcpu);
11397         vcpu_put(vcpu);
11398         return r;
11399 }
11400
11401 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
11402                                     struct kvm_mp_state *mp_state)
11403 {
11404         int ret = -EINVAL;
11405
11406         vcpu_load(vcpu);
11407
11408         switch (mp_state->mp_state) {
11409         case KVM_MP_STATE_UNINITIALIZED:
11410         case KVM_MP_STATE_HALTED:
11411         case KVM_MP_STATE_AP_RESET_HOLD:
11412         case KVM_MP_STATE_INIT_RECEIVED:
11413         case KVM_MP_STATE_SIPI_RECEIVED:
11414                 if (!lapic_in_kernel(vcpu))
11415                         goto out;
11416                 break;
11417
11418         case KVM_MP_STATE_RUNNABLE:
11419                 break;
11420
11421         default:
11422                 goto out;
11423         }
11424
11425         /*
11426          * Pending INITs are reported using KVM_SET_VCPU_EVENTS; disallow
11427          * forcing the guest into INIT/SIPI if those events are supposed to be
11428          * blocked.  KVM prioritizes SMI over INIT, so reject INIT/SIPI state
11429          * if an SMI is pending as well.
11430          */
11431         if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
11432             (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
11433              mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
11434                 goto out;
11435
11436         if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
11437                 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
11438                 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
11439         } else
11440                 vcpu->arch.mp_state = mp_state->mp_state;
11441         kvm_make_request(KVM_REQ_EVENT, vcpu);
11442
11443         ret = 0;
11444 out:
11445         vcpu_put(vcpu);
11446         return ret;
11447 }
11448
11449 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
11450                     int reason, bool has_error_code, u32 error_code)
11451 {
11452         struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
11453         int ret;
11454
11455         init_emulate_ctxt(vcpu);
11456
11457         ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
11458                                    has_error_code, error_code);
11459         if (ret) {
11460                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
11461                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
11462                 vcpu->run->internal.ndata = 0;
11463                 return 0;
11464         }
11465
11466         kvm_rip_write(vcpu, ctxt->eip);
11467         kvm_set_rflags(vcpu, ctxt->eflags);
11468         return 1;
11469 }
11470 EXPORT_SYMBOL_GPL(kvm_task_switch);
11471
11472 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11473 {
11474         if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
11475                 /*
11476                  * When EFER.LME and CR0.PG are set, the processor is in
11477                  * 64-bit mode (though maybe in a 32-bit code segment).
11478                  * CR4.PAE and EFER.LMA must be set.
11479                  */
11480                 if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA))
11481                         return false;
11482                 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3))
11483                         return false;
11484         } else {
11485                 /*
11486                  * Not in 64-bit mode: EFER.LMA is clear and the code
11487                  * segment cannot be 64-bit.
11488                  */
11489                 if (sregs->efer & EFER_LMA || sregs->cs.l)
11490                         return false;
11491         }
11492
11493         return kvm_is_valid_cr4(vcpu, sregs->cr4) &&
11494                kvm_is_valid_cr0(vcpu, sregs->cr0);
11495 }
11496
11497 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
11498                 int *mmu_reset_needed, bool update_pdptrs)
11499 {
11500         struct msr_data apic_base_msr;
11501         int idx;
11502         struct desc_ptr dt;
11503
11504         if (!kvm_is_valid_sregs(vcpu, sregs))
11505                 return -EINVAL;
11506
11507         apic_base_msr.data = sregs->apic_base;
11508         apic_base_msr.host_initiated = true;
11509         if (kvm_set_apic_base(vcpu, &apic_base_msr))
11510                 return -EINVAL;
11511
11512         if (vcpu->arch.guest_state_protected)
11513                 return 0;
11514
11515         dt.size = sregs->idt.limit;
11516         dt.address = sregs->idt.base;
11517         static_call(kvm_x86_set_idt)(vcpu, &dt);
11518         dt.size = sregs->gdt.limit;
11519         dt.address = sregs->gdt.base;
11520         static_call(kvm_x86_set_gdt)(vcpu, &dt);
11521
11522         vcpu->arch.cr2 = sregs->cr2;
11523         *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
11524         vcpu->arch.cr3 = sregs->cr3;
11525         kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
11526         static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
11527
11528         kvm_set_cr8(vcpu, sregs->cr8);
11529
11530         *mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
11531         static_call(kvm_x86_set_efer)(vcpu, sregs->efer);
11532
11533         *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
11534         static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0);
11535         vcpu->arch.cr0 = sregs->cr0;
11536
11537         *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
11538         static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4);
11539
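        /*
         * Reload the PDPTRs from the new CR3 when PAE paging is active,
         * unless the caller (KVM_SET_SREGS2) provided the PDPTRs directly.
         */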
11540         if (update_pdptrs) {
11541                 idx = srcu_read_lock(&vcpu->kvm->srcu);
11542                 if (is_pae_paging(vcpu)) {
11543                         load_pdptrs(vcpu, kvm_read_cr3(vcpu));
11544                         *mmu_reset_needed = 1;
11545                 }
11546                 srcu_read_unlock(&vcpu->kvm->srcu, idx);
11547         }
11548
11549         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
11550         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
11551         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
11552         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
11553         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
11554         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
11555
11556         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
11557         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
11558
11559         update_cr8_intercept(vcpu);
11560
11561         /* Older userspace won't unhalt the vcpu on reset. */
11562         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
11563             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
11564             !is_protmode(vcpu))
11565                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11566
11567         return 0;
11568 }
11569
11570 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
11571 {
11572         int pending_vec, max_bits;
11573         int mmu_reset_needed = 0;
11574         int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true);
11575
11576         if (ret)
11577                 return ret;
11578
11579         if (mmu_reset_needed)
11580                 kvm_mmu_reset_context(vcpu);
11581
11582         max_bits = KVM_NR_INTERRUPTS;
11583         pending_vec = find_first_bit(
11584                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
11585
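        /* Re-queue an interrupt that userspace marked pending in sregs->interrupt_bitmap. */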
11586         if (pending_vec < max_bits) {
11587                 kvm_queue_interrupt(vcpu, pending_vec, false);
11588                 pr_debug("Set back pending irq %d\n", pending_vec);
11589                 kvm_make_request(KVM_REQ_EVENT, vcpu);
11590         }
11591         return 0;
11592 }
11593
11594 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2)
11595 {
11596         int mmu_reset_needed = 0;
11597         bool valid_pdptrs = sregs2->flags & KVM_SREGS2_FLAGS_PDPTRS_VALID;
11598         bool pae = (sregs2->cr0 & X86_CR0_PG) && (sregs2->cr4 & X86_CR4_PAE) &&
11599                 !(sregs2->efer & EFER_LMA);
11600         int i, ret;
11601
11602         if (sregs2->flags & ~KVM_SREGS2_FLAGS_PDPTRS_VALID)
11603                 return -EINVAL;
11604
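        /*
         * PDPTRs may only be provided for PAE paging (CR0.PG && CR4.PAE &&
         * !EFER.LMA), and never for protected guests, whose register state
         * cannot be written by userspace.
         */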
11605         if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected))
11606                 return -EINVAL;
11607
11608         ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2,
11609                                  &mmu_reset_needed, !valid_pdptrs);
11610         if (ret)
11611                 return ret;
11612
11613         if (valid_pdptrs) {
11614                 for (i = 0; i < 4; i++)
11615                         kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]);
11616
11617                 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
11618                 mmu_reset_needed = 1;
11619                 vcpu->arch.pdptrs_from_userspace = true;
11620         }
11621         if (mmu_reset_needed)
11622                 kvm_mmu_reset_context(vcpu);
11623         return 0;
11624 }
11625
11626 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
11627                                   struct kvm_sregs *sregs)
11628 {
11629         int ret;
11630
11631         vcpu_load(vcpu);
11632         ret = __set_sregs(vcpu, sregs);
11633         vcpu_put(vcpu);
11634         return ret;
11635 }
11636
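/*
 * KVM_GUESTDBG_BLOCKIRQ requires that no interrupts be injected into the
 * guest; inhibit APICv for the entire VM while any vCPU has it set, as
 * hardware-accelerated interrupt delivery bypasses KVM's injection logic.
 */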
11637 static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
11638 {
11639         bool set = false;
11640         struct kvm_vcpu *vcpu;
11641         unsigned long i;
11642
11643         if (!enable_apicv)
11644                 return;
11645
11646         down_write(&kvm->arch.apicv_update_lock);
11647
11648         kvm_for_each_vcpu(i, vcpu, kvm) {
11649                 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) {
11650                         set = true;
11651                         break;
11652                 }
11653         }
11654         __kvm_set_or_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_BLOCKIRQ, set);
11655         up_write(&kvm->arch.apicv_update_lock);
11656 }
11657
11658 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
11659                                         struct kvm_guest_debug *dbg)
11660 {
11661         unsigned long rflags;
11662         int i, r;
11663
11664         if (vcpu->arch.guest_state_protected)
11665                 return -EINVAL;
11666
11667         vcpu_load(vcpu);
11668
11669         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
11670                 r = -EBUSY;
11671                 if (kvm_is_exception_pending(vcpu))
11672                         goto out;
11673                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
11674                         kvm_queue_exception(vcpu, DB_VECTOR);
11675                 else
11676                         kvm_queue_exception(vcpu, BP_VECTOR);
11677         }
11678
11679         /*
11680          * Read rflags while any previously injected trace flags are still
11681          * being filtered out.
11682          */
11683         rflags = kvm_get_rflags(vcpu);
11684
11685         vcpu->guest_debug = dbg->control;
11686         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
11687                 vcpu->guest_debug = 0;
11688
11689         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
11690                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
11691                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
11692                 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
11693         } else {
11694                 for (i = 0; i < KVM_NR_DB_REGS; i++)
11695                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
11696         }
11697         kvm_update_dr7(vcpu);
11698
11699         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
11700                 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu);
11701
11702         /*
11703          * Trigger an rflags update that will inject or remove the trace
11704          * flags.
11705          */
11706         kvm_set_rflags(vcpu, rflags);
11707
11708         static_call(kvm_x86_update_exception_bitmap)(vcpu);
11709
11710         kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm);
11711
11712         r = 0;
11713
11714 out:
11715         vcpu_put(vcpu);
11716         return r;
11717 }
11718
11719 /*
11720  * Translate a guest virtual address to a guest physical address.
11721  */
11722 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
11723                                     struct kvm_translation *tr)
11724 {
11725         unsigned long vaddr = tr->linear_address;
11726         gpa_t gpa;
11727         int idx;
11728
11729         vcpu_load(vcpu);
11730
11731         idx = srcu_read_lock(&vcpu->kvm->srcu);
11732         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
11733         srcu_read_unlock(&vcpu->kvm->srcu, idx);
11734         tr->physical_address = gpa;
11735         tr->valid = gpa != INVALID_GPA;
11736         tr->writeable = 1;
11737         tr->usermode = 0;
11738
11739         vcpu_put(vcpu);
11740         return 0;
11741 }
11742
11743 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
11744 {
11745         struct fxregs_state *fxsave;
11746
11747         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11748                 return 0;
11749
11750         vcpu_load(vcpu);
11751
11752         fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11753         memcpy(fpu->fpr, fxsave->st_space, 128);
11754         fpu->fcw = fxsave->cwd;
11755         fpu->fsw = fxsave->swd;
11756         fpu->ftwx = fxsave->twd;
11757         fpu->last_opcode = fxsave->fop;
11758         fpu->last_ip = fxsave->rip;
11759         fpu->last_dp = fxsave->rdp;
11760         memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space));
11761
11762         vcpu_put(vcpu);
11763         return 0;
11764 }
11765
11766 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
11767 {
11768         struct fxregs_state *fxsave;
11769
11770         if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
11771                 return 0;
11772
11773         vcpu_load(vcpu);
11774
11775         fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave;
11776
11777         memcpy(fxsave->st_space, fpu->fpr, 128);
11778         fxsave->cwd = fpu->fcw;
11779         fxsave->swd = fpu->fsw;
11780         fxsave->twd = fpu->ftwx;
11781         fxsave->fop = fpu->last_opcode;
11782         fxsave->rip = fpu->last_ip;
11783         fxsave->rdp = fpu->last_dp;
11784         memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space));
11785
11786         vcpu_put(vcpu);
11787         return 0;
11788 }
11789
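/*
 * Copy the vCPU state selected by kvm_valid_regs into the shared kvm_run
 * area so userspace can read it without additional ioctls (KVM_CAP_SYNC_REGS).
 */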
11790 static void store_regs(struct kvm_vcpu *vcpu)
11791 {
11792         BUILD_BUG_ON(sizeof(struct kvm_sync_regs) > SYNC_REGS_SIZE_BYTES);
11793
11794         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS)
11795                 __get_regs(vcpu, &vcpu->run->s.regs.regs);
11796
11797         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS)
11798                 __get_sregs(vcpu, &vcpu->run->s.regs.sregs);
11799
11800         if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS)
11801                 kvm_vcpu_ioctl_x86_get_vcpu_events(
11802                                 vcpu, &vcpu->run->s.regs.events);
11803 }
11804
11805 static int sync_regs(struct kvm_vcpu *vcpu)
11806 {
11807         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) {
11808                 __set_regs(vcpu, &vcpu->run->s.regs.regs);
11809                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS;
11810         }
11811
11812         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) {
11813                 struct kvm_sregs sregs = vcpu->run->s.regs.sregs;
11814
11815                 if (__set_sregs(vcpu, &sregs))
11816                         return -EINVAL;
11817
11818                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS;
11819         }
11820
11821         if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) {
11822                 struct kvm_vcpu_events events = vcpu->run->s.regs.events;
11823
11824                 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events))
11825                         return -EINVAL;
11826
11827                 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS;
11828         }
11829
11830         return 0;
11831 }
11832
11833 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
11834 {
11835         if (kvm_check_tsc_unstable() && kvm->created_vcpus)
11836                 pr_warn_once("SMP vm created on host with unstable TSC; "
11837                              "guest TSC will not be reliable\n");
11838
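        /* Fall back to KVM's maximum vCPU ID if userspace hasn't capped it. */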
11839         if (!kvm->arch.max_vcpu_ids)
11840                 kvm->arch.max_vcpu_ids = KVM_MAX_VCPU_IDS;
11841
11842         if (id >= kvm->arch.max_vcpu_ids)
11843                 return -EINVAL;
11844
11845         return static_call(kvm_x86_vcpu_precreate)(kvm);
11846 }
11847
11848 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
11849 {
11850         struct page *page;
11851         int r;
11852
11853         vcpu->arch.last_vmentry_cpu = -1;
11854         vcpu->arch.regs_avail = ~0;
11855         vcpu->arch.regs_dirty = ~0;
11856
11857         kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN);
11858
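        /*
         * The BSP (or every vCPU when the local APIC is emulated in
         * userspace) starts out runnable; APs stay uninitialized until they
         * receive INIT/SIPI.
         */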
11859         if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
11860                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
11861         else
11862                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
11863
11864         r = kvm_mmu_create(vcpu);
11865         if (r < 0)
11866                 return r;
11867
11868         if (irqchip_in_kernel(vcpu->kvm)) {
11869                 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns);
11870                 if (r < 0)
11871                         goto fail_mmu_destroy;
11872
11873                 /*
11874                  * Defer evaluating inhibits until the vCPU is first run, as
11875                  * this vCPU will not get notified of any changes until this
11876                  * vCPU is visible to other vCPUs (marked online and added to
11877                  * the set of vCPUs).  Opportunistically mark APICv active as
11878                  * VMX in particular is highly unlikely to have inhibits.
11879                  * Ignore the current per-VM APICv state so that vCPU creation
11880                  * is guaranteed to run with a deterministic value; the request
11881                  * will ensure the vCPU gets the correct state before VM-Entry.
11882                  */
11883                 if (enable_apicv) {
11884                         vcpu->arch.apic->apicv_active = true;
11885                         kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
11886                 }
11887         } else
11888                 static_branch_inc(&kvm_has_noapic_vcpu);
11889
11890         r = -ENOMEM;
11891
11892         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
11893         if (!page)
11894                 goto fail_free_lapic;
11895         vcpu->arch.pio_data = page_address(page);
11896
11897         vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64),
11898                                        GFP_KERNEL_ACCOUNT);
11899         vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
11900                                             GFP_KERNEL_ACCOUNT);
11901         if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
11902                 goto fail_free_mce_banks;
11903         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
11904
11905         if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
11906                                 GFP_KERNEL_ACCOUNT))
11907                 goto fail_free_mce_banks;
11908
11909         if (!alloc_emulate_ctxt(vcpu))
11910                 goto free_wbinvd_dirty_mask;
11911
11912         if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) {
11913                 pr_err("failed to allocate vcpu's fpu\n");
11914                 goto free_emulate_ctxt;
11915         }
11916
11917         vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
11918         vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
11919
11920         vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT;
11921
11922         kvm_async_pf_hash_reset(vcpu);
11923
11924         vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap;
11925         kvm_pmu_init(vcpu);
11926
11927         vcpu->arch.pending_external_vector = -1;
11928         vcpu->arch.preempted_in_kernel = false;
11929
11930 #if IS_ENABLED(CONFIG_HYPERV)
11931         vcpu->arch.hv_root_tdp = INVALID_PAGE;
11932 #endif
11933
11934         r = static_call(kvm_x86_vcpu_create)(vcpu);
11935         if (r)
11936                 goto free_guest_fpu;
11937
11938         vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
11939         vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
11940         kvm_xen_init_vcpu(vcpu);
11941         kvm_vcpu_mtrr_init(vcpu);
11942         vcpu_load(vcpu);
11943         kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz);
11944         kvm_vcpu_reset(vcpu, false);
11945         kvm_init_mmu(vcpu);
11946         vcpu_put(vcpu);
11947         return 0;
11948
11949 free_guest_fpu:
11950         fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
11951 free_emulate_ctxt:
11952         kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
11953 free_wbinvd_dirty_mask:
11954         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
11955 fail_free_mce_banks:
11956         kfree(vcpu->arch.mce_banks);
11957         kfree(vcpu->arch.mci_ctl2_banks);
11958         free_page((unsigned long)vcpu->arch.pio_data);
11959 fail_free_lapic:
11960         kvm_free_lapic(vcpu);
11961 fail_mmu_destroy:
11962         kvm_mmu_destroy(vcpu);
11963         return r;
11964 }
11965
11966 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
11967 {
11968         struct kvm *kvm = vcpu->kvm;
11969
11970         if (mutex_lock_killable(&vcpu->mutex))
11971                 return;
11972         vcpu_load(vcpu);
11973         kvm_synchronize_tsc(vcpu, 0);
11974         vcpu_put(vcpu);
11975
11976         /* poll control enabled by default */
11977         vcpu->arch.msr_kvm_poll_control = 1;
11978
11979         mutex_unlock(&vcpu->mutex);
11980
11981         if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0)
11982                 schedule_delayed_work(&kvm->arch.kvmclock_sync_work,
11983                                                 KVMCLOCK_SYNC_PERIOD);
11984 }
11985
11986 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
11987 {
11988         int idx;
11989
11990         kvmclock_reset(vcpu);
11991
11992         static_call(kvm_x86_vcpu_free)(vcpu);
11993
11994         kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt);
11995         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
11996         fpu_free_guest_fpstate(&vcpu->arch.guest_fpu);
11997
11998         kvm_xen_destroy_vcpu(vcpu);
11999         kvm_hv_vcpu_uninit(vcpu);
12000         kvm_pmu_destroy(vcpu);
12001         kfree(vcpu->arch.mce_banks);
12002         kfree(vcpu->arch.mci_ctl2_banks);
12003         kvm_free_lapic(vcpu);
12004         idx = srcu_read_lock(&vcpu->kvm->srcu);
12005         kvm_mmu_destroy(vcpu);
12006         srcu_read_unlock(&vcpu->kvm->srcu, idx);
12007         free_page((unsigned long)vcpu->arch.pio_data);
12008         kvfree(vcpu->arch.cpuid_entries);
12009         if (!lapic_in_kernel(vcpu))
12010                 static_branch_dec(&kvm_has_noapic_vcpu);
12011 }
12012
12013 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
12014 {
12015         struct kvm_cpuid_entry2 *cpuid_0x1;
12016         unsigned long old_cr0 = kvm_read_cr0(vcpu);
12017         unsigned long new_cr0;
12018
12019         /*
12020          * Several of the "set" flows, e.g. ->set_cr0(), read other registers
12021          * to handle side effects.  RESET emulation hits those flows and relies
12022          * on emulated/virtualized registers, including those that are loaded
12023          * into hardware, to be zeroed at vCPU creation.  Use CRs as a sentinel
12024          * to detect improper or missing initialization.
12025          */
12026         WARN_ON_ONCE(!init_event &&
12027                      (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu)));
12028
12029         /*
12030          * SVM doesn't unconditionally VM-Exit on INIT and SHUTDOWN, thus it's
12031          * possible to INIT the vCPU while L2 is active.  Force the vCPU back
12032          * into L1 as EFER.SVME is cleared on INIT (along with all other EFER
12033          * bits), i.e. virtualization is disabled.
12034          */
12035         if (is_guest_mode(vcpu))
12036                 kvm_leave_nested(vcpu);
12037
12038         kvm_lapic_reset(vcpu, init_event);
12039
12040         WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu));
12041         vcpu->arch.hflags = 0;
12042
12043         vcpu->arch.smi_pending = 0;
12044         vcpu->arch.smi_count = 0;
12045         atomic_set(&vcpu->arch.nmi_queued, 0);
12046         vcpu->arch.nmi_pending = 0;
12047         vcpu->arch.nmi_injected = false;
12048         kvm_clear_interrupt_queue(vcpu);
12049         kvm_clear_exception_queue(vcpu);
12050
12051         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
12052         kvm_update_dr0123(vcpu);
12053         vcpu->arch.dr6 = DR6_ACTIVE_LOW;
12054         vcpu->arch.dr7 = DR7_FIXED_1;
12055         kvm_update_dr7(vcpu);
12056
12057         vcpu->arch.cr2 = 0;
12058
12059         kvm_make_request(KVM_REQ_EVENT, vcpu);
12060         vcpu->arch.apf.msr_en_val = 0;
12061         vcpu->arch.apf.msr_int_val = 0;
12062         vcpu->arch.st.msr_val = 0;
12063
12064         kvmclock_reset(vcpu);
12065
12066         kvm_clear_async_pf_completion_queue(vcpu);
12067         kvm_async_pf_hash_reset(vcpu);
12068         vcpu->arch.apf.halted = false;
12069
12070         if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) {
12071                 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
12072
12073                 /*
12074                  * All paths that lead to INIT are required to load the guest's
12075                  * FPU state (because most paths are buried in KVM_RUN).
12076                  */
12077                 if (init_event)
12078                         kvm_put_guest_fpu(vcpu);
12079
12080                 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDREGS);
12081                 fpstate_clear_xstate_component(fpstate, XFEATURE_BNDCSR);
12082
12083                 if (init_event)
12084                         kvm_load_guest_fpu(vcpu);
12085         }
12086
12087         if (!init_event) {
12088                 kvm_pmu_reset(vcpu);
12089                 vcpu->arch.smbase = 0x30000;
12090
12091                 vcpu->arch.msr_misc_features_enables = 0;
12092                 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |
12093                                                   MSR_IA32_MISC_ENABLE_BTS_UNAVAIL;
12094
12095                 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
12096                 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
12097         }
12098
12099         /* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
12100         memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
12101         kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP);
12102
12103         /*
12104          * Fall back to KVM's default Family/Model/Stepping of 0x600 (P6/Athlon)
12105          * if no CPUID match is found.  Note, it's impossible to get a match at
12106          * RESET since KVM emulates RESET before exposing the vCPU to userspace,
12107          * i.e. it's impossible for kvm_find_cpuid_entry() to find a valid entry
12108          * on RESET.  But, go through the motions in case that's ever remedied.
12109          */
12110         cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1);
12111         kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
12112
12113         static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
12114
12115         kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
12116         kvm_rip_write(vcpu, 0xfff0);
12117
12118         vcpu->arch.cr3 = 0;
12119         kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
12120
12121         /*
12122          * CR0.CD/NW are set on RESET, preserved on INIT.  Note, some versions
12123          * of Intel's SDM list CD/NW as being set on INIT, but they contradict
12124          * (or qualify) that with a footnote stating that CD/NW are preserved.
12125          */
12126         new_cr0 = X86_CR0_ET;
12127         if (init_event)
12128                 new_cr0 |= (old_cr0 & (X86_CR0_NW | X86_CR0_CD));
12129         else
12130                 new_cr0 |= X86_CR0_NW | X86_CR0_CD;
12131
12132         static_call(kvm_x86_set_cr0)(vcpu, new_cr0);
12133         static_call(kvm_x86_set_cr4)(vcpu, 0);
12134         static_call(kvm_x86_set_efer)(vcpu, 0);
12135         static_call(kvm_x86_update_exception_bitmap)(vcpu);
12136
12137         /*
12138          * On the standard CR0/CR4/EFER modification paths, there are several
12139          * complex conditions determining whether the MMU has to be reset and/or
12140          * which PCIDs have to be flushed.  However, CR0.WP and the paging-related
12141          * bits in CR4 and EFER are irrelevant if CR0.PG was '0'; and a reset+flush
12142          * is needed anyway if CR0.PG was '1' (which can only happen for INIT, as
12143          * CR0 will be '0' prior to RESET).  So we only need to check CR0.PG here.
12144          */
12145         if (old_cr0 & X86_CR0_PG) {
12146                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12147                 kvm_mmu_reset_context(vcpu);
12148         }
12149
12150         /*
12151          * Intel's SDM states that all TLB entries are flushed on INIT.  AMD's
12152          * APM states the TLBs are untouched by INIT, but it also states that
12153          * the TLBs are flushed on "External initialization of the processor."
12154          * Flush the guest TLB regardless of vendor, there is no meaningful
12155          * benefit in relying on the guest to flush the TLB immediately after
12156          * INIT.  A spurious TLB flush is benign and likely negligible from a
12157          * performance perspective.
12158          */
12159         if (init_event)
12160                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
12161 }
12162 EXPORT_SYMBOL_GPL(kvm_vcpu_reset);
12163
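/*
 * Per the SDM, a SIPI with vector V starts the AP at physical address
 * V << 12, i.e. CS.selector = V << 8, CS.base = V << 12 and RIP = 0.
 */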
12164 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
12165 {
12166         struct kvm_segment cs;
12167
12168         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
12169         cs.selector = vector << 8;
12170         cs.base = vector << 12;
12171         kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
12172         kvm_rip_write(vcpu, 0);
12173 }
12174 EXPORT_SYMBOL_GPL(kvm_vcpu_deliver_sipi_vector);
12175
12176 int kvm_arch_hardware_enable(void)
12177 {
12178         struct kvm *kvm;
12179         struct kvm_vcpu *vcpu;
12180         unsigned long i;
12181         int ret;
12182         u64 local_tsc;
12183         u64 max_tsc = 0;
12184         bool stable, backwards_tsc = false;
12185
12186         kvm_user_return_msr_cpu_online();
12187
12188         ret = kvm_x86_check_processor_compatibility();
12189         if (ret)
12190                 return ret;
12191
12192         ret = static_call(kvm_x86_hardware_enable)();
12193         if (ret != 0)
12194                 return ret;
12195
12196         local_tsc = rdtsc();
12197         stable = !kvm_check_tsc_unstable();
12198         list_for_each_entry(kvm, &vm_list, vm_list) {
12199                 kvm_for_each_vcpu(i, vcpu, kvm) {
12200                         if (!stable && vcpu->cpu == smp_processor_id())
12201                                 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
12202                         if (stable && vcpu->arch.last_host_tsc > local_tsc) {
12203                                 backwards_tsc = true;
12204                                 if (vcpu->arch.last_host_tsc > max_tsc)
12205                                         max_tsc = vcpu->arch.last_host_tsc;
12206                         }
12207                 }
12208         }
12209
12210         /*
12211          * Sometimes, even reliable TSCs go backwards.  This happens on
12212          * platforms that reset TSC during suspend or hibernate actions, but
12213          * maintain synchronization.  We must compensate.  Fortunately, we can
12214          * detect that condition here, which happens early in CPU bringup,
12215          * before any KVM threads can be running.  Unfortunately, we can't
12216          * bring the TSCs fully up to date with real time, as we aren't yet far
12217          * enough into CPU bringup that we know how much real time has actually
12218          * elapsed; our helper function, ktime_get_boottime_ns(), will be using boot
12219          * variables that haven't been updated yet.
12220          *
12221          * So we simply find the maximum observed TSC above, then record the
12222          * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
12223          * the adjustment will be applied.  Note that we accumulate
12224          * adjustments, in case multiple suspend cycles happen before some VCPU
12225          * gets a chance to run again.  In the event that no KVM threads get a
12226          * chance to run, we will miss the entire elapsed period, as we'll have
12227          * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
12228          * lose cycle time.  This isn't too big a deal, since the loss will be
12229          * uniform across all VCPUs (not to mention the scenario is extremely
12230          * unlikely). It is possible that a second hibernate recovery happens
12231          * much faster than a first, causing the observed TSC here to be
12232          * smaller; this would require additional padding adjustment, which is
12233          * why we set last_host_tsc to the local tsc observed here.
12234          *
12235          * N.B. - this code below runs only on platforms with reliable TSC,
12236          * as that is the only way backwards_tsc is set above.  Also note
12237          * that this runs for ALL vcpus, which is not a bug; all VCPUs should
12238          * have the same delta_cyc adjustment applied if backwards_tsc
12239          * is detected.  Note further, this adjustment is only done once,
12240          * as we reset last_host_tsc on all VCPUs to stop this from being
12241          * called multiple times (one for each physical CPU bringup).
12242          *
12243          * Platforms with unreliable TSCs don't have to deal with this, they
12244          * will be compensated by the logic in vcpu_load, which sets the TSC to
12245          * catchup mode.  This will catchup all VCPUs to real time, but cannot
12246          * guarantee that they stay in perfect synchronization.
12247          */
12248         if (backwards_tsc) {
12249                 u64 delta_cyc = max_tsc - local_tsc;
12250                 list_for_each_entry(kvm, &vm_list, vm_list) {
12251                         kvm->arch.backwards_tsc_observed = true;
12252                         kvm_for_each_vcpu(i, vcpu, kvm) {
12253                                 vcpu->arch.tsc_offset_adjustment += delta_cyc;
12254                                 vcpu->arch.last_host_tsc = local_tsc;
12255                                 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
12256                         }
12257
12258                         /*
12259                          * We have to disable TSC offset matching; if you were
12260                          * booting a VM while issuing an S4 host suspend, you
12261                          * may have a problem.  Solving this issue is
12262                          * left as an exercise to the reader.
12263                          */
12264                         kvm->arch.last_tsc_nsec = 0;
12265                         kvm->arch.last_tsc_write = 0;
12266                 }
12267
12268         }
12269         return 0;
12270 }
12271
12272 void kvm_arch_hardware_disable(void)
12273 {
12274         static_call(kvm_x86_hardware_disable)();
12275         drop_user_return_notifiers();
12276 }
12277
12278 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu)
12279 {
12280         return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id;
12281 }
12282
12283 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
12284 {
12285         return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0;
12286 }
12287
12288 __read_mostly DEFINE_STATIC_KEY_FALSE(kvm_has_noapic_vcpu);
12289 EXPORT_SYMBOL_GPL(kvm_has_noapic_vcpu);
12290
12291 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
12292 {
12293         struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
12294
12295         vcpu->arch.l1tf_flush_l1d = true;
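        /*
         * If the guest PMU has created perf events, schedule a lazy cleanup
         * so that events the guest is no longer using can be released from
         * vCPU context via KVM_REQ_PMU.
         */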
12296         if (pmu->version && unlikely(pmu->event_count)) {
12297                 pmu->need_cleanup = true;
12298                 kvm_make_request(KVM_REQ_PMU, vcpu);
12299         }
12300         static_call(kvm_x86_sched_in)(vcpu, cpu);
12301 }
12302
12303 void kvm_arch_free_vm(struct kvm *kvm)
12304 {
12305         kfree(to_kvm_hv(kvm)->hv_pa_pg);
12306         __kvm_arch_free_vm(kvm);
12307 }
12308
12309
12310 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
12311 {
12312         int ret;
12313         unsigned long flags;
12314
12315         if (type)
12316                 return -EINVAL;
12317
12318         ret = kvm_page_track_init(kvm);
12319         if (ret)
12320                 goto out;
12321
12322         kvm_mmu_init_vm(kvm);
12323
12324         ret = static_call(kvm_x86_vm_init)(kvm);
12325         if (ret)
12326                 goto out_uninit_mmu;
12327
12328         INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
12329         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
12330         atomic_set(&kvm->arch.noncoherent_dma_count, 0);
12331
12332         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
12333         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
12334         /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
12335         set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
12336                 &kvm->arch.irq_sources_bitmap);
12337
12338         raw_spin_lock_init(&kvm->arch.tsc_write_lock);
12339         mutex_init(&kvm->arch.apic_map_lock);
12340         seqcount_raw_spinlock_init(&kvm->arch.pvclock_sc, &kvm->arch.tsc_write_lock);
12341         kvm->arch.kvmclock_offset = -get_kvmclock_base_ns();
12342
12343         raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
12344         pvclock_update_vm_gtod_copy(kvm);
12345         raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
12346
12347         kvm->arch.default_tsc_khz = max_tsc_khz ? : tsc_khz;
12348         kvm->arch.guest_can_read_msr_platform_info = true;
12349         kvm->arch.enable_pmu = enable_pmu;
12350
12351 #if IS_ENABLED(CONFIG_HYPERV)
12352         spin_lock_init(&kvm->arch.hv_root_tdp_lock);
12353         kvm->arch.hv_root_tdp = INVALID_PAGE;
12354 #endif
12355
12356         INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
12357         INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
12358
12359         kvm_apicv_init(kvm);
12360         kvm_hv_init_vm(kvm);
12361         kvm_xen_init_vm(kvm);
12362
12363         return 0;
12364
12365 out_uninit_mmu:
12366         kvm_mmu_uninit_vm(kvm);
12367         kvm_page_track_cleanup(kvm);
12368 out:
12369         return ret;
12370 }
12371
12372 int kvm_arch_post_init_vm(struct kvm *kvm)
12373 {
12374         return kvm_mmu_post_init_vm(kvm);
12375 }
12376
12377 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
12378 {
12379         vcpu_load(vcpu);
12380         kvm_mmu_unload(vcpu);
12381         vcpu_put(vcpu);
12382 }
12383
12384 static void kvm_unload_vcpu_mmus(struct kvm *kvm)
12385 {
12386         unsigned long i;
12387         struct kvm_vcpu *vcpu;
12388
12389         kvm_for_each_vcpu(i, vcpu, kvm) {
12390                 kvm_clear_async_pf_completion_queue(vcpu);
12391                 kvm_unload_vcpu_mmu(vcpu);
12392         }
12393 }
12394
12395 void kvm_arch_sync_events(struct kvm *kvm)
12396 {
12397         cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work);
12398         cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work);
12399         kvm_free_pit(kvm);
12400 }
12401
12402 /**
12403  * __x86_set_memory_region: Setup KVM internal memory slot
12404  *
12405  * @kvm: the kvm pointer to the VM.
12406  * @id: the slot ID to set up.
12407  * @gpa: the GPA to install the slot (unused when @size == 0).
12408  * @size: the size of the slot. Set to zero to uninstall a slot.
12409  *
12410  * This function helps to set up a KVM internal memory slot.  Specify
12411  * @size > 0 to install a new slot, while @size == 0 to uninstall a
12412  * slot.  The return code can be one of the following:
12413  *
12414  *   HVA:           on success (uninstall will return a bogus HVA)
12415  *   -errno:        on error
12416  *
12417  * The caller should always use IS_ERR() to check the return value
12418  * before use.  Note, the KVM internal memory slots are guaranteed to
12419  * remain valid and unchanged until the VM is destroyed, i.e., the
12420  * GPA->HVA translation will not change.  However, the HVA is a user
12421  * address, i.e. its accessibility is not guaranteed, and must be
12422  * accessed via __copy_{to,from}_user().
12423  */
12424 void __user * __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
12425                                       u32 size)
12426 {
12427         int i, r;
12428         unsigned long hva, old_npages;
12429         struct kvm_memslots *slots = kvm_memslots(kvm);
12430         struct kvm_memory_slot *slot;
12431
12432         /* Called with kvm->slots_lock held.  */
12433         if (WARN_ON(id >= KVM_MEM_SLOTS_NUM))
12434                 return ERR_PTR_USR(-EINVAL);
12435
12436         slot = id_to_memslot(slots, id);
12437         if (size) {
12438                 if (slot && slot->npages)
12439                         return ERR_PTR_USR(-EEXIST);
12440
12441                 /*
12442                  * MAP_SHARED to prevent internal slot pages from being moved
12443                  * by fork()/COW.
12444                  */
12445                 hva = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
12446                               MAP_SHARED | MAP_ANONYMOUS, 0);
12447                 if (IS_ERR_VALUE(hva))
12448                         return (void __user *)hva;
12449         } else {
12450                 if (!slot || !slot->npages)
12451                         return NULL;
12452
12453                 old_npages = slot->npages;
12454                 hva = slot->userspace_addr;
12455         }
12456
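        /*
         * Install (or delete) the slot in every address space, e.g. in both
         * the normal and SMM views of guest memory.
         */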
12457         for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
12458                 struct kvm_userspace_memory_region m;
12459
12460                 m.slot = id | (i << 16);
12461                 m.flags = 0;
12462                 m.guest_phys_addr = gpa;
12463                 m.userspace_addr = hva;
12464                 m.memory_size = size;
12465                 r = __kvm_set_memory_region(kvm, &m);
12466                 if (r < 0)
12467                         return ERR_PTR_USR(r);
12468         }
12469
12470         if (!size)
12471                 vm_munmap(hva, old_npages * PAGE_SIZE);
12472
12473         return (void __user *)hva;
12474 }
12475 EXPORT_SYMBOL_GPL(__x86_set_memory_region);
12476
12477 void kvm_arch_pre_destroy_vm(struct kvm *kvm)
12478 {
12479         kvm_mmu_pre_destroy_vm(kvm);
12480 }
12481
12482 void kvm_arch_destroy_vm(struct kvm *kvm)
12483 {
12484         if (current->mm == kvm->mm) {
12485                 /*
12486                  * Free memory regions allocated on behalf of userspace,
12487                  * unless the memory map has changed due to process exit
12488                  * or fd copying.
12489                  */
12490                 mutex_lock(&kvm->slots_lock);
12491                 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
12492                                         0, 0);
12493                 __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
12494                                         0, 0);
12495                 __x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, 0, 0);
12496                 mutex_unlock(&kvm->slots_lock);
12497         }
12498         kvm_unload_vcpu_mmus(kvm);
12499         static_call_cond(kvm_x86_vm_destroy)(kvm);
12500         kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1));
12501         kvm_pic_destroy(kvm);
12502         kvm_ioapic_destroy(kvm);
12503         kvm_destroy_vcpus(kvm);
12504         kvfree(rcu_dereference_check(kvm->arch.apic_map, 1));
12505         kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1));
12506         kvm_mmu_uninit_vm(kvm);
12507         kvm_page_track_cleanup(kvm);
12508         kvm_xen_destroy_vm(kvm);
12509         kvm_hv_destroy_vm(kvm);
12510 }
12511
12512 static void memslot_rmap_free(struct kvm_memory_slot *slot)
12513 {
12514         int i;
12515
12516         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12517                 kvfree(slot->arch.rmap[i]);
12518                 slot->arch.rmap[i] = NULL;
12519         }
12520 }
12521
12522 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
12523 {
12524         int i;
12525
12526         memslot_rmap_free(slot);
12527
12528         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12529                 kvfree(slot->arch.lpage_info[i - 1]);
12530                 slot->arch.lpage_info[i - 1] = NULL;
12531         }
12532
12533         kvm_page_track_free_memslot(slot);
12534 }
12535
12536 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
12537 {
12538         const int sz = sizeof(*slot->arch.rmap[0]);
12539         int i;
12540
12541         for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
12542                 int level = i + 1;
12543                 int lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12544
12545                 if (slot->arch.rmap[i])
12546                         continue;
12547
12548                 slot->arch.rmap[i] = __vcalloc(lpages, sz, GFP_KERNEL_ACCOUNT);
12549                 if (!slot->arch.rmap[i]) {
12550                         memslot_rmap_free(slot);
12551                         return -ENOMEM;
12552                 }
12553         }
12554
12555         return 0;
12556 }
12557
12558 static int kvm_alloc_memslot_metadata(struct kvm *kvm,
12559                                       struct kvm_memory_slot *slot)
12560 {
12561         unsigned long npages = slot->npages;
12562         int i, r;
12563
12564         /*
12565          * Clear out the previous array pointers for the KVM_MR_MOVE case.  The
12566          * old arrays will be freed by __kvm_set_memory_region() if installing
12567          * the new memslot is successful.
12568          */
12569         memset(&slot->arch, 0, sizeof(slot->arch));
12570
12571         if (kvm_memslots_have_rmaps(kvm)) {
12572                 r = memslot_rmap_alloc(slot, npages);
12573                 if (r)
12574                         return r;
12575         }
12576
12577         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12578                 struct kvm_lpage_info *linfo;
12579                 unsigned long ugfn;
12580                 int lpages;
12581                 int level = i + 1;
12582
12583                 lpages = __kvm_mmu_slot_lpages(slot, npages, level);
12584
12585                 linfo = __vcalloc(lpages, sizeof(*linfo), GFP_KERNEL_ACCOUNT);
12586                 if (!linfo)
12587                         goto out_free;
12588
12589                 slot->arch.lpage_info[i - 1] = linfo;
12590
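                /*
                 * Disallow huge pages at the head or tail of the slot if the
                 * slot's boundaries aren't huge page aligned, as such pages
                 * would extend beyond the memslot.
                 */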
12591                 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
12592                         linfo[0].disallow_lpage = 1;
12593                 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
12594                         linfo[lpages - 1].disallow_lpage = 1;
12595                 ugfn = slot->userspace_addr >> PAGE_SHIFT;
12596                 /*
12597                  * If the gfn and userspace address are not aligned wrt each
12598                  * other, disable large page support for this slot.
12599                  */
12600                 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
12601                         unsigned long j;
12602
12603                         for (j = 0; j < lpages; ++j)
12604                                 linfo[j].disallow_lpage = 1;
12605                 }
12606         }
12607
12608         if (kvm_page_track_create_memslot(kvm, slot, npages))
12609                 goto out_free;
12610
12611         return 0;
12612
12613 out_free:
12614         memslot_rmap_free(slot);
12615
12616         for (i = 1; i < KVM_NR_PAGE_SIZES; ++i) {
12617                 kvfree(slot->arch.lpage_info[i - 1]);
12618                 slot->arch.lpage_info[i - 1] = NULL;
12619         }
12620         return -ENOMEM;
12621 }
12622
12623 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
12624 {
12625         struct kvm_vcpu *vcpu;
12626         unsigned long i;
12627
12628         /*
12629          * memslots->generation has been incremented.
12630          * mmio generation may have reached its maximum value.
12631          */
12632         kvm_mmu_invalidate_mmio_sptes(kvm, gen);
12633
12634         /* Force re-initialization of steal_time cache */
12635         kvm_for_each_vcpu(i, vcpu, kvm)
12636                 kvm_vcpu_kick(vcpu);
12637 }
12638
12639 int kvm_arch_prepare_memory_region(struct kvm *kvm,
12640                                    const struct kvm_memory_slot *old,
12641                                    struct kvm_memory_slot *new,
12642                                    enum kvm_mr_change change)
12643 {
12644         /*
12645          * KVM doesn't support moving memslots when there are external page
12646          * trackers attached to the VM, i.e. if KVMGT is in use.
12647          */
12648         if (change == KVM_MR_MOVE && kvm_page_track_has_external_user(kvm))
12649                 return -EINVAL;
12650
12651         if (change == KVM_MR_CREATE || change == KVM_MR_MOVE) {
12652                 if ((new->base_gfn + new->npages - 1) > kvm_mmu_max_gfn())
12653                         return -EINVAL;
12654
12655                 return kvm_alloc_memslot_metadata(kvm, new);
12656         }
12657
12658         if (change == KVM_MR_FLAGS_ONLY)
12659                 memcpy(&new->arch, &old->arch, sizeof(old->arch));
12660         else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
12661                 return -EIO;
12662
12663         return 0;
12664 }
12665
12666
12667 static void kvm_mmu_update_cpu_dirty_logging(struct kvm *kvm, bool enable)
12668 {
12669         int nr_slots;
12670
12671         if (!kvm_x86_ops.cpu_dirty_log_size)
12672                 return;
12673
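        /*
         * Notify vCPUs only on the 0<->1 transition of the number of memslots
         * with dirty logging enabled, i.e. only when CPU-assisted dirty
         * logging (e.g. PML) actually needs to be enabled or disabled.
         */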
12674         nr_slots = atomic_read(&kvm->nr_memslots_dirty_logging);
12675         if ((enable && nr_slots == 1) || !nr_slots)
12676                 kvm_make_all_cpus_request(kvm, KVM_REQ_UPDATE_CPU_DIRTY_LOGGING);
12677 }
12678
12679 static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
12680                                      struct kvm_memory_slot *old,
12681                                      const struct kvm_memory_slot *new,
12682                                      enum kvm_mr_change change)
12683 {
12684         u32 old_flags = old ? old->flags : 0;
12685         u32 new_flags = new ? new->flags : 0;
12686         bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
12687
12688         /*
12689          * Update CPU dirty logging if dirty logging is being toggled.  This
12690          * applies to all operations.
12691          */
12692         if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
12693                 kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
12694
12695         /*
12696          * Nothing more to do for RO slots (which can't be dirtied and can't be
12697          * made writable) or CREATE/MOVE/DELETE of a slot.
12698          *
12699          * For a memslot with dirty logging disabled:
12700          * CREATE:      No dirty mappings will already exist.
12701          * MOVE/DELETE: The old mappings will already have been cleaned up by
12702          *              kvm_arch_flush_shadow_memslot()
12703          *
12704          * For a memslot with dirty logging enabled:
12705          * CREATE:      No shadow pages exist, thus nothing to write-protect
12706          *              and no dirty bits to clear.
12707          * MOVE/DELETE: The old mappings will already have been cleaned up by
12708          *              kvm_arch_flush_shadow_memslot().
12709          */
12710         if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
12711                 return;
12712
12713         /*
12714          * READONLY and non-flags changes were filtered out above, and the only
12715          * other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
12716          * logging isn't being toggled on or off.
12717          */
12718         if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
12719                 return;
12720
12721         if (!log_dirty_pages) {
12722                 /*
12723                  * Dirty logging tracks sptes in 4k granularity, meaning that
12724                  * large sptes have to be split.  If live migration succeeds,
12725                  * the guest in the source machine will be destroyed and large
12726                  * sptes will be created in the destination.  However, if the
12727                  * guest continues to run in the source machine (for example if
12728                  * live migration fails), small sptes will remain around and
12729                  * cause bad performance.
12730                  *
12731                  * Scan sptes if dirty logging has been stopped, dropping those
12732                  * which can be collapsed into a single large-page spte.  Later
12733                  * page faults will create the large-page sptes.
12734                  */
12735                 kvm_mmu_zap_collapsible_sptes(kvm, new);
12736         } else {
12737                 /*
12738                  * Initially-all-set does not require write-protecting any pages,
12739                  * because they're all assumed to be dirty.
12740                  */
12741                 if (kvm_dirty_log_manual_protect_and_init_set(kvm))
12742                         return;
12743
12744                 if (READ_ONCE(eager_page_split))
12745                         kvm_mmu_slot_try_split_huge_pages(kvm, new, PG_LEVEL_4K);
12746
12747                 if (kvm_x86_ops.cpu_dirty_log_size) {
12748                         kvm_mmu_slot_leaf_clear_dirty(kvm, new);
12749                         kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_2M);
12750                 } else {
12751                         kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
12752                 }
12753
12754                 /*
12755                  * Unconditionally flush the TLBs after enabling dirty logging.
12756                  * A flush is almost always going to be necessary (see below),
12757                  * and unconditionally flushing allows the helpers to omit
12758                  * the subtly complex checks when removing write access.
12759                  *
12760                  * Do the flush outside of mmu_lock to reduce the amount of
12761                  * time mmu_lock is held.  Flushing after dropping mmu_lock is
12762                  * safe as KVM only needs to guarantee the slot is fully
12763                  * write-protected before returning to userspace, i.e. before
12764                  * userspace can consume the dirty status.
12765                  *
12766                  * Flushing outside of mmu_lock requires KVM to be careful when
12767                  * making decisions based on writable status of an SPTE, e.g. a
12768                  * !writable SPTE doesn't guarantee a CPU can't perform writes.
12769                  *
12770                  * Specifically, KVM also write-protects guest page tables to
12771                  * monitor changes when using shadow paging, and must guarantee
12772                  * no CPUs can write to those pages before mmu_lock is dropped.
12773                  * Because CPUs may have stale TLB entries at this point, a
12774                  * !writable SPTE doesn't guarantee CPUs can't perform writes.
12775                  *
12776                  * KVM also allows making SPTEs writable outside of mmu_lock,
12777                  * e.g. to allow dirty logging without taking mmu_lock.
12778                  *
12779                  * To handle these scenarios, KVM uses a separate software-only
12780                  * bit (MMU-writable) to track if a SPTE is !writable due to
12781                  * a guest page table being write-protected (KVM clears the
12782                  * MMU-writable flag when write-protecting for shadow paging).
12783                  *
12784                  * The use of MMU-writable is also the primary motivation for
12785                  * the unconditional flush.  Because KVM must guarantee that a
12786                  * CPU doesn't contain stale, writable TLB entries for a
12787                  * !MMU-writable SPTE, KVM must flush if it encounters any
12788                  * MMU-writable SPTE regardless of whether the actual hardware
12789                  * writable bit was set.  I.e. KVM is almost guaranteed to need
12790                  * to flush, while unconditionally flushing allows the "remove
12791                  * write access" helpers to ignore MMU-writable entirely.
12792                  *
12793                  * See is_writable_pte() for more details (the case involving
12794                  * access-tracked SPTEs is particularly relevant).
12795                  */
12796                 kvm_flush_remote_tlbs_memslot(kvm, new);
12797         }
12798 }
12799
12800 void kvm_arch_commit_memory_region(struct kvm *kvm,
12801                                 struct kvm_memory_slot *old,
12802                                 const struct kvm_memory_slot *new,
12803                                 enum kvm_mr_change change)
12804 {
12805         if (change == KVM_MR_DELETE)
12806                 kvm_page_track_delete_slot(kvm, old);
12807
12808         if (!kvm->arch.n_requested_mmu_pages &&
12809             (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
12810                 unsigned long nr_mmu_pages;
12811
12812                 nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
12813                 nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
12814                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
12815         }
12816
12817         kvm_mmu_slot_apply_flags(kvm, old, new, change);
12818
12819         /* Free the arrays associated with the old memslot. */
12820         if (change == KVM_MR_MOVE)
12821                 kvm_arch_free_memslot(kvm, old);
12822 }
12823
12824 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
12825 {
12826         return (is_guest_mode(vcpu) &&
12827                 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu));
12828 }
12829
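      /*
       * Check for any pending event that could make the vCPU runnable:
       * completed async page faults, INIT/SIPI, a pending exception, NMI,
       * SMI, PMI, a deliverable interrupt, a Hyper-V synthetic timer, a
       * nested event, or a Xen event.  kvm_arch_vcpu_runnable() combines
       * this with kvm_vcpu_running() to decide whether a halted vCPU
       * should be woken up.
       */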
12830 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
12831 {
12832         if (!list_empty_careful(&vcpu->async_pf.done))
12833                 return true;
12834
12835         if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
12836             kvm_apic_init_sipi_allowed(vcpu))
12837                 return true;
12838
12839         if (vcpu->arch.pv.pv_unhalted)
12840                 return true;
12841
12842         if (kvm_is_exception_pending(vcpu))
12843                 return true;
12844
12845         if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
12846             (vcpu->arch.nmi_pending &&
12847              static_call(kvm_x86_nmi_allowed)(vcpu, false)))
12848                 return true;
12849
12850 #ifdef CONFIG_KVM_SMM
12851         if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
12852             (vcpu->arch.smi_pending &&
12853              static_call(kvm_x86_smi_allowed)(vcpu, false)))
12854                 return true;
12855 #endif
12856
12857         if (kvm_test_request(KVM_REQ_PMI, vcpu))
12858                 return true;
12859
12860         if (kvm_arch_interrupt_allowed(vcpu) &&
12861             (kvm_cpu_has_interrupt(vcpu) ||
12862             kvm_guest_apic_has_interrupt(vcpu)))
12863                 return true;
12864
12865         if (kvm_hv_has_stimer_pending(vcpu))
12866                 return true;
12867
12868         if (is_guest_mode(vcpu) &&
12869             kvm_x86_ops.nested_ops->has_events &&
12870             kvm_x86_ops.nested_ops->has_events(vcpu))
12871                 return true;
12872
12873         if (kvm_xen_has_pending_events(vcpu))
12874                 return true;
12875
12876         return false;
12877 }
12878
12879 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
12880 {
12881         return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
12882 }
12883
12884 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
12885 {
12886         if (kvm_vcpu_apicv_active(vcpu) &&
12887             static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu))
12888                 return true;
12889
12890         return false;
12891 }
12892
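      /*
       * Lockless check of whether a vCPU likely has work pending, used when
       * picking a directed-yield target (kvm_vcpu_on_spin()); it deliberately
       * avoids the heavier checks done in kvm_vcpu_has_events().
       */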
12893 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
12894 {
12895         if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
12896                 return true;
12897
12898         if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
12899 #ifdef CONFIG_KVM_SMM
12900                 kvm_test_request(KVM_REQ_SMI, vcpu) ||
12901 #endif
12902                  kvm_test_request(KVM_REQ_EVENT, vcpu))
12903                 return true;
12904
12905         return kvm_arch_dy_has_pending_interrupt(vcpu);
12906 }
12907
12908 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
12909 {
12910         if (vcpu->arch.guest_state_protected)
12911                 return true;
12912
12913         return vcpu->arch.preempted_in_kernel;
12914 }
12915
12916 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
12917 {
12918         return kvm_rip_read(vcpu);
12919 }
12920
12921 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
12922 {
12923         return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
12924 }
12925
12926 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
12927 {
12928         return static_call(kvm_x86_interrupt_allowed)(vcpu, false);
12929 }
12930
12931 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu)
12932 {
12933         /* Can't read the RIP when guest state is protected, just return 0 */
12934         if (vcpu->arch.guest_state_protected)
12935                 return 0;
12936
12937         if (is_64_bit_mode(vcpu))
12938                 return kvm_rip_read(vcpu);
12939         return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) +
12940                      kvm_rip_read(vcpu));
12941 }
12942 EXPORT_SYMBOL_GPL(kvm_get_linear_rip);
12943
12944 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
12945 {
12946         return kvm_get_linear_rip(vcpu) == linear_rip;
12947 }
12948 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
12949
12950 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
12951 {
12952         unsigned long rflags;
12953
12954         rflags = static_call(kvm_x86_get_rflags)(vcpu);
12955         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
12956                 rflags &= ~X86_EFLAGS_TF;
12957         return rflags;
12958 }
12959 EXPORT_SYMBOL_GPL(kvm_get_rflags);
12960
12961 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
12962 {
12963         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
12964             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
12965                 rflags |= X86_EFLAGS_TF;
12966         static_call(kvm_x86_set_rflags)(vcpu, rflags);
12967 }
12968
12969 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
12970 {
12971         __kvm_set_rflags(vcpu, rflags);
12972         kvm_make_request(KVM_REQ_EVENT, vcpu);
12973 }
12974 EXPORT_SYMBOL_GPL(kvm_set_rflags);
12975
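      /*
       * vcpu->arch.apf.gfns[] is a small, open-addressed hash table (linear
       * probing, ASYNC_PF_PER_VCPU power-of-2 slots) of guest frame numbers
       * with an outstanding "page not present" async #PF; ~0 marks a free
       * slot.
       */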
12976 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
12977 {
12978         BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
12979
12980         return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
12981 }
12982
12983 static inline u32 kvm_async_pf_next_probe(u32 key)
12984 {
12985         return (key + 1) & (ASYNC_PF_PER_VCPU - 1);
12986 }
12987
12988 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
12989 {
12990         u32 key = kvm_async_pf_hash_fn(gfn);
12991
12992         while (vcpu->arch.apf.gfns[key] != ~0)
12993                 key = kvm_async_pf_next_probe(key);
12994
12995         vcpu->arch.apf.gfns[key] = gfn;
12996 }
12997
12998 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
12999 {
13000         int i;
13001         u32 key = kvm_async_pf_hash_fn(gfn);
13002
13003         for (i = 0; i < ASYNC_PF_PER_VCPU &&
13004                      (vcpu->arch.apf.gfns[key] != gfn &&
13005                       vcpu->arch.apf.gfns[key] != ~0); i++)
13006                 key = kvm_async_pf_next_probe(key);
13007
13008         return key;
13009 }
13010
13011 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13012 {
13013         return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
13014 }
13015
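      /*
       * Delete a gfn using backward-shift deletion: after emptying slot i,
       * walk the probe chain and move back any entry whose home bucket does
       * not lie cyclically in ]i, j], so that later lookups never stop at a
       * spurious empty slot.
       */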
13016 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
13017 {
13018         u32 i, j, k;
13019
13020         i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
13021
13022         if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn))
13023                 return;
13024
13025         while (true) {
13026                 vcpu->arch.apf.gfns[i] = ~0;
13027                 do {
13028                         j = kvm_async_pf_next_probe(j);
13029                         if (vcpu->arch.apf.gfns[j] == ~0)
13030                                 return;
13031                         k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
13032                         /*
13033                          * k lies cyclically in ]i,j]
13034                          * |    i.k.j |
13035                          * |....j i.k.| or  |.k..j i...|
13036                          */
13037                 } while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
13038                 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
13039                 i = j;
13040         }
13041 }
13042
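      /*
       * Helpers for the guest's shared async-PF data (vcpu->arch.apf.data):
       * apf_put_user_notpresent() stores the "page not present" reason,
       * apf_put_user_ready() stores the wakeup token, and
       * apf_pageready_slot_free() checks that the guest has consumed
       * (zeroed) the previous token.
       */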
13043 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu)
13044 {
13045         u32 reason = KVM_PV_REASON_PAGE_NOT_PRESENT;
13046
13047         return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason,
13048                                       sizeof(reason));
13049 }
13050
13051 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token)
13052 {
13053         unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13054
13055         return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13056                                              &token, offset, sizeof(token));
13057 }
13058
13059 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu)
13060 {
13061         unsigned int offset = offsetof(struct kvm_vcpu_pv_apf_data, token);
13062         u32 val;
13063
13064         if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data,
13065                                          &val, offset, sizeof(val)))
13066                 return false;
13067
13068         return !val;
13069 }
13070
13071 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu)
13072 {
13073
13074         if (!kvm_pv_async_pf_enabled(vcpu))
13075                 return false;
13076
13077         if (vcpu->arch.apf.send_user_only &&
13078             static_call(kvm_x86_get_cpl)(vcpu) == 0)
13079                 return false;
13080
13081         if (is_guest_mode(vcpu)) {
13082                 /*
13083                  * L1 needs to opt into the special #PF vmexits that are
13084                  * used to deliver async page faults.
13085                  */
13086                 return vcpu->arch.apf.delivery_as_pf_vmexit;
13087         } else {
13088                 /*
13089                  * Play it safe in case the guest temporarily disables paging.
13090                  * The real mode IDT in particular is unlikely to have a #PF
13091                  * exception setup.
13092                  */
13093                 return is_paging(vcpu);
13094         }
13095 }
13096
13097 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
13098 {
13099         if (unlikely(!lapic_in_kernel(vcpu) ||
13100                      kvm_event_needs_reinjection(vcpu) ||
13101                      kvm_is_exception_pending(vcpu)))
13102                 return false;
13103
13104         if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
13105                 return false;
13106
13107         /*
13108          * If interrupts are off we cannot even use an artificial
13109          * halt state.
13110          */
13111         return kvm_arch_interrupt_allowed(vcpu);
13112 }
13113
13114 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
13115                                      struct kvm_async_pf *work)
13116 {
13117         struct x86_exception fault;
13118
13119         trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa);
13120         kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
13121
13122         if (kvm_can_deliver_async_pf(vcpu) &&
13123             !apf_put_user_notpresent(vcpu)) {
13124                 fault.vector = PF_VECTOR;
13125                 fault.error_code_valid = true;
13126                 fault.error_code = 0;
13127                 fault.nested_page_fault = false;
13128                 fault.address = work->arch.token;
13129                 fault.async_page_fault = true;
13130                 kvm_inject_page_fault(vcpu, &fault);
13131                 return true;
13132         } else {
13133                 /*
13134                  * It is not possible to deliver a paravirtualized asynchronous
13135                  * page fault, but putting the guest in an artificial halt state
13136                  * can be beneficial nevertheless: if an interrupt arrives, we
13137                  * can deliver it promptly and perhaps the guest will schedule
13138                  * another process.  When the instruction that triggered a page
13139                  * fault is retried, hopefully the page will be ready in the host.
13140                  */
13141                 kvm_make_request(KVM_REQ_APF_HALT, vcpu);
13142                 return false;
13143         }
13144 }
13145
13146 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
13147                                  struct kvm_async_pf *work)
13148 {
13149         struct kvm_lapic_irq irq = {
13150                 .delivery_mode = APIC_DM_FIXED,
13151                 .vector = vcpu->arch.apf.vec
13152         };
13153
13154         if (work->wakeup_all)
13155                 work->arch.token = ~0; /* broadcast wakeup */
13156         else
13157                 kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
13158         trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa);
13159
13160         if ((work->wakeup_all || work->notpresent_injected) &&
13161             kvm_pv_async_pf_enabled(vcpu) &&
13162             !apf_put_user_ready(vcpu, work->arch.token)) {
13163                 vcpu->arch.apf.pageready_pending = true;
13164                 kvm_apic_set_irq(vcpu, &irq, NULL);
13165         }
13166
13167         vcpu->arch.apf.halted = false;
13168         vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
13169 }
13170
13171 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu)
13172 {
13173         kvm_make_request(KVM_REQ_APF_READY, vcpu);
13174         if (!vcpu->arch.apf.pageready_pending)
13175                 kvm_vcpu_kick(vcpu);
13176 }
13177
13178 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu)
13179 {
13180         if (!kvm_pv_async_pf_enabled(vcpu))
13181                 return true;
13182         else
13183                 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu);
13184 }
13185
13186 void kvm_arch_start_assignment(struct kvm *kvm)
13187 {
13188         if (atomic_inc_return(&kvm->arch.assigned_device_count) == 1)
13189                 static_call_cond(kvm_x86_pi_start_assignment)(kvm);
13190 }
13191 EXPORT_SYMBOL_GPL(kvm_arch_start_assignment);
13192
13193 void kvm_arch_end_assignment(struct kvm *kvm)
13194 {
13195         atomic_dec(&kvm->arch.assigned_device_count);
13196 }
13197 EXPORT_SYMBOL_GPL(kvm_arch_end_assignment);
13198
13199 bool noinstr kvm_arch_has_assigned_device(struct kvm *kvm)
13200 {
13201         return raw_atomic_read(&kvm->arch.assigned_device_count);
13202 }
13203 EXPORT_SYMBOL_GPL(kvm_arch_has_assigned_device);
13204
13205 void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
13206 {
13207         atomic_inc(&kvm->arch.noncoherent_dma_count);
13208 }
13209 EXPORT_SYMBOL_GPL(kvm_arch_register_noncoherent_dma);
13210
13211 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
13212 {
13213         atomic_dec(&kvm->arch.noncoherent_dma_count);
13214 }
13215 EXPORT_SYMBOL_GPL(kvm_arch_unregister_noncoherent_dma);
13216
13217 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
13218 {
13219         return atomic_read(&kvm->arch.noncoherent_dma_count);
13220 }
13221 EXPORT_SYMBOL_GPL(kvm_arch_has_noncoherent_dma);
13222
13223 bool kvm_arch_has_irq_bypass(void)
13224 {
13225         return enable_apicv && irq_remapping_cap(IRQ_POSTING_CAP);
13226 }
13227
13228 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
13229                                       struct irq_bypass_producer *prod)
13230 {
13231         struct kvm_kernel_irqfd *irqfd =
13232                 container_of(cons, struct kvm_kernel_irqfd, consumer);
13233         int ret;
13234
13235         irqfd->producer = prod;
13236         kvm_arch_start_assignment(irqfd->kvm);
13237         ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm,
13238                                          prod->irq, irqfd->gsi, 1);
13239
13240         if (ret)
13241                 kvm_arch_end_assignment(irqfd->kvm);
13242
13243         return ret;
13244 }
13245
13246 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
13247                                       struct irq_bypass_producer *prod)
13248 {
13249         int ret;
13250         struct kvm_kernel_irqfd *irqfd =
13251                 container_of(cons, struct kvm_kernel_irqfd, consumer);
13252
13253         WARN_ON(irqfd->producer != prod);
13254         irqfd->producer = NULL;
13255
13256         /*
13257          * When the producer of a consumer is unregistered, switch back to
13258          * remapped mode so that the current implementation can be reused
13259          * when the irq is masked/disabled or the consumer side (KVM in
13260          * this case) doesn't want to receive the interrupts.
13261          */
13262         ret = static_call(kvm_x86_pi_update_irte)(irqfd->kvm, prod->irq, irqfd->gsi, 0);
13263         if (ret)
13264                 printk(KERN_INFO "irq bypass consumer (token %p) unregistration fails: %d\n",
13265                        irqfd->consumer.token, ret);
13266
13267         kvm_arch_end_assignment(irqfd->kvm);
13268 }
13269
13270 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
13271                                    uint32_t guest_irq, bool set)
13272 {
13273         return static_call(kvm_x86_pi_update_irte)(kvm, host_irq, guest_irq, set);
13274 }
13275
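      /*
       * Used to decide whether an irqfd routing update requires the
       * posted-interrupt IRTE to be refreshed: only the MSI payload matters,
       * and any non-MSI entry is always treated as a change.
       */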
13276 bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old,
13277                                   struct kvm_kernel_irq_routing_entry *new)
13278 {
13279         if (new->type != KVM_IRQ_ROUTING_MSI)
13280                 return true;
13281
13282         return !!memcmp(&old->msi, &new->msi, sizeof(new->msi));
13283 }
13284
13285 bool kvm_vector_hashing_enabled(void)
13286 {
13287         return vector_hashing;
13288 }
13289
13290 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
13291 {
13292         return (vcpu->arch.msr_kvm_poll_control & 1) == 0;
13293 }
13294 EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
13295
13296
13297 int kvm_spec_ctrl_test_value(u64 value)
13298 {
13299         /*
13300          * Test that setting IA32_SPEC_CTRL to the given value
13301          * is allowed by the host processor.
13302          */
13303
13304         u64 saved_value;
13305         unsigned long flags;
13306         int ret = 0;
13307
13308         local_irq_save(flags);
13309
13310         if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
13311                 ret = 1;
13312         else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
13313                 ret = 1;
13314         else
13315                 wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
13316
13317         local_irq_restore(flags);
13318
13319         return ret;
13320 }
13321 EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
13322
13323 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
13324 {
13325         struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
13326         struct x86_exception fault;
13327         u64 access = error_code &
13328                 (PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
13329
13330         if (!(error_code & PFERR_PRESENT_MASK) ||
13331             mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) {
13332                 /*
13333                  * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
13334                  * tables probably do not match the TLB.  Just proceed
13335                  * with the error code that the processor gave.
13336                  */
13337                 fault.vector = PF_VECTOR;
13338                 fault.error_code_valid = true;
13339                 fault.error_code = error_code;
13340                 fault.nested_page_fault = false;
13341                 fault.address = gva;
13342                 fault.async_page_fault = false;
13343         }
13344         vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault);
13345 }
13346 EXPORT_SYMBOL_GPL(kvm_fixup_and_inject_pf_error);
13347
13348 /*
13349  * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
13350  * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
13351  * indicates whether exit to userspace is needed.
13352  */
13353 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
13354                               struct x86_exception *e)
13355 {
13356         if (r == X86EMUL_PROPAGATE_FAULT) {
13357                 if (KVM_BUG_ON(!e, vcpu->kvm))
13358                         return -EIO;
13359
13360                 kvm_inject_emulated_page_fault(vcpu, e);
13361                 return 1;
13362         }
13363
13364         /*
13365          * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
13366          * while handling a VMX instruction, KVM could've handled the request
13367          * correctly by exiting to userspace and performing I/O, but there
13368          * doesn't seem to be a real use-case behind such requests; just return
13369          * KVM_EXIT_INTERNAL_ERROR for now.
13370          */
13371         kvm_prepare_emulation_failure_exit(vcpu);
13372
13373         return 0;
13374 }
13375 EXPORT_SYMBOL_GPL(kvm_handle_memory_failure);
13376
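      /*
       * Common INVPCID emulation for the VMX and SVM exit handlers.  @gva
       * points at the 16-byte INVPCID descriptor (PCID in the low quadword,
       * linear address in the high quadword) and @type is the invalidation
       * type from the instruction's register operand.
       */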
13377 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
13378 {
13379         bool pcid_enabled;
13380         struct x86_exception e;
13381         struct {
13382                 u64 pcid;
13383                 u64 gla;
13384         } operand;
13385         int r;
13386
13387         r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
13388         if (r != X86EMUL_CONTINUE)
13389                 return kvm_handle_memory_failure(vcpu, r, &e);
13390
13391         if (operand.pcid >> 12 != 0) {
13392                 kvm_inject_gp(vcpu, 0);
13393                 return 1;
13394         }
13395
13396         pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE);
13397
13398         switch (type) {
13399         case INVPCID_TYPE_INDIV_ADDR:
13400                 if ((!pcid_enabled && (operand.pcid != 0)) ||
13401                     is_noncanonical_address(operand.gla, vcpu)) {
13402                         kvm_inject_gp(vcpu, 0);
13403                         return 1;
13404                 }
13405                 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid);
13406                 return kvm_skip_emulated_instruction(vcpu);
13407
13408         case INVPCID_TYPE_SINGLE_CTXT:
13409                 if (!pcid_enabled && (operand.pcid != 0)) {
13410                         kvm_inject_gp(vcpu, 0);
13411                         return 1;
13412                 }
13413
13414                 kvm_invalidate_pcid(vcpu, operand.pcid);
13415                 return kvm_skip_emulated_instruction(vcpu);
13416
13417         case INVPCID_TYPE_ALL_NON_GLOBAL:
13418                 /*
13419                  * Currently, KVM doesn't mark global entries in the shadow
13420                  * page tables, so a non-global flush just degenerates to a
13421                  * global flush. If needed, we could optimize this later by
13422                  * keeping track of global entries in shadow page tables.
13423                  */
13424
13425                 fallthrough;
13426         case INVPCID_TYPE_ALL_INCL_GLOBAL:
13427                 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
13428                 return kvm_skip_emulated_instruction(vcpu);
13429
13430         default:
13431                 kvm_inject_gp(vcpu, 0);
13432                 return 1;
13433         }
13434 }
13435 EXPORT_SYMBOL_GPL(kvm_handle_invpcid);
13436
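      /*
       * Userspace completion callback for emulated MMIO from an SEV-ES
       * guest.  MMIO is processed in chunks of at most 8 bytes: copy in the
       * data for reads, advance within (or to) the next fragment, and exit
       * back to userspace with another KVM_EXIT_MMIO round until all
       * fragments are done.
       */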
13437 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu)
13438 {
13439         struct kvm_run *run = vcpu->run;
13440         struct kvm_mmio_fragment *frag;
13441         unsigned int len;
13442
13443         BUG_ON(!vcpu->mmio_needed);
13444
13445         /* Complete previous fragment */
13446         frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
13447         len = min(8u, frag->len);
13448         if (!vcpu->mmio_is_write)
13449                 memcpy(frag->data, run->mmio.data, len);
13450
13451         if (frag->len <= 8) {
13452                 /* Switch to the next fragment. */
13453                 frag++;
13454                 vcpu->mmio_cur_fragment++;
13455         } else {
13456                 /* Go forward to the next mmio piece. */
13457                 frag->data += len;
13458                 frag->gpa += len;
13459                 frag->len -= len;
13460         }
13461
13462         if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
13463                 vcpu->mmio_needed = 0;
13464
13465                 /* VMG change, at this point, we're always done */
13466                 /* RIP has already been advanced */
13467                 return 1;
13468         }
13469
13470         /* More MMIO is needed */
13471         run->mmio.phys_addr = frag->gpa;
13472         run->mmio.len = min(8u, frag->len);
13473         run->mmio.is_write = vcpu->mmio_is_write;
13474         if (run->mmio.is_write)
13475                 memcpy(run->mmio.data, frag->data, min(8u, frag->len));
13476         run->exit_reason = KVM_EXIT_MMIO;
13477
13478         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13479
13480         return 0;
13481 }
13482
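      /*
       * Emulate an MMIO write for an SEV-ES guest, where KVM cannot decode
       * the faulting instruction and the GPA, size and data are supplied by
       * the guest (GHCB protocol).  Anything not fully handled by an
       * in-kernel device is forwarded to userspace as KVM_EXIT_MMIO and
       * finished by complete_sev_es_emulated_mmio().
       */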
13483 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13484                           void *data)
13485 {
13486         int handled;
13487         struct kvm_mmio_fragment *frag;
13488
13489         if (!data)
13490                 return -EINVAL;
13491
13492         handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13493         if (handled == bytes)
13494                 return 1;
13495
13496         bytes -= handled;
13497         gpa += handled;
13498         data += handled;
13499
13500         /* TODO: Check if the number of frags needs to be incremented. */
13501         frag = vcpu->mmio_fragments;
13502         vcpu->mmio_nr_fragments = 1;
13503         frag->len = bytes;
13504         frag->gpa = gpa;
13505         frag->data = data;
13506
13507         vcpu->mmio_needed = 1;
13508         vcpu->mmio_cur_fragment = 0;
13509
13510         vcpu->run->mmio.phys_addr = gpa;
13511         vcpu->run->mmio.len = min(8u, frag->len);
13512         vcpu->run->mmio.is_write = 1;
13513         memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
13514         vcpu->run->exit_reason = KVM_EXIT_MMIO;
13515
13516         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13517
13518         return 0;
13519 }
13520 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_write);
13521
13522 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes,
13523                          void *data)
13524 {
13525         int handled;
13526         struct kvm_mmio_fragment *frag;
13527
13528         if (!data)
13529                 return -EINVAL;
13530
13531         handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data);
13532         if (handled == bytes)
13533                 return 1;
13534
13535         bytes -= handled;
13536         gpa += handled;
13537         data += handled;
13538
13539         /* TODO: Check if the number of frags needs to be incremented. */
13540         frag = vcpu->mmio_fragments;
13541         vcpu->mmio_nr_fragments = 1;
13542         frag->len = bytes;
13543         frag->gpa = gpa;
13544         frag->data = data;
13545
13546         vcpu->mmio_needed = 1;
13547         vcpu->mmio_cur_fragment = 0;
13548
13549         vcpu->run->mmio.phys_addr = gpa;
13550         vcpu->run->mmio.len = min(8u, frag->len);
13551         vcpu->run->mmio.is_write = 0;
13552         vcpu->run->exit_reason = KVM_EXIT_MMIO;
13553
13554         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio;
13555
13556         return 0;
13557 }
13558 EXPORT_SYMBOL_GPL(kvm_sev_es_mmio_read);
13559
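      /*
       * SEV-ES string I/O (INS/OUTS) emulation.  The data buffer and repeat
       * count are tracked in vcpu->arch.sev_pio_data/sev_pio_count; the
       * transfer is pushed through emulator_pio_in()/emulator_pio_out() in
       * chunks of at most PAGE_SIZE bytes, and complete_sev_es_emulated_ins()
       * or complete_sev_es_emulated_outs() resume the loop after each exit
       * to userspace.
       */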
13560 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size)
13561 {
13562         vcpu->arch.sev_pio_count -= count;
13563         vcpu->arch.sev_pio_data += count * size;
13564 }
13565
13566 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13567                            unsigned int port);
13568
13569 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu)
13570 {
13571         int size = vcpu->arch.pio.size;
13572         int port = vcpu->arch.pio.port;
13573
13574         vcpu->arch.pio.count = 0;
13575         if (vcpu->arch.sev_pio_count)
13576                 return kvm_sev_es_outs(vcpu, size, port);
13577         return 1;
13578 }
13579
13580 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13581                            unsigned int port)
13582 {
13583         for (;;) {
13584                 unsigned int count =
13585                         min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13586                 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count);
13587
13588                 /* memcpy done already by emulator_pio_out.  */
13589                 advance_sev_es_emulated_pio(vcpu, count, size);
13590                 if (!ret)
13591                         break;
13592
13593                 /* Emulation done by the kernel.  */
13594                 if (!vcpu->arch.sev_pio_count)
13595                         return 1;
13596         }
13597
13598         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs;
13599         return 0;
13600 }
13601
13602 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13603                           unsigned int port);
13604
13605 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu)
13606 {
13607         unsigned count = vcpu->arch.pio.count;
13608         int size = vcpu->arch.pio.size;
13609         int port = vcpu->arch.pio.port;
13610
13611         complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data);
13612         advance_sev_es_emulated_pio(vcpu, count, size);
13613         if (vcpu->arch.sev_pio_count)
13614                 return kvm_sev_es_ins(vcpu, size, port);
13615         return 1;
13616 }
13617
13618 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13619                           unsigned int port)
13620 {
13621         for (;;) {
13622                 unsigned int count =
13623                         min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count);
13624                 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count))
13625                         break;
13626
13627                 /* Emulation done by the kernel.  */
13628                 advance_sev_es_emulated_pio(vcpu, count, size);
13629                 if (!vcpu->arch.sev_pio_count)
13630                         return 1;
13631         }
13632
13633         vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins;
13634         return 0;
13635 }
13636
13637 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
13638                          unsigned int port, void *data,  unsigned int count,
13639                          int in)
13640 {
13641         vcpu->arch.sev_pio_data = data;
13642         vcpu->arch.sev_pio_count = count;
13643         return in ? kvm_sev_es_ins(vcpu, size, port)
13644                   : kvm_sev_es_outs(vcpu, size, port);
13645 }
13646 EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);
13647
13648 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
13649 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
13650 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
13651 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
13652 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
13653 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
13654 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
13655 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
13656 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
13657 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
13658 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
13659 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter_failed);
13660 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
13661 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
13662 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
13663 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_write_tsc_offset);
13664 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ple_window_update);
13665 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pml_full);
13666 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_pi_irte_update);
13667 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
13668 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
13669 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
13670 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_kick_vcpu_slowpath);
13671 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_doorbell);
13672 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
13673 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
13674 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
13675 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
13676 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
13677
13678 static int __init kvm_x86_init(void)
13679 {
13680         kvm_mmu_x86_module_init();
13681         mitigate_smt_rsb &= boot_cpu_has_bug(X86_BUG_SMT_RSB) && cpu_smt_possible();
13682         return 0;
13683 }
13684 module_init(kvm_x86_init);
13685
13686 static void __exit kvm_x86_exit(void)
13687 {
13688         /*
13689          * If module_init() is implemented, module_exit() must also be
13690          * implemented to allow module unload.
13691          */
13692 }
13693 module_exit(kvm_x86_exit);