/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Avi Kivity   <avi@qumranet.com>
 * Yaniv Kamay  <yaniv@qumranet.com>
 * Amit Shah    <amit.shah@qumranet.com>
 * Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS

#include <asm/debugreg.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS						\
	(~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
			  | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
			  | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS						\
	(~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
			  | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE	\
			  | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR	\
			  | X86_CR4_OSXSAVE				\
			  | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall by default because it's emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
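/*
 * Illustrative note (not in the original source): VM_STAT()/VCPU_STAT()
 * expand to the offset/type initializer pair consumed by
 * struct kvm_stats_debugfs_item in debugfs_entries[] below, so each
 * table row binds a debugfs file name to a counter in struct kvm or
 * struct kvm_vcpu.
 */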
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
					     struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static int ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
	int nr;
	u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
	struct user_return_notifier urn;
	bool registered;
	struct kvm_shared_msr_values {
		u64 host;
		u64 curr;
	} values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "pf_fixed", VCPU_STAT(pf_fixed) },
	{ "pf_guest", VCPU_STAT(pf_guest) },
	{ "tlb_flush", VCPU_STAT(tlb_flush) },
	{ "invlpg", VCPU_STAT(invlpg) },
	{ "exits", VCPU_STAT(exits) },
	{ "io_exits", VCPU_STAT(io_exits) },
	{ "mmio_exits", VCPU_STAT(mmio_exits) },
	{ "signal_exits", VCPU_STAT(signal_exits) },
	{ "irq_window", VCPU_STAT(irq_window_exits) },
	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
	{ "halt_exits", VCPU_STAT(halt_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "hypercalls", VCPU_STAT(hypercalls) },
	{ "request_irq", VCPU_STAT(request_irq_exits) },
	{ "irq_exits", VCPU_STAT(irq_exits) },
	{ "host_state_reload", VCPU_STAT(host_state_reload) },
	{ "efer_reload", VCPU_STAT(efer_reload) },
	{ "fpu_reload", VCPU_STAT(fpu_reload) },
	{ "insn_emulation", VCPU_STAT(insn_emulation) },
	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
	{ "irq_injections", VCPU_STAT(irq_injections) },
	{ "nmi_injections", VCPU_STAT(nmi_injections) },
	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
	{ "mmu_flooded", VM_STAT(mmu_flooded) },
	{ "mmu_recycled", VM_STAT(mmu_recycled) },
	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
	{ "mmu_unsync", VM_STAT(mmu_unsync) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ "largepages", VM_STAT(lpages) },
	{ NULL }
};

u64 __read_mostly host_xcr0;
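/*
 * Illustrative note (not in the original source): the shared-MSR helpers
 * below let KVM defer restoring host MSR values until the CPU actually
 * returns to userspace.  As a sketch, the expected call flow is:
 *
 *	kvm_define_shared_msr(slot, msr)  - at init, declare an MSR slot
 *	kvm_shared_msr_cpu_online()       - snapshot host values per cpu
 *	kvm_set_shared_msr(slot, val, ~0) - before entering the guest
 *	kvm_on_user_return()              - notifier restores host values
 */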
static void kvm_on_user_return(struct user_return_notifier *urn)
{
	unsigned slot;
	struct kvm_shared_msrs *locals
		= container_of(urn, struct kvm_shared_msrs, urn);
	struct kvm_shared_msr_values *values;

	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
		values = &locals->values[slot];
		if (values->host != values->curr) {
			wrmsrl(shared_msrs_global.msrs[slot], values->host);
			values->curr = values->host;
		}
	}
	locals->registered = false;
	user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
	struct kvm_shared_msrs *smsr;
	u64 value;

	smsr = &__get_cpu_var(shared_msrs);
	/* only read here; nobody should modify it at this time,
	 * so no lock is needed */
	if (slot >= shared_msrs_global.nr) {
		printk(KERN_ERR "kvm: invalid MSR slot!");
		return;
	}
	rdmsrl_safe(msr, &value);
	smsr->values[slot].host = value;
	smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
	if (slot >= shared_msrs_global.nr)
		shared_msrs_global.nr = slot + 1;
	shared_msrs_global.msrs[slot] = msr;
	/* make sure shared_msrs_global has been updated before anyone reads it */
	smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
	unsigned i;

	for (i = 0; i < shared_msrs_global.nr; ++i)
		shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (((value ^ smsr->values[slot].curr) & mask) == 0)
		return;
	smsr->values[slot].curr = value;
	wrmsrl(shared_msrs_global.msrs[slot], value);
	if (!smsr->registered) {
		smsr->urn.on_user_return = kvm_on_user_return;
		user_return_notifier_register(&smsr->urn);
		smsr->registered = true;
	}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);

static void drop_user_return_notifiers(void *ignore)
{
	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

	if (smsr->registered)
		kvm_on_user_return(&smsr->urn);
}
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return vcpu->arch.apic_base;
	else
		return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN		0
#define EXCPT_CONTRIBUTORY	1
#define EXCPT_PF		2

static int exception_class(int vector)
{
	switch (vector) {
	case PF_VECTOR:
		return EXCPT_PF;
	case DE_VECTOR:
	case TS_VECTOR:
	case NP_VECTOR:
	case SS_VECTOR:
	case GP_VECTOR:
		return EXCPT_CONTRIBUTORY;
	default:
		break;
	}
	return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
				   unsigned nr, bool has_error, u32 error_code,
				   bool reinject)
{
	u32 prev_nr;
	int class1, class2;

	if (!vcpu->arch.exception.pending) {
	queue:
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = has_error;
		vcpu->arch.exception.nr = nr;
		vcpu->arch.exception.error_code = error_code;
		vcpu->arch.exception.reinject = reinject;
		return;
	}

	/* check the previously queued exception */
	prev_nr = vcpu->arch.exception.nr;
	if (prev_nr == DF_VECTOR) {
		/* triple fault -> shutdown */
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		return;
	}
	class1 = exception_class(prev_nr);
	class2 = exception_class(nr);
	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
		/* generate double fault per SDM Table 5-5 */
		vcpu->arch.exception.pending = true;
		vcpu->arch.exception.has_error_code = true;
		vcpu->arch.exception.nr = DF_VECTOR;
		vcpu->arch.exception.error_code = 0;
	} else
		/* replace the previous exception with the new one, in the
		   hope that instruction re-execution will regenerate the
		   lost exception */
		goto queue;
}
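/*
 * Illustrative summary (not in the original source) of the SDM Table 5-5
 * rules implemented above; #PF forms its own class, and #DE/#TS/#NP/#SS/#GP
 * are "contributory":
 *
 *	first \ second	benign		contributory	page fault
 *	benign		deliver 2nd	deliver 2nd	deliver 2nd
 *	contributory	deliver 2nd	#DF		deliver 2nd
 *	page fault	deliver 2nd	#DF		#DF
 *	#DF		any second exception -> triple fault (shutdown)
 */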
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
	kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			   u32 error_code)
{
	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = addr;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
	kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
		return true;
	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
	return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
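	/*
	 * Illustrative note (not in the original source): in PAE mode CR3
	 * points at a 32-byte-aligned table of four 8-byte PDPTEs, so bits
	 * 5..11 of CR3 locate the table within its page and the "<< 2"
	 * above turns that table index into the index of its first u64.
	 */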
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;
		goto out;
	}
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if (is_present_gpte(pdpte[i]) &&
		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(VCPU_EXREG_PDPTR,
		  (unsigned long *)&vcpu->arch.regs_dirty);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
	u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
	bool changed = true;
	int r;

	if (is_long_mode(vcpu) || !is_pae(vcpu))
		return false;

	if (!test_bit(VCPU_EXREG_PDPTR,
		      (unsigned long *)&vcpu->arch.regs_avail))
		return true;

	r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
	if (r < 0)
		goto out;
	changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
	return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	unsigned long old_cr0 = kvm_read_cr0(vcpu);
	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
				    X86_CR0_CD | X86_CR0_NW;

#ifdef CONFIG_X86_64
	if (cr0 & 0xffffffff00000000UL)
		return 1;
#endif

	cr0 &= ~CR0_RESERVED_BITS;

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
		return 1;

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
		return 1;

	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->arch.efer & EFER_LME)) {
			int cs_db, cs_l;

			if (!is_pae(vcpu))
				return 1;
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l)
				return 1;
		} else
#endif
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
			return 1;
	}

	kvm_x86_ops->set_cr0(vcpu, cr0);

	if ((cr0 ^ old_cr0) & update_bits)
		kvm_mmu_reset_context(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	u64 xcr0;

	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
		return 1;
	if (xcr0 & ~host_xcr0)
		return 1;
	vcpu->arch.xcr0 = xcr0;
	vcpu->guest_xcr0_loaded = 0;
	return 0;
}

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return;

	/* Update OSXSAVE bit */
	if (cpu_has_xsave && best->function == 0x1) {
		best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= bit(X86_FEATURE_OSXSAVE);
	}
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long old_cr4 = kvm_read_cr4(vcpu);
	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

	if (cr4 & CR4_RESERVED_BITS)
		return 1;

	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
		return 1;

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE))
			return 1;
	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
		   && ((cr4 ^ old_cr4) & pdptr_bits)
		   && !load_pdptrs(vcpu, vcpu->arch.cr3))
		return 1;

	if (cr4 & X86_CR4_VMXE)
		return 1;

	kvm_x86_ops->set_cr4(vcpu, cr4);

	if ((cr4 ^ old_cr4) & pdptr_bits)
		kvm_mmu_reset_context(vcpu);

	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
		update_cpuid(vcpu);

	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
		kvm_mmu_sync_roots(vcpu);
		kvm_mmu_flush_tlb(vcpu);
		return 0;
	}

	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS)
			return 1;
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS)
				return 1;
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
				return 1;
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		return 1;
	vcpu->arch.cr3 = cr3;
	vcpu->arch.mmu.new_cr3(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (cr8 & CR8_RESERVED_BITS)
		return 1;
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_tpr(vcpu, cr8);
	else
		vcpu->arch.cr8 = cr8;
	return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	if (__kvm_set_cr8(vcpu, cr8))
		kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm))
		return kvm_lapic_get_cr8(vcpu);
	else
		return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = val;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = val;
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	case 6:
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1; /* #UD */
		/* fall through */
	default: /* 7 */
		if (val & 0xffffffff00000000ULL)
			return -1; /* #GP */
		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
		}
		break;
	}

	return 0;
}

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
	int res;

	res = __kvm_set_dr(vcpu, dr, val);
	if (res > 0)
		kvm_queue_exception(vcpu, UD_VECTOR);
	else if (res < 0)
		kvm_inject_gp(vcpu, 0);

	return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	switch (dr) {
	case 0 ... 3:
		*val = vcpu->arch.db[dr];
		break;
	case 4:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	case 6:
		*val = vcpu->arch.dr6;
		break;
	case 5:
		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
			return 1;
		/* fall through */
	default: /* 7 */
		*val = vcpu->arch.dr7;
		break;
	}

	return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
	if (_kvm_get_dr(vcpu, dr, val)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  This capabilities test skips MSRs that
 * are kvm-specific; those are put in the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN	7
static u32 msrs_to_save[] = {
	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
	HV_X64_MSR_APIC_ASSIST_PAGE,
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
#ifdef CONFIG_X86_64
	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
	MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
	MSR_IA32_MISC_ENABLE,
};

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	u64 old_efer = vcpu->arch.efer;

	if (efer & efer_reserved_bits)
		return 1;

	if (is_paging(vcpu)
	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
		return 1;

	if (efer & EFER_FFXSR) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
			return 1;
	}

	if (efer & EFER_SVME) {
		struct kvm_cpuid_entry2 *feat;

		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
			return 1;
	}

	efer &= ~EFER_LMA;
	efer |= vcpu->arch.efer & EFER_LMA;

	kvm_x86_ops->set_efer(vcpu, efer);

	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
	kvm_mmu_reset_context(vcpu);

	/* Update reserved bits */
	if ((efer ^ old_efer) & EFER_NX)
		kvm_mmu_reset_context(vcpu);

	return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
	efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
	return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
	int version;
	int r;
	struct pvclock_wall_clock wc;
	struct timespec boot;

	if (!wall_clock)
		return;

	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
	if (r)
		return;

	if (version & 1)
		++version;  /* first time write, random junk */

	++version;

	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

	/*
	 * The guest calculates current wall clock time by adding
	 * system time (updated by kvm_write_guest_time below) to the
	 * wall clock specified here.  guest system time equals host
	 * system time for us, thus we must fill in host boot time here.
	 */
	getboottime(&boot);

	wc.sec = boot.tv_sec;
	wc.nsec = boot.tv_nsec;
	wc.version = version;

	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

	version++;
	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
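/*
 * Illustrative sketch (not in the original source) of the guest-side
 * calculation the comment above describes: wall-clock time is the boot
 * time published here plus the per-vcpu pvclock system time, e.g.
 *
 *	wall_ns = (u64)wc.sec * NSEC_PER_SEC + wc.nsec
 *		  + pvclock_system_time_ns();   // hypothetical helper that
 *		                                // reads the vcpu time page
 *
 * with wc.version re-read around the reads so a torn update (odd or
 * changed version) can be retried.
 */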
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
	uint32_t quotient, remainder;

	/* Don't try to replace this with do_div(); this one calculates
	 * "(dividend << 32) / divisor" */
	asm("divl %4"
	    : "=a" (quotient), "=d" (remainder)
	    : "0" (0), "1" (dividend), "r" (divisor) );
	return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
	uint64_t nsecs = 1000000000LL;
	int8_t shift = 0;
	uint64_t tps64;
	uint32_t tps32;

	tps64 = tsc_khz * 1000LL;
	while (tps64 > nsecs*2) {
		tps64 >>= 1;
		shift--;
	}

	tps32 = (uint32_t)tps64;
	while (tps32 <= (uint32_t)nsecs) {
		tps32 <<= 1;
		shift++;
	}

	hv_clock->tsc_shift = shift;
	hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

	pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
		 __func__, tsc_khz, hv_clock->tsc_shift,
		 hv_clock->tsc_to_system_mul);
}
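/*
 * Illustrative worked example (not in the original source), assuming a
 * 2 GHz TSC: tps64 = 2e9 is not above nsecs*2 and tps32 = 2e9 already
 * exceeds nsecs, so shift stays 0 and tsc_to_system_mul becomes
 * (1e9 << 32) / 2e9 = 0x80000000.  The guest converts TSC deltas with
 * ns = ((delta << shift) * mul) >> 32, i.e. delta / 2 here - exactly
 * 0.5 ns per 2 GHz tick.
 */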
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
	struct timespec ts;
	unsigned long flags;
	struct kvm_vcpu_arch *vcpu = &v->arch;
	void *shared_kaddr;
	unsigned long this_tsc_khz;

	if (!vcpu->time_page)
		return;

	this_tsc_khz = get_cpu_var(cpu_tsc_khz);
	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
		vcpu->hv_clock_tsc_khz = this_tsc_khz;
	}
	put_cpu_var(cpu_tsc_khz);

	/* Keep irq disabled to prevent changes to the clock */
	local_irq_save(flags);
	kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
	ktime_get_ts(&ts);
	monotonic_to_bootbased(&ts);
	local_irq_restore(flags);

	/* With all the info we got, fill in the values */

	vcpu->hv_clock.system_time = ts.tv_nsec +
				     (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

	vcpu->hv_clock.flags = 0;

	/*
	 * The interface expects us to write an even number signaling that the
	 * update is finished.  Since the guest won't see the intermediate
	 * state, we just increase by 2 at the end.
	 */
	vcpu->hv_clock.version += 2;

	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
	       sizeof(vcpu->hv_clock));

	kunmap_atomic(shared_kaddr, KM_USER0);

	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
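/*
 * Illustrative guest-side reader (not in the original source) for the
 * even/odd version protocol described above, seqlock style:
 *
 *	do {
 *		version = time->version;        // odd => update in flight
 *		rmb();
 *		delta = rdtsc() - time->tsc_timestamp;
 *		ns = scale(delta, time->tsc_to_system_mul, time->tsc_shift)
 *		     + time->system_time;
 *		rmb();
 *	} while ((time->version & 1) || (version != time->version));
 */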
static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
	struct kvm_vcpu_arch *vcpu = &v->arch;

	if (!vcpu->time_page)
		return 0;
	kvm_make_request(KVM_REQ_KVMCLOCK_UPDATE, v);
	return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!mtrr_valid(vcpu, msr, data))
		return 1;

	if (msr == MSR_MTRRdefType) {
		vcpu->arch.mtrr_state.def_type = data;
		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
	} else if (msr == MSR_MTRRfix64K_00000)
		p[0] = data;
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		p[1 + msr - MSR_MTRRfix16K_80000] = data;
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		/* MSR 0x200 + 2n is PhysBase(n), 0x200 + 2n + 1 is PhysMask(n) */
		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pt = data;
	}

	kvm_mmu_reset_context(vcpu);
	return 0;
}

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_MCG_STATUS:
		vcpu->arch.mcg_status = data;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		if (data != 0 && data != ~(u64)0)
			return -1;
		vcpu->arch.mcg_ctl = data;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			/* only 0 or all 1s can be written to IA32_MCi_CTL;
			 * some Linux kernels, though, clear bit 10 in bank 4
			 * to work around a BIOS/GART TLB issue on AMD K8s, so
			 * ignore that to avoid an uncaught #GP in the guest
			 */
			if ((offset & 0x3) == 0 &&
			    data != 0 && (data | (1 << 10)) != ~(u64)0)
				return -1;
			vcpu->arch.mce_banks[offset] = data;
			break;
		}
		return 1;
	}
	return 0;
}
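/*
 * Illustrative note (not in the original source): mce_banks[] above
 * stores four u64 registers per bank in MSR order - CTL, STATUS, ADDR,
 * MISC - so bank i's CTL is mce_banks[i * 4] and the raw "offset" from
 * MSR_IA32_MC0_CTL indexes the flat array directly.  The ioctl path in
 * kvm_vcpu_ioctl_x86_set_mce() below relies on the same layout.
 */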
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	int lm = is_long_mode(vcpu);
	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
		: kvm->arch.xen_hvm_config.blob_size_32;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	u8 *page;
	int r;

	r = -E2BIG;
	if (page_num >= blob_size)
		goto out;
	r = -ENOMEM;
	page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto out;
	r = -EFAULT;
	if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
		goto out_free;
	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
		goto out_free;
	r = 0;
out_free:
	kfree(page);
out:
	return r;
}

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		r = true;
		break;
	}

	return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		kvm->arch.hv_guest_os_id = data;
		/* setting guest os id to zero disables hypercall page */
		if (!kvm->arch.hv_guest_os_id)
			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u64 gfn;
		unsigned long addr;
		u8 instructions[4];

		/* if guest os id is not set hypercall should remain disabled */
		if (!kvm->arch.hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			kvm->arch.hv_hypercall = data;
			break;
		}
		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
		addr = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(addr))
			return 1;
		kvm_x86_ops->patch_hypercall(vcpu, instructions);
		((unsigned char *)instructions)[3] = 0xc3; /* ret */
		if (copy_to_user((void __user *)addr, instructions, 4))
			return 1;
		kvm->arch.hv_hypercall = data;
		break;
	}
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}
	return 0;
}

static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case HV_X64_MSR_APIC_ASSIST_PAGE: {
		unsigned long addr;

		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
			vcpu->arch.hv_vapic = data;
			break;
		}
		addr = gfn_to_hva(vcpu->kvm, data >>
				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
		if (kvm_is_error_hva(addr))
			return 1;
		if (clear_user((void __user *)addr, PAGE_SIZE))
			return 1;
		vcpu->arch.hv_vapic = data;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	default:
		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
			  "data 0x%llx\n", msr, data);
		return 1;
	}

	return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	switch (msr) {
	case MSR_EFER:
		return set_efer(vcpu, data);
	case MSR_K7_HWCR:
		data &= ~(u64)0x40;	/* ignore flush filter disable */
		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
				data);
			return 1;
		}
		break;
	case MSR_FAM10H_MMIO_CONF_BASE:
		if (data != 0) {
			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
				"0x%llx\n", data);
			return 1;
		}
		break;
	case MSR_AMD64_NB_CFG:
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!data) {
			/* We support the non-activated case already */
			break;
		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
			/* Values other than LBR and BTF are vendor-specific,
			   thus reserved and should throw a #GP */
			return 1;
		}
		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
			__func__, data);
		break;
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_UCODE_WRITE:
	case MSR_VM_HSAVE_PA:
	case MSR_AMD64_PATCH_LOADER:
		break;
	case 0x200 ... 0x2ff:
		return set_msr_mtrr(vcpu, msr, data);
	case MSR_IA32_APICBASE:
		kvm_set_apic_base(vcpu, data);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_write(vcpu, msr, data);
	case MSR_IA32_MISC_ENABLE:
		vcpu->arch.ia32_misc_enable_msr = data;
		break;
	case MSR_KVM_WALL_CLOCK_NEW:
	case MSR_KVM_WALL_CLOCK:
		vcpu->kvm->arch.wall_clock = data;
		kvm_write_wall_clock(vcpu->kvm, data);
		break;
	case MSR_KVM_SYSTEM_TIME_NEW:
	case MSR_KVM_SYSTEM_TIME: {
		if (vcpu->arch.time_page) {
			kvm_release_page_dirty(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		vcpu->arch.time = data;

		/* we verify if the enable bit is set... */
		if (!(data & 1))
			break;

		/* ...but clean it before doing the actual write */
		vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

		vcpu->arch.time_page =
				gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

		if (is_error_page(vcpu->arch.time_page)) {
			kvm_release_page_clean(vcpu->arch.time_page);
			vcpu->arch.time_page = NULL;
		}

		kvm_request_guest_time_update(vcpu);
		break;
	}
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return set_msr_mce(vcpu, msr, data);

	/* Performance counters are not protected by a CPUID bit,
	 * so we should check all of them in the generic path for the sake of
	 * cross vendor migration.
	 * Writing a zero into the event select MSRs disables them,
	 * which we perfectly emulate ;-).  Any other value should be at least
	 * reported, some guests depend on them.
	 */
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_EVNTSEL3:
		if (data != 0)
			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
				"0x%x data 0x%llx\n", msr, data);
		break;
	/* at least RHEL 4 unconditionally writes to the perfctr registers,
	 * so we ignore writes to make it happy.
	 */
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_K7_PERFCTR0:
	case MSR_K7_PERFCTR1:
	case MSR_K7_PERFCTR2:
	case MSR_K7_PERFCTR3:
		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
			"0x%x data 0x%llx\n", msr, data);
		break;
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = set_msr_hyperv_pw(vcpu, msr, data);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return set_msr_hyperv(vcpu, msr, data);
	default:
		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
			return xen_hvm_config(vcpu, data);
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
				msr, data);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
				msr, data);
			break;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

	if (!msr_mtrr_valid(msr))
		return 1;

	if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.def_type +
			 (vcpu->arch.mtrr_state.enabled << 10);
	else if (msr == MSR_MTRRfix64K_00000)
		*pdata = p[0];
	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		int idx, is_mtrr_mask;
		u64 *pt;

		idx = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * idx;
		if (!is_mtrr_mask)
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
		else
			pt =
			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
		*pdata = *pt;
	}

	return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;
	u64 mcg_cap = vcpu->arch.mcg_cap;
	unsigned bank_num = mcg_cap & 0xff;

	switch (msr) {
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
		data = 0;
		break;
	case MSR_IA32_MCG_CAP:
		data = vcpu->arch.mcg_cap;
		break;
	case MSR_IA32_MCG_CTL:
		if (!(mcg_cap & MCG_CTL_P))
			return 1;
		data = vcpu->arch.mcg_ctl;
		break;
	case MSR_IA32_MCG_STATUS:
		data = vcpu->arch.mcg_status;
		break;
	default:
		if (msr >= MSR_IA32_MC0_CTL &&
		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
			u32 offset = msr - MSR_IA32_MC0_CTL;
			data = vcpu->arch.mce_banks[offset];
			break;
		}
		return 1;
	}
	*pdata = data;
	return 0;
}

static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = kvm->arch.hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = kvm->arch.hv_hypercall;
		break;
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data = 0;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		int r;
		struct kvm_vcpu *v;
		kvm_for_each_vcpu(r, v, vcpu->kvm)
			if (v == vcpu)
				data = r;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	default:
		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_EBL_CR_POWERON:
	case MSR_IA32_DEBUGCTLMSR:
	case MSR_IA32_LASTBRANCHFROMIP:
	case MSR_IA32_LASTBRANCHTOIP:
	case MSR_IA32_LASTINTFROMIP:
	case MSR_IA32_LASTINTTOIP:
	case MSR_VM_HSAVE_PA:
	case MSR_P6_PERFCTR0:
	case MSR_P6_PERFCTR1:
	case MSR_P6_EVNTSEL0:
	case MSR_P6_EVNTSEL1:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
	case MSR_K8_INT_PENDING_MSG:
	case MSR_AMD64_NB_CFG:
	case MSR_FAM10H_MMIO_CONF_BASE:
		data = 0;
		break;
	case MSR_MTRRcap:
		data = 0x500 | KVM_NR_VAR_MTRR;
		break;
	case 0x200 ... 0x2ff:
		return get_msr_mtrr(vcpu, msr, pdata);
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
		return kvm_x2apic_msr_read(vcpu, msr, pdata);
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->arch.ia32_misc_enable_msr;
		break;
	case MSR_IA32_PERF_STATUS:
		/* TSC increment by tick */
		data = 1000ULL;
		/* CPU multiplier */
		data |= (((uint64_t)4ULL) << 40);
		break;
	case MSR_EFER:
		data = vcpu->arch.efer;
		break;
	case MSR_KVM_WALL_CLOCK:
	case MSR_KVM_WALL_CLOCK_NEW:
		data = vcpu->kvm->arch.wall_clock;
		break;
	case MSR_KVM_SYSTEM_TIME:
	case MSR_KVM_SYSTEM_TIME_NEW:
		data = vcpu->arch.time;
		break;
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MCG_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
		return get_msr_mce(vcpu, msr, pdata);
	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
		if (kvm_hv_msr_partition_wide(msr)) {
			int r;
			mutex_lock(&vcpu->kvm->lock);
			r = get_msr_hyperv_pw(vcpu, msr, pdata);
			mutex_unlock(&vcpu->kvm->lock);
			return r;
		} else
			return get_msr_hyperv(vcpu, msr, pdata);
	default:
		if (!ignore_msrs) {
			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
			return 1;
		} else {
			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
			data = 0;
		}
		break;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);

/*
 * Read or write a bunch of msrs.  All parameters are kernel addresses.
 *
 * @return number of msrs set successfully.
 */
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
		    struct kvm_msr_entry *entries,
		    int (*do_msr)(struct kvm_vcpu *vcpu,
				  unsigned index, u64 *data))
{
	int i, idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	for (i = 0; i < msrs->nmsrs; ++i)
		if (do_msr(vcpu, entries[i].index, &entries[i].data))
			break;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return i;
}

/*
 * Read or write a bunch of msrs.  Parameters are user addresses.
 *
 * @return number of msrs set successfully.
 */
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
		  int (*do_msr)(struct kvm_vcpu *vcpu,
				unsigned index, u64 *data),
		  int writeback)
{
	struct kvm_msrs msrs;
	struct kvm_msr_entry *entries;
	int r, n;
	unsigned size;

	r = -EFAULT;
	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
		goto out;

	r = -E2BIG;
	if (msrs.nmsrs >= MAX_IO_MSRS)
		goto out;

	r = -ENOMEM;
	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
	entries = kmalloc(size, GFP_KERNEL);
	if (!entries)
		goto out;

	r = -EFAULT;
	if (copy_from_user(entries, user_msrs->entries, size))
		goto out_free;

	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
	if (r < 0)
		goto out_free;

	r = -EFAULT;
	if (writeback && copy_to_user(user_msrs->entries, entries, size))
		goto out_free;

	r = n;

out_free:
	kfree(entries);
out:
	return r;
}
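/*
 * Illustrative userspace sketch (not in the original source) of the
 * ioctl these helpers back; names follow <linux/kvm.h>:
 *
 *	struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } m = {
 *		.hdr.nmsrs = 1,
 *		.e[0].index = MSR_IA32_MISC_ENABLE,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &m) != 1)  // returns #msrs read
 *		err(1, "KVM_GET_MSRS");
 */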
1648 int kvm_dev_ioctl_check_extension(long ext)
1653 case KVM_CAP_IRQCHIP:
1655 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1656 case KVM_CAP_SET_TSS_ADDR:
1657 case KVM_CAP_EXT_CPUID:
1658 case KVM_CAP_CLOCKSOURCE:
1660 case KVM_CAP_NOP_IO_DELAY:
1661 case KVM_CAP_MP_STATE:
1662 case KVM_CAP_SYNC_MMU:
1663 case KVM_CAP_REINJECT_CONTROL:
1664 case KVM_CAP_IRQ_INJECT_STATUS:
1665 case KVM_CAP_ASSIGN_DEV_IRQ:
1667 case KVM_CAP_IOEVENTFD:
1669 case KVM_CAP_PIT_STATE2:
1670 case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1671 case KVM_CAP_XEN_HVM:
1672 case KVM_CAP_ADJUST_CLOCK:
1673 case KVM_CAP_VCPU_EVENTS:
1674 case KVM_CAP_HYPERV:
1675 case KVM_CAP_HYPERV_VAPIC:
1676 case KVM_CAP_HYPERV_SPIN:
1677 case KVM_CAP_PCI_SEGMENT:
1678 case KVM_CAP_DEBUGREGS:
1679 case KVM_CAP_X86_ROBUST_SINGLESTEP:
1683 case KVM_CAP_COALESCED_MMIO:
1684 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1687 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1689 case KVM_CAP_NR_VCPUS:
1692 case KVM_CAP_NR_MEMSLOTS:
1693 r = KVM_MEMORY_SLOTS;
1695 case KVM_CAP_PV_MMU: /* obsolete */
1702 r = KVM_MAX_MCE_BANKS;
1715 long kvm_arch_dev_ioctl(struct file *filp,
1716 unsigned int ioctl, unsigned long arg)
1718 void __user *argp = (void __user *)arg;
1722 case KVM_GET_MSR_INDEX_LIST: {
1723 struct kvm_msr_list __user *user_msr_list = argp;
1724 struct kvm_msr_list msr_list;
1728 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1731 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1732 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1735 if (n < msr_list.nmsrs)
1738 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1739 num_msrs_to_save * sizeof(u32)))
1741 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
1743 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1748 case KVM_GET_SUPPORTED_CPUID: {
1749 struct kvm_cpuid2 __user *cpuid_arg = argp;
1750 struct kvm_cpuid2 cpuid;
1753 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1755 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1756 cpuid_arg->entries);
1761 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1766 case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1769 mce_cap = KVM_MCE_CAP_SUPPORTED;
1771 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1783 static void wbinvd_ipi(void *garbage)
1788 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
1790 return vcpu->kvm->arch.iommu_domain &&
1791 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
1794 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1796 /* Address WBINVD may be executed by guest */
1797 if (need_emulate_wbinvd(vcpu)) {
1798 if (kvm_x86_ops->has_wbinvd_exit())
1799 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
1800 else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
1801 smp_call_function_single(vcpu->cpu,
1802 wbinvd_ipi, NULL, 1);
1805 kvm_x86_ops->vcpu_load(vcpu, cpu);
1806 if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1807 unsigned long khz = cpufreq_quick_get(cpu);
1810 per_cpu(cpu_tsc_khz, cpu) = khz;
1812 kvm_request_guest_time_update(vcpu);
1815 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1817 kvm_x86_ops->vcpu_put(vcpu);
1818 kvm_put_guest_fpu(vcpu);
1821 static int is_efer_nx(void)
1823 unsigned long long efer = 0;
1825 rdmsrl_safe(MSR_EFER, &efer);
1826 return efer & EFER_NX;
1829 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1832 struct kvm_cpuid_entry2 *e, *entry;
1835 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1836 e = &vcpu->arch.cpuid_entries[i];
1837 if (e->function == 0x80000001) {
1842 if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
1843 entry->edx &= ~(1 << 20);
1844 printk(KERN_INFO "kvm: guest NX capability removed\n");
1848 /* when an old userspace process fills a new kernel module */
1849 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1850 struct kvm_cpuid *cpuid,
1851 struct kvm_cpuid_entry __user *entries)
1854 struct kvm_cpuid_entry *cpuid_entries;
1857 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1860 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1864 if (copy_from_user(cpuid_entries, entries,
1865 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1867 for (i = 0; i < cpuid->nent; i++) {
1868 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1869 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1870 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1871 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1872 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1873 vcpu->arch.cpuid_entries[i].index = 0;
1874 vcpu->arch.cpuid_entries[i].flags = 0;
1875 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1876 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1877 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1879 vcpu->arch.cpuid_nent = cpuid->nent;
1880 cpuid_fix_nx_cap(vcpu);
1882 kvm_apic_set_version(vcpu);
1883 kvm_x86_ops->cpuid_update(vcpu);
1887 vfree(cpuid_entries);
1892 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1893 struct kvm_cpuid2 *cpuid,
1894 struct kvm_cpuid_entry2 __user *entries)
1899 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1902 if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
1903 cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1905 vcpu->arch.cpuid_nent = cpuid->nent;
1906 kvm_apic_set_version(vcpu);
1907 kvm_x86_ops->cpuid_update(vcpu);
1915 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1916 struct kvm_cpuid2 *cpuid,
1917 struct kvm_cpuid_entry2 __user *entries)
1922 if (cpuid->nent < vcpu->arch.cpuid_nent)
1925 if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1926 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1931 cpuid->nent = vcpu->arch.cpuid_nent;
1935 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1938 entry->function = function;
1939 entry->index = index;
1940 cpuid_count(entry->function, entry->index,
1941 &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1945 #define F(x) bit(X86_FEATURE_##x)
1947 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1948 u32 index, int *nent, int maxnent)
1950 unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1951 #ifdef CONFIG_X86_64
1952 unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1954 unsigned f_lm = F(LM);
1956 unsigned f_gbpages = 0;
1959 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
1962 const u32 kvm_supported_word0_x86_features =
1963 F(FPU) | F(VME) | F(DE) | F(PSE) |
1964 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1965 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1966 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1967 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1968 0 /* Reserved, DS, ACPI */ | F(MMX) |
1969 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1970 0 /* HTT, TM, Reserved, PBE */;
1971 /* cpuid 0x80000001.edx */
1972 const u32 kvm_supported_word1_x86_features =
1973 F(FPU) | F(VME) | F(DE) | F(PSE) |
1974 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1975 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1976 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1977 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1978 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1979 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
1980 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1982 const u32 kvm_supported_word4_x86_features =
1983 F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
1984 0 /* DS-CPL, VMX, SMX, EST */ |
1985 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1986 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1987 0 /* Reserved, DCA */ | F(XMM4_1) |
1988 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1989 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX);
1990 /* cpuid 0x80000001.ecx */
1991 const u32 kvm_supported_word6_x86_features =
1992 F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
1993 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1994 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) |
1995 0 /* SKINIT */ | 0 /* WDT */;
1997 /* all calls to cpuid_count() should be made on the same cpu */
1999 do_cpuid_1_ent(entry, function, index);
2004 entry->eax = min(entry->eax, (u32)0xd);
2007 entry->edx &= kvm_supported_word0_x86_features;
2008 entry->ecx &= kvm_supported_word4_x86_features;
2009 /* we support x2apic emulation even if host does not support
2010 * it since we emulate x2apic in software */
2011 entry->ecx |= F(X2APIC);
2013 /* function 2 entries are STATEFUL. That is, repeated cpuid commands
2014 * may return different values. This forces us to get_cpu() before
2015 * issuing the first command, and also to emulate this annoying behavior
2016 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2018 int t, times = entry->eax & 0xff;
2020 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2021 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2022 for (t = 1; t < times && *nent < maxnent; ++t) {
2023 do_cpuid_1_ent(&entry[t], function, 0);
2024 entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2029 /* function 4 and 0xb have additional index. */
2033 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2034 /* read more entries until cache_type is zero */
2035 for (i = 1; *nent < maxnent; ++i) {
2036 cache_type = entry[i - 1].eax & 0x1f;
2039 do_cpuid_1_ent(&entry[i], function, i);
2041 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2049 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2050 /* read more entries until level_type is zero */
2051 for (i = 1; *nent < maxnent; ++i) {
2052 level_type = entry[i - 1].ecx & 0xff00;
2055 do_cpuid_1_ent(&entry[i], function, i);
2057 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2065 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2066 for (i = 1; *nent < maxnent; ++i) {
2067 if (entry[i - 1].eax == 0 && i != 2)
2069 do_cpuid_1_ent(&entry[i], function, i);
2071 KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2076 case KVM_CPUID_SIGNATURE: {
2077 char signature[12] = "KVMKVMKVM\0\0";
2078 u32 *sigptr = (u32 *)signature;
2080 entry->ebx = sigptr[0];
2081 entry->ecx = sigptr[1];
2082 entry->edx = sigptr[2];
2085 case KVM_CPUID_FEATURES:
2086 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2087 (1 << KVM_FEATURE_NOP_IO_DELAY) |
2088 (1 << KVM_FEATURE_CLOCKSOURCE2) |
2089 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2095 entry->eax = min(entry->eax, 0x8000001a);
2098 entry->edx &= kvm_supported_word1_x86_features;
2099 entry->ecx &= kvm_supported_word6_x86_features;
2103 kvm_x86_ops->set_supported_cpuid(function, entry);
2110 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2111 struct kvm_cpuid_entry2 __user *entries)
2113 struct kvm_cpuid_entry2 *cpuid_entries;
2114 int limit, nent = 0, r = -E2BIG;
2117 if (cpuid->nent < 1)
2119 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2120 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2122 cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2126 do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2127 limit = cpuid_entries[0].eax;
2128 for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2129 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2130 &nent, cpuid->nent);
2132 if (nent >= cpuid->nent)
2135 do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2136 limit = cpuid_entries[nent - 1].eax;
2137 for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2138 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2139 &nent, cpuid->nent);
2144 if (nent >= cpuid->nent)
2147 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2151 if (nent >= cpuid->nent)
2154 do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2158 if (nent >= cpuid->nent)
2162 if (copy_to_user(entries, cpuid_entries,
2163 nent * sizeof(struct kvm_cpuid_entry2)))
2169 vfree(cpuid_entries);
2174 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2175 struct kvm_lapic_state *s)
2177 memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2182 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2183 struct kvm_lapic_state *s)
2185 memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2186 kvm_apic_post_state_restore(vcpu);
2187 update_cr8_intercept(vcpu);
2192 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2193 struct kvm_interrupt *irq)
2195 if (irq->irq < 0 || irq->irq >= 256)
2197 if (irqchip_in_kernel(vcpu->kvm))
2200 kvm_queue_interrupt(vcpu, irq->irq, false);
2205 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2207 kvm_inject_nmi(vcpu);
2212 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2213 struct kvm_tpr_access_ctl *tac)
2217 vcpu->arch.tpr_access_reporting = !!tac->enabled;
2221 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2225 unsigned bank_num = mcg_cap & 0xff, bank;
2228 if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2230 if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2233 vcpu->arch.mcg_cap = mcg_cap;
2234 /* Init IA32_MCG_CTL to all 1s */
2235 if (mcg_cap & MCG_CTL_P)
2236 vcpu->arch.mcg_ctl = ~(u64)0;
2237 /* Init IA32_MCi_CTL to all 1s */
2238 for (bank = 0; bank < bank_num; bank++)
2239 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2244 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2245 struct kvm_x86_mce *mce)
2247 u64 mcg_cap = vcpu->arch.mcg_cap;
2248 unsigned bank_num = mcg_cap & 0xff;
2249 u64 *banks = vcpu->arch.mce_banks;
2251 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2254 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2255 * reporting is disabled
2257 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2258 vcpu->arch.mcg_ctl != ~(u64)0)
2260 banks += 4 * mce->bank;
2262 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2263 * reporting is disabled for the bank
2265 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2267 if (mce->status & MCI_STATUS_UC) {
2268 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2269 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2270 printk(KERN_DEBUG "kvm: set_mce: "
2271 "injects mce exception while "
2272 "previous one is in progress!\n");
2273 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2276 if (banks[1] & MCI_STATUS_VAL)
2277 mce->status |= MCI_STATUS_OVER;
2278 banks[2] = mce->addr;
2279 banks[3] = mce->misc;
2280 vcpu->arch.mcg_status = mce->mcg_status;
2281 banks[1] = mce->status;
2282 kvm_queue_exception(vcpu, MC_VECTOR);
2283 } else if (!(banks[1] & MCI_STATUS_VAL)
2284 || !(banks[1] & MCI_STATUS_UC)) {
2285 if (banks[1] & MCI_STATUS_VAL)
2286 mce->status |= MCI_STATUS_OVER;
2287 banks[2] = mce->addr;
2288 banks[3] = mce->misc;
2289 banks[1] = mce->status;
2291 banks[1] |= MCI_STATUS_OVER;
2295 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2296 struct kvm_vcpu_events *events)
2298 events->exception.injected =
2299 vcpu->arch.exception.pending &&
2300 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2301 events->exception.nr = vcpu->arch.exception.nr;
2302 events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2303 events->exception.pad = 0;
2304 events->exception.error_code = vcpu->arch.exception.error_code;
2306 events->interrupt.injected =
2307 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2308 events->interrupt.nr = vcpu->arch.interrupt.nr;
2309 events->interrupt.soft = 0;
2310 events->interrupt.shadow =
2311 kvm_x86_ops->get_interrupt_shadow(vcpu,
2312 KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2314 events->nmi.injected = vcpu->arch.nmi_injected;
2315 events->nmi.pending = vcpu->arch.nmi_pending;
2316 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2317 events->nmi.pad = 0;
2319 events->sipi_vector = vcpu->arch.sipi_vector;
2321 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2322 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2323 | KVM_VCPUEVENT_VALID_SHADOW);
2324 memset(&events->reserved, 0, sizeof(events->reserved));
2327 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2328 struct kvm_vcpu_events *events)
2330 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2331 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2332 | KVM_VCPUEVENT_VALID_SHADOW))
2335 vcpu->arch.exception.pending = events->exception.injected;
2336 vcpu->arch.exception.nr = events->exception.nr;
2337 vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2338 vcpu->arch.exception.error_code = events->exception.error_code;
2340 vcpu->arch.interrupt.pending = events->interrupt.injected;
2341 vcpu->arch.interrupt.nr = events->interrupt.nr;
2342 vcpu->arch.interrupt.soft = events->interrupt.soft;
2343 if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2344 kvm_pic_clear_isr_ack(vcpu->kvm);
2345 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2346 kvm_x86_ops->set_interrupt_shadow(vcpu,
2347 events->interrupt.shadow);
2349 vcpu->arch.nmi_injected = events->nmi.injected;
2350 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2351 vcpu->arch.nmi_pending = events->nmi.pending;
2352 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2354 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2355 vcpu->arch.sipi_vector = events->sipi_vector;
2360 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2361 struct kvm_debugregs *dbgregs)
2363 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2364 dbgregs->dr6 = vcpu->arch.dr6;
2365 dbgregs->dr7 = vcpu->arch.dr7;
2367 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2370 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2371 struct kvm_debugregs *dbgregs)
2376 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2377 vcpu->arch.dr6 = dbgregs->dr6;
2378 vcpu->arch.dr7 = dbgregs->dr7;
2383 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2384 struct kvm_xsave *guest_xsave)
2387 memcpy(guest_xsave->region,
2388 &vcpu->arch.guest_fpu.state->xsave,
2391 memcpy(guest_xsave->region,
2392 &vcpu->arch.guest_fpu.state->fxsave,
2393 sizeof(struct i387_fxsave_struct));
2394 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2399 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2400 struct kvm_xsave *guest_xsave)
2403 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2406 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2407 guest_xsave->region, xstate_size);
2409 if (xstate_bv & ~XSTATE_FPSSE)
2411 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2412 guest_xsave->region, sizeof(struct i387_fxsave_struct));
2417 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2418 struct kvm_xcrs *guest_xcrs)
2420 if (!cpu_has_xsave) {
2421 guest_xcrs->nr_xcrs = 0;
2425 guest_xcrs->nr_xcrs = 1;
2426 guest_xcrs->flags = 0;
2427 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2428 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2431 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2432 struct kvm_xcrs *guest_xcrs)
2439 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2442 for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2443 /* Only support XCR0 currently */
2444 if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
2445 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2446 guest_xcrs->xcrs[0].value);
2454 long kvm_arch_vcpu_ioctl(struct file *filp,
2455 unsigned int ioctl, unsigned long arg)
2457 struct kvm_vcpu *vcpu = filp->private_data;
2458 void __user *argp = (void __user *)arg;
2461 struct kvm_lapic_state *lapic;
2462 struct kvm_xsave *xsave;
2463 struct kvm_xcrs *xcrs;
2469 case KVM_GET_LAPIC: {
2471 if (!vcpu->arch.apic)
2473 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2478 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2482 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2487 case KVM_SET_LAPIC: {
2489 if (!vcpu->arch.apic)
2491 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2496 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
2498 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2504 case KVM_INTERRUPT: {
2505 struct kvm_interrupt irq;
2508 if (copy_from_user(&irq, argp, sizeof irq))
2510 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2517 r = kvm_vcpu_ioctl_nmi(vcpu);
2523 case KVM_SET_CPUID: {
2524 struct kvm_cpuid __user *cpuid_arg = argp;
2525 struct kvm_cpuid cpuid;
2528 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2530 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2535 case KVM_SET_CPUID2: {
2536 struct kvm_cpuid2 __user *cpuid_arg = argp;
2537 struct kvm_cpuid2 cpuid;
2540 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2542 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2543 cpuid_arg->entries);
2548 case KVM_GET_CPUID2: {
2549 struct kvm_cpuid2 __user *cpuid_arg = argp;
2550 struct kvm_cpuid2 cpuid;
2553 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2555 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2556 cpuid_arg->entries);
2560 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2566 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2569 r = msr_io(vcpu, argp, do_set_msr, 0);
2571 case KVM_TPR_ACCESS_REPORTING: {
2572 struct kvm_tpr_access_ctl tac;
2575 if (copy_from_user(&tac, argp, sizeof tac))
2577 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2581 if (copy_to_user(argp, &tac, sizeof tac))
2586 case KVM_SET_VAPIC_ADDR: {
2587 struct kvm_vapic_addr va;
2590 if (!irqchip_in_kernel(vcpu->kvm))
2593 if (copy_from_user(&va, argp, sizeof va))
2596 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2599 case KVM_X86_SETUP_MCE: {
2603 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2605 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2608 case KVM_X86_SET_MCE: {
2609 struct kvm_x86_mce mce;
2612 if (copy_from_user(&mce, argp, sizeof mce))
2614 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2617 case KVM_GET_VCPU_EVENTS: {
2618 struct kvm_vcpu_events events;
2620 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2623 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2628 case KVM_SET_VCPU_EVENTS: {
2629 struct kvm_vcpu_events events;
2632 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2635 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2638 case KVM_GET_DEBUGREGS: {
2639 struct kvm_debugregs dbgregs;
2641 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2644 if (copy_to_user(argp, &dbgregs,
2645 sizeof(struct kvm_debugregs)))
2650 case KVM_SET_DEBUGREGS: {
2651 struct kvm_debugregs dbgregs;
2654 if (copy_from_user(&dbgregs, argp,
2655 sizeof(struct kvm_debugregs)))
2658 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2661 case KVM_GET_XSAVE: {
2662 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2667 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2670 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2675 case KVM_SET_XSAVE: {
2676 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2682 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
2685 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2688 case KVM_GET_XCRS: {
2689 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2694 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2697 if (copy_to_user(argp, u.xcrs,
2698 sizeof(struct kvm_xcrs)))
2703 case KVM_SET_XCRS: {
2704 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2710 if (copy_from_user(u.xcrs, argp,
2711 sizeof(struct kvm_xcrs)))
2714 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2725 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2729 if (addr > (unsigned int)(-3 * PAGE_SIZE))
2731 ret = kvm_x86_ops->set_tss_addr(kvm, addr);
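/*
 * Note on the bound check above (an illustrative aside, not from the
 * original source): the real-mode TSS area spans three pages, so with
 * 4 KiB pages (unsigned int)(-3 * PAGE_SIZE) evaluates to 0xffffd000.
 * Any addr at or below that leaves room for all three pages under the
 * 4 GiB limit:
 *
 *	addr = 0xffffd000  ->  pages 0xffffd000..0xffffffff, fits
 *	addr = 0xffffe000  ->  would run past 4 GiB, rejected
 */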
2735 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2738 kvm->arch.ept_identity_map_addr = ident_addr;
2742 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2743 u32 kvm_nr_mmu_pages)
2745 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2748 mutex_lock(&kvm->slots_lock);
2749 spin_lock(&kvm->mmu_lock);
2751 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2752 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2754 spin_unlock(&kvm->mmu_lock);
2755 mutex_unlock(&kvm->slots_lock);
2759 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2761 return kvm->arch.n_alloc_mmu_pages;
2764 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2769 switch (chip->chip_id) {
2770 case KVM_IRQCHIP_PIC_MASTER:
2771 memcpy(&chip->chip.pic,
2772 &pic_irqchip(kvm)->pics[0],
2773 sizeof(struct kvm_pic_state));
2775 case KVM_IRQCHIP_PIC_SLAVE:
2776 memcpy(&chip->chip.pic,
2777 &pic_irqchip(kvm)->pics[1],
2778 sizeof(struct kvm_pic_state));
2780 case KVM_IRQCHIP_IOAPIC:
2781 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2790 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2795 switch (chip->chip_id) {
2796 case KVM_IRQCHIP_PIC_MASTER:
2797 raw_spin_lock(&pic_irqchip(kvm)->lock);
2798 memcpy(&pic_irqchip(kvm)->pics[0],
2800 sizeof(struct kvm_pic_state));
2801 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2803 case KVM_IRQCHIP_PIC_SLAVE:
2804 raw_spin_lock(&pic_irqchip(kvm)->lock);
2805 memcpy(&pic_irqchip(kvm)->pics[1],
2807 sizeof(struct kvm_pic_state));
2808 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2810 case KVM_IRQCHIP_IOAPIC:
2811 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2817 kvm_pic_update_irq(pic_irqchip(kvm));
2821 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2825 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2826 memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2827 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2831 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2835 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2836 memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2837 kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2838 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2842 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2846 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2847 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2848 sizeof(ps->channels));
2849 ps->flags = kvm->arch.vpit->pit_state.flags;
2850 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2851 memset(&ps->reserved, 0, sizeof(ps->reserved));
2855 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2857 int r = 0, start = 0;
2858 u32 prev_legacy, cur_legacy;
2859 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2860 prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2861 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2862 if (!prev_legacy && cur_legacy)
2864 memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2865 sizeof(kvm->arch.vpit->pit_state.channels));
2866 kvm->arch.vpit->pit_state.flags = ps->flags;
2867 kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2868 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2872 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2873 struct kvm_reinject_control *control)
2875 if (!kvm->arch.vpit)
2877 mutex_lock(&kvm->arch.vpit->pit_state.lock);
2878 kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2879 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2884 * Get (and clear) the dirty memory log for a memory slot.
2886 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2887 struct kvm_dirty_log *log)
2890 struct kvm_memory_slot *memslot;
2892 unsigned long is_dirty = 0;
2894 mutex_lock(&kvm->slots_lock);
2897 if (log->slot >= KVM_MEMORY_SLOTS)
2900 memslot = &kvm->memslots->memslots[log->slot];
2902 if (!memslot->dirty_bitmap)
2905 n = kvm_dirty_bitmap_bytes(memslot);
2907 for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2908 is_dirty = memslot->dirty_bitmap[i];
2910 /* If nothing is dirty, don't bother messing with page tables. */
2912 struct kvm_memslots *slots, *old_slots;
2913 unsigned long *dirty_bitmap;
2916 dirty_bitmap = vmalloc(n);
2919 memset(dirty_bitmap, 0, n);
2922 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
2924 vfree(dirty_bitmap);
2927 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
2928 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
2930 old_slots = kvm->memslots;
2931 rcu_assign_pointer(kvm->memslots, slots);
2932 synchronize_srcu_expedited(&kvm->srcu);
2933 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
2936 spin_lock(&kvm->mmu_lock);
2937 kvm_mmu_slot_remove_write_access(kvm, log->slot);
2938 spin_unlock(&kvm->mmu_lock);
2941 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
2942 vfree(dirty_bitmap);
2945 vfree(dirty_bitmap);
2948 if (clear_user(log->dirty_bitmap, n))
2954 mutex_unlock(&kvm->slots_lock);
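/*
 * Illustrative note on the slot-swap idiom above (not part of the
 * original source): instead of clearing the live dirty bitmap in
 * place, the code publishes a copy of the memslots whose slot points
 * at a zeroed bitmap, waits out readers with
 * synchronize_srcu_expedited(), and only then hands the quiescent old
 * bitmap to userspace.  A minimal sketch of the pattern, assuming the
 * same field names as above:
 *
 *	memcpy(slots, kvm->memslots, sizeof(*slots));
 *	slots->memslots[slot].dirty_bitmap = zeroed_bitmap;
 *	old_slots = kvm->memslots;
 *	rcu_assign_pointer(kvm->memslots, slots);
 *	synchronize_srcu_expedited(&kvm->srcu);	// no reader sees old_slots
 *	copy_to_user(user_buf, old_slots->memslots[slot].dirty_bitmap, n);
 */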
2958 long kvm_arch_vm_ioctl(struct file *filp,
2959 unsigned int ioctl, unsigned long arg)
2961 struct kvm *kvm = filp->private_data;
2962 void __user *argp = (void __user *)arg;
2965 * This union makes it completely explicit to gcc-3.x
2966 * that these two variables' stack usage should be
2967 * combined, not added together.
2970 struct kvm_pit_state ps;
2971 struct kvm_pit_state2 ps2;
2972 struct kvm_pit_config pit_config;
2976 case KVM_SET_TSS_ADDR:
2977 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
2981 case KVM_SET_IDENTITY_MAP_ADDR: {
2985 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
2987 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
2992 case KVM_SET_NR_MMU_PAGES:
2993 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
2997 case KVM_GET_NR_MMU_PAGES:
2998 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3000 case KVM_CREATE_IRQCHIP: {
3001 struct kvm_pic *vpic;
3003 mutex_lock(&kvm->lock);
3006 goto create_irqchip_unlock;
3008 vpic = kvm_create_pic(kvm);
3010 r = kvm_ioapic_init(kvm);
3012 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3015 goto create_irqchip_unlock;
3018 goto create_irqchip_unlock;
3020 kvm->arch.vpic = vpic;
3022 r = kvm_setup_default_irq_routing(kvm);
3024 mutex_lock(&kvm->irq_lock);
3025 kvm_ioapic_destroy(kvm);
3026 kvm_destroy_pic(kvm);
3027 mutex_unlock(&kvm->irq_lock);
3029 create_irqchip_unlock:
3030 mutex_unlock(&kvm->lock);
3033 case KVM_CREATE_PIT:
3034 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3036 case KVM_CREATE_PIT2:
3038 if (copy_from_user(&u.pit_config, argp,
3039 sizeof(struct kvm_pit_config)))
3042 mutex_lock(&kvm->slots_lock);
3045 goto create_pit_unlock;
3047 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3051 mutex_unlock(&kvm->slots_lock);
3053 case KVM_IRQ_LINE_STATUS:
3054 case KVM_IRQ_LINE: {
3055 struct kvm_irq_level irq_event;
3058 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3061 if (irqchip_in_kernel(kvm)) {
3063 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3064 irq_event.irq, irq_event.level);
3065 if (ioctl == KVM_IRQ_LINE_STATUS) {
3067 irq_event.status = status;
3068 if (copy_to_user(argp, &irq_event,
3076 case KVM_GET_IRQCHIP: {
3077 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3078 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3084 if (copy_from_user(chip, argp, sizeof *chip))
3085 goto get_irqchip_out;
3087 if (!irqchip_in_kernel(kvm))
3088 goto get_irqchip_out;
3089 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3091 goto get_irqchip_out;
3093 if (copy_to_user(argp, chip, sizeof *chip))
3094 goto get_irqchip_out;
3102 case KVM_SET_IRQCHIP: {
3103 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3104 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3110 if (copy_from_user(chip, argp, sizeof *chip))
3111 goto set_irqchip_out;
3113 if (!irqchip_in_kernel(kvm))
3114 goto set_irqchip_out;
3115 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3117 goto set_irqchip_out;
3127 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3130 if (!kvm->arch.vpit)
3132 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3136 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3143 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3146 if (!kvm->arch.vpit)
3148 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3154 case KVM_GET_PIT2: {
3156 if (!kvm->arch.vpit)
3158 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3162 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3167 case KVM_SET_PIT2: {
3169 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3172 if (!kvm->arch.vpit)
3174 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3180 case KVM_REINJECT_CONTROL: {
3181 struct kvm_reinject_control control;
3183 if (copy_from_user(&control, argp, sizeof(control)))
3185 r = kvm_vm_ioctl_reinject(kvm, &control);
3191 case KVM_XEN_HVM_CONFIG: {
3193 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3194 sizeof(struct kvm_xen_hvm_config)))
3197 if (kvm->arch.xen_hvm_config.flags)
3202 case KVM_SET_CLOCK: {
3203 struct timespec now;
3204 struct kvm_clock_data user_ns;
3209 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3218 now_ns = timespec_to_ns(&now);
3219 delta = user_ns.clock - now_ns;
3220 kvm->arch.kvmclock_offset = delta;
3223 case KVM_GET_CLOCK: {
3224 struct timespec now;
3225 struct kvm_clock_data user_ns;
3229 now_ns = timespec_to_ns(&now);
3230 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3232 memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3235 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3248 static void kvm_init_msr_list(void)
3253 /* skip the first MSRs in the list; they are KVM-specific */
3254 for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3255 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3258 msrs_to_save[j] = msrs_to_save[i];
3261 num_msrs_to_save = j;
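/*
 * The loop above is the classic two-index in-place filter: 'i' scans
 * every candidate MSR while 'j' tracks where the next readable one
 * lands.  A minimal standalone sketch of the idiom (hypothetical MSR
 * values, not from the original list):
 *
 *	u32 list[] = { 0x10, 0x1b, 0xdead, 0x277 };	// 0xdead faults
 *	u32 lo, hi;
 *	int i, j;
 *
 *	for (i = j = 0; i < 4; i++) {
 *		if (rdmsr_safe(list[i], &lo, &hi) < 0)
 *			continue;
 *		if (i != j)
 *			list[j] = list[i];
 *		j++;
 *	}
 *	// list is now { 0x10, 0x1b, 0x277, 0x277 } with j == 3
 */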
3264 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3267 if (vcpu->arch.apic &&
3268 !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3271 return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3274 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3276 if (vcpu->arch.apic &&
3277 !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3280 return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3283 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3284 struct kvm_segment *var, int seg)
3286 kvm_x86_ops->set_segment(vcpu, var, seg);
3289 void kvm_get_segment(struct kvm_vcpu *vcpu,
3290 struct kvm_segment *var, int seg)
3292 kvm_x86_ops->get_segment(vcpu, var, seg);
3295 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3297 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3298 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3301 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3303 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3304 access |= PFERR_FETCH_MASK;
3305 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3308 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3310 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3311 access |= PFERR_WRITE_MASK;
3312 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3315 /* KVM uses this to access any guest's mapped memory without checking CPL */
3316 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3318 return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3321 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3322 struct kvm_vcpu *vcpu, u32 access,
3326 int r = X86EMUL_CONTINUE;
3329 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
3330 unsigned offset = addr & (PAGE_SIZE-1);
3331 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3334 if (gpa == UNMAPPED_GVA) {
3335 r = X86EMUL_PROPAGATE_FAULT;
3338 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3340 r = X86EMUL_IO_NEEDED;
3352 /* used for instruction fetching */
3353 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3354 struct kvm_vcpu *vcpu, u32 *error)
3356 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3357 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3358 access | PFERR_FETCH_MASK, error);
3361 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3362 struct kvm_vcpu *vcpu, u32 *error)
3364 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3365 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3369 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3370 struct kvm_vcpu *vcpu, u32 *error)
3372 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3375 static int kvm_write_guest_virt_system(gva_t addr, void *val,
3377 struct kvm_vcpu *vcpu,
3381 int r = X86EMUL_CONTINUE;
3384 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
3385 PFERR_WRITE_MASK, error);
3386 unsigned offset = addr & (PAGE_SIZE-1);
3387 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3390 if (gpa == UNMAPPED_GVA) {
3391 r = X86EMUL_PROPAGATE_FAULT;
3394 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3396 r = X86EMUL_IO_NEEDED;
3408 static int emulator_read_emulated(unsigned long addr,
3411 unsigned int *error_code,
3412 struct kvm_vcpu *vcpu)
3416 if (vcpu->mmio_read_completed) {
3417 memcpy(val, vcpu->mmio_data, bytes);
3418 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3419 vcpu->mmio_phys_addr, *(u64 *)val);
3420 vcpu->mmio_read_completed = 0;
3421 return X86EMUL_CONTINUE;
3424 gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
3426 if (gpa == UNMAPPED_GVA)
3427 return X86EMUL_PROPAGATE_FAULT;
3429 /* For APIC access vmexit */
3430 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3433 if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
3434 == X86EMUL_CONTINUE)
3435 return X86EMUL_CONTINUE;
3439 * Is this MMIO handled locally?
3441 if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3442 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
3443 return X86EMUL_CONTINUE;
3446 trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3448 vcpu->mmio_needed = 1;
3449 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3450 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3451 vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3452 vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
3454 return X86EMUL_IO_NEEDED;
3457 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3458 const void *val, int bytes)
3462 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3465 kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
3469 static int emulator_write_emulated_onepage(unsigned long addr,
3472 unsigned int *error_code,
3473 struct kvm_vcpu *vcpu)
3477 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
3479 if (gpa == UNMAPPED_GVA)
3480 return X86EMUL_PROPAGATE_FAULT;
3482 /* For APIC access vmexit */
3483 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3486 if (emulator_write_phys(vcpu, gpa, val, bytes))
3487 return X86EMUL_CONTINUE;
3490 trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3492 * Is this MMIO handled locally?
3494 if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
3495 return X86EMUL_CONTINUE;
3497 vcpu->mmio_needed = 1;
3498 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3499 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3500 vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3501 vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
3502 memcpy(vcpu->run->mmio.data, val, bytes);
3504 return X86EMUL_CONTINUE;
3507 int emulator_write_emulated(unsigned long addr,
3510 unsigned int *error_code,
3511 struct kvm_vcpu *vcpu)
3513 /* Crossing a page boundary? */
3514 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3517 now = -addr & ~PAGE_MASK;
3518 rc = emulator_write_emulated_onepage(addr, val, now, error_code,
3520 if (rc != X86EMUL_CONTINUE)
3526 return emulator_write_emulated_onepage(addr, val, bytes, error_code,
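/*
 * Worked example of the split above (illustrative, assuming 4 KiB
 * pages): addr = 0x1ffd, bytes = 8.  The XOR test sees the first and
 * last byte on different pages, so
 *
 *	now = -0x1ffd & ~PAGE_MASK = 3	// bytes left on the first page
 *
 * giving one write of 3 bytes at 0x1ffd..0x1fff and a second call for
 * the remaining 5 bytes at 0x2000.  Each underlying write therefore
 * stays within a single page.
 */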
3530 #define CMPXCHG_TYPE(t, ptr, old, new) \
3531 (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3533 #ifdef CONFIG_X86_64
3534 # define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3536 # define CMPXCHG64(ptr, old, new) \
3537 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
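/*
 * Usage note (illustrative, not from the original source): both macros
 * report success of the compare-and-swap as a boolean, e.g.:
 *
 *	u32 old = 1, new = 2, val = 1;
 *	bool ok = CMPXCHG_TYPE(u32, &val, &old, &new);
 *	// ok == true and val == 2; a concurrent writer that changed
 *	// val first would leave val untouched and make ok false
 *
 * On 32-bit hosts CMPXCHG64 falls back to cmpxchg64(), which uses the
 * cmpxchg8b instruction for an atomic 8-byte exchange.
 */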
3540 static int emulator_cmpxchg_emulated(unsigned long addr,
3544 unsigned int *error_code,
3545 struct kvm_vcpu *vcpu)
3552 /* a guest's cmpxchg8b has to be emulated atomically */
3553 if (bytes > 8 || (bytes & (bytes - 1)))
3556 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3558 if (gpa == UNMAPPED_GVA ||
3559 (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3562 if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3565 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3566 if (is_error_page(page)) {
3567 kvm_release_page_clean(page);
3571 kaddr = kmap_atomic(page, KM_USER0);
3572 kaddr += offset_in_page(gpa);
3575 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3578 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3581 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3584 exchanged = CMPXCHG64(kaddr, old, new);
3589 kunmap_atomic(kaddr, KM_USER0);
3590 kvm_release_page_dirty(page);
3593 return X86EMUL_CMPXCHG_FAILED;
3595 kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
3597 return X86EMUL_CONTINUE;
3600 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3602 return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
3605 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3607 /* TODO: String I/O for in-kernel device */
3610 if (vcpu->arch.pio.in)
3611 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3612 vcpu->arch.pio.size, pd);
3614 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3615 vcpu->arch.pio.port, vcpu->arch.pio.size,
3621 static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3622 unsigned int count, struct kvm_vcpu *vcpu)
3624 if (vcpu->arch.pio.count)
3627 trace_kvm_pio(1, port, size, 1);
3629 vcpu->arch.pio.port = port;
3630 vcpu->arch.pio.in = 1;
3631 vcpu->arch.pio.count = count;
3632 vcpu->arch.pio.size = size;
3634 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3636 memcpy(val, vcpu->arch.pio_data, size * count);
3637 vcpu->arch.pio.count = 0;
3641 vcpu->run->exit_reason = KVM_EXIT_IO;
3642 vcpu->run->io.direction = KVM_EXIT_IO_IN;
3643 vcpu->run->io.size = size;
3644 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3645 vcpu->run->io.count = count;
3646 vcpu->run->io.port = port;
3651 static int emulator_pio_out_emulated(int size, unsigned short port,
3652 const void *val, unsigned int count,
3653 struct kvm_vcpu *vcpu)
3655 trace_kvm_pio(0, port, size, 1);
3657 vcpu->arch.pio.port = port;
3658 vcpu->arch.pio.in = 0;
3659 vcpu->arch.pio.count = count;
3660 vcpu->arch.pio.size = size;
3662 memcpy(vcpu->arch.pio_data, val, size * count);
3664 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3665 vcpu->arch.pio.count = 0;
3669 vcpu->run->exit_reason = KVM_EXIT_IO;
3670 vcpu->run->io.direction = KVM_EXIT_IO_OUT;
3671 vcpu->run->io.size = size;
3672 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3673 vcpu->run->io.count = count;
3674 vcpu->run->io.port = port;
3679 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3681 return kvm_x86_ops->get_segment_base(vcpu, seg);
3684 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3686 kvm_mmu_invlpg(vcpu, address);
3687 return X86EMUL_CONTINUE;
3690 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
3692 if (!need_emulate_wbinvd(vcpu))
3693 return X86EMUL_CONTINUE;
3695 if (kvm_x86_ops->has_wbinvd_exit()) {
3696 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
3697 wbinvd_ipi, NULL, 1);
3698 cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
3701 return X86EMUL_CONTINUE;
3703 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
3705 int emulate_clts(struct kvm_vcpu *vcpu)
3707 kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3708 kvm_x86_ops->fpu_activate(vcpu);
3709 return X86EMUL_CONTINUE;
3712 int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
3714 return _kvm_get_dr(vcpu, dr, dest);
3717 int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
3720 return __kvm_set_dr(vcpu, dr, value);
3723 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3725 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
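/*
 * Worked example (illustrative): mk_cr_64() replaces only the low
 * 32 bits of a control register value, so
 *
 *	mk_cr_64(0x0000000100000033ULL, 0x10) == 0x0000000100000010ULL
 *
 * which lets the 32-bit emulator interface update CR0/CR4 without
 * clobbering high bits already set by a 64-bit guest.
 */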
3728 static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
3730 unsigned long value;
3734 value = kvm_read_cr0(vcpu);
3737 value = vcpu->arch.cr2;
3740 value = vcpu->arch.cr3;
3743 value = kvm_read_cr4(vcpu);
3746 value = kvm_get_cr8(vcpu);
3749 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3756 static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
3762 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
3765 vcpu->arch.cr2 = val;
3768 res = kvm_set_cr3(vcpu, val);
3771 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3774 res = __kvm_set_cr8(vcpu, val & 0xfUL);
3777 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3784 static int emulator_get_cpl(struct kvm_vcpu *vcpu)
3786 return kvm_x86_ops->get_cpl(vcpu);
3789 static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3791 kvm_x86_ops->get_gdt(vcpu, dt);
3794 static unsigned long emulator_get_cached_segment_base(int seg,
3795 struct kvm_vcpu *vcpu)
3797 return get_segment_base(vcpu, seg);
3800 static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
3801 struct kvm_vcpu *vcpu)
3803 struct kvm_segment var;
3805 kvm_get_segment(vcpu, &var, seg);
3812 set_desc_limit(desc, var.limit);
3813 set_desc_base(desc, (unsigned long)var.base);
3814 desc->type = var.type;
3816 desc->dpl = var.dpl;
3817 desc->p = var.present;
3818 desc->avl = var.avl;
3826 static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
3827 struct kvm_vcpu *vcpu)
3829 struct kvm_segment var;
3831 /* needed to preserve selector */
3832 kvm_get_segment(vcpu, &var, seg);
3834 var.base = get_desc_base(desc);
3835 var.limit = get_desc_limit(desc);
3837 var.limit = (var.limit << 12) | 0xfff;
3838 var.type = desc->type;
3839 var.present = desc->p;
3840 var.dpl = desc->dpl;
3845 var.avl = desc->avl;
3846 var.present = desc->p;
3847 var.unusable = !var.present;
3850 kvm_set_segment(vcpu, &var, seg);
3854 static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
3856 struct kvm_segment kvm_seg;
3858 kvm_get_segment(vcpu, &kvm_seg, seg);
3859 return kvm_seg.selector;
3862 static void emulator_set_segment_selector(u16 sel, int seg,
3863 struct kvm_vcpu *vcpu)
3865 struct kvm_segment kvm_seg;
3867 kvm_get_segment(vcpu, &kvm_seg, seg);
3868 kvm_seg.selector = sel;
3869 kvm_set_segment(vcpu, &kvm_seg, seg);
3872 static struct x86_emulate_ops emulate_ops = {
3873 .read_std = kvm_read_guest_virt_system,
3874 .write_std = kvm_write_guest_virt_system,
3875 .fetch = kvm_fetch_guest_virt,
3876 .read_emulated = emulator_read_emulated,
3877 .write_emulated = emulator_write_emulated,
3878 .cmpxchg_emulated = emulator_cmpxchg_emulated,
3879 .pio_in_emulated = emulator_pio_in_emulated,
3880 .pio_out_emulated = emulator_pio_out_emulated,
3881 .get_cached_descriptor = emulator_get_cached_descriptor,
3882 .set_cached_descriptor = emulator_set_cached_descriptor,
3883 .get_segment_selector = emulator_get_segment_selector,
3884 .set_segment_selector = emulator_set_segment_selector,
3885 .get_cached_segment_base = emulator_get_cached_segment_base,
3886 .get_gdt = emulator_get_gdt,
3887 .get_cr = emulator_get_cr,
3888 .set_cr = emulator_set_cr,
3889 .cpl = emulator_get_cpl,
3890 .get_dr = emulator_get_dr,
3891 .set_dr = emulator_set_dr,
3892 .set_msr = kvm_set_msr,
3893 .get_msr = kvm_get_msr,
3896 static void cache_all_regs(struct kvm_vcpu *vcpu)
3898 kvm_register_read(vcpu, VCPU_REGS_RAX);
3899 kvm_register_read(vcpu, VCPU_REGS_RSP);
3900 kvm_register_read(vcpu, VCPU_REGS_RIP);
3901 vcpu->arch.regs_dirty = ~0;
3904 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
3906 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
3908 * an sti; sti; sequence only disables interrupts for the first
3909 * instruction. So, if the last instruction, be it emulated or
3910 * not, left the system with the INT_STI flag enabled, it
3911 * means that the last instruction is an sti. We should not
3912 * leave the flag on in this case. The same goes for mov ss.
3914 if (!(int_shadow & mask))
3915 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
3918 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
3920 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
3921 if (ctxt->exception == PF_VECTOR)
3922 kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
3923 else if (ctxt->error_code_valid)
3924 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
3926 kvm_queue_exception(vcpu, ctxt->exception);
3929 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
3931 ++vcpu->stat.insn_emulation_fail;
3932 trace_kvm_emulate_insn_failed(vcpu);
3933 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3934 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3935 vcpu->run->internal.ndata = 0;
3936 kvm_queue_exception(vcpu, UD_VECTOR);
3937 return EMULATE_FAIL;
3940 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
3948 * if emulation was due to access to a shadowed page table
3949 * and it failed, try to unshadow the page and re-enter the
3950 * guest to let the CPU execute the instruction.
3952 if (kvm_mmu_unprotect_page_virt(vcpu, gva))
3955 gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
3957 if (gpa == UNMAPPED_GVA)
3958 return true; /* let cpu generate fault */
3960 if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
3966 int emulate_instruction(struct kvm_vcpu *vcpu,
3972 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
3974 kvm_clear_exception_queue(vcpu);
3975 vcpu->arch.mmio_fault_cr2 = cr2;
3977 * TODO: fix emulate.c to use guest_read/write_register
3978 * instead of direct ->regs accesses; can save hundreds of cycles
3979 * on Intel for instructions that don't read/change RSP, for example.
3982 cache_all_regs(vcpu);
3984 if (!(emulation_type & EMULTYPE_NO_DECODE)) {
3986 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
3988 vcpu->arch.emulate_ctxt.vcpu = vcpu;
3989 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
3990 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
3991 vcpu->arch.emulate_ctxt.mode =
3992 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
3993 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
3994 ? X86EMUL_MODE_VM86 : cs_l
3995 ? X86EMUL_MODE_PROT64 : cs_db
3996 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
3997 memset(c, 0, sizeof(struct decode_cache));
3998 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
3999 vcpu->arch.emulate_ctxt.interruptibility = 0;
4000 vcpu->arch.emulate_ctxt.exception = -1;
4002 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
4003 trace_kvm_emulate_insn_start(vcpu);
4005 /* Only allow emulation of specific instructions on #UD
4006 * (namely VMMCALL, sysenter, sysexit, syscall) */
4007 if (emulation_type & EMULTYPE_TRAP_UD) {
4009 return EMULATE_FAIL;
4011 case 0x01: /* VMMCALL */
4012 if (c->modrm_mod != 3 || c->modrm_rm != 1)
4013 return EMULATE_FAIL;
4015 case 0x34: /* sysenter */
4016 case 0x35: /* sysexit */
4017 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4018 return EMULATE_FAIL;
4020 case 0x05: /* syscall */
4021 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4022 return EMULATE_FAIL;
4025 return EMULATE_FAIL;
4028 if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
4029 return EMULATE_FAIL;
4032 ++vcpu->stat.insn_emulation;
4034 if (reexecute_instruction(vcpu, cr2))
4035 return EMULATE_DONE;
4036 if (emulation_type & EMULTYPE_SKIP)
4037 return EMULATE_FAIL;
4038 return handle_emulation_failure(vcpu);
4042 if (emulation_type & EMULTYPE_SKIP) {
4043 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
4044 return EMULATE_DONE;
4047 /* this is needed for the vmware backdoor interface to work since it
4048 changes register values during an IO operation */
4049 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4052 r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
4054 if (r) { /* emulation failed */
4055 if (reexecute_instruction(vcpu, cr2))
4056 return EMULATE_DONE;
4058 return handle_emulation_failure(vcpu);
4061 toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4062 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4063 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4064 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4066 if (vcpu->arch.emulate_ctxt.exception >= 0) {
4067 inject_emulated_exception(vcpu);
4068 return EMULATE_DONE;
4071 if (vcpu->arch.pio.count) {
4072 if (!vcpu->arch.pio.in)
4073 vcpu->arch.pio.count = 0;
4074 return EMULATE_DO_MMIO;
4077 if (vcpu->mmio_needed) {
4078 if (vcpu->mmio_is_write)
4079 vcpu->mmio_needed = 0;
4080 return EMULATE_DO_MMIO;
4083 if (vcpu->arch.emulate_ctxt.restart)
4086 return EMULATE_DONE;
4088 EXPORT_SYMBOL_GPL(emulate_instruction);
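/*
 * Illustrative summary of the emulation-mode selection used in
 * emulate_instruction() above and kvm_task_switch() below (this merely
 * restates the nested conditional, in evaluation order):
 *
 *	!is_protmode(vcpu)	-> X86EMUL_MODE_REAL
 *	EFLAGS.VM set		-> X86EMUL_MODE_VM86
 *	CS.L set (long mode)	-> X86EMUL_MODE_PROT64
 *	CS.D/B set		-> X86EMUL_MODE_PROT32
 *	otherwise		-> X86EMUL_MODE_PROT16
 */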
4090 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4092 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4093 int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
4094 /* do not return to emulator after return from userspace */
4095 vcpu->arch.pio.count = 0;
4098 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4100 static void bounce_off(void *info)
4105 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4108 struct cpufreq_freqs *freq = data;
4110 struct kvm_vcpu *vcpu;
4111 int i, send_ipi = 0;
4113 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4115 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4117 per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
4119 spin_lock(&kvm_lock);
4120 list_for_each_entry(kvm, &vm_list, vm_list) {
4121 kvm_for_each_vcpu(i, vcpu, kvm) {
4122 if (vcpu->cpu != freq->cpu)
4124 if (!kvm_request_guest_time_update(vcpu))
4126 if (vcpu->cpu != smp_processor_id())
4130 spin_unlock(&kvm_lock);
4132 if (freq->old < freq->new && send_ipi) {
4134 * We upscale the frequency.  Must make sure the guest
4135 * doesn't see old kvmclock values while running with
4136 * the new frequency, otherwise we risk the guest seeing
4137 * time go backwards.
4139 * In case we update the frequency for another cpu
4140 * (which might be in guest context) send an interrupt
4141 * to kick the cpu out of guest context. Next time
4142 * guest context is entered kvmclock will be updated,
4143 * so the guest will not see stale values.
4145 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
4150 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4151 .notifier_call = kvmclock_cpufreq_notifier
4154 static void kvm_timer_init(void)
4158 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4159 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4160 CPUFREQ_TRANSITION_NOTIFIER);
4161 for_each_online_cpu(cpu) {
4162 unsigned long khz = cpufreq_get(cpu);
4165 per_cpu(cpu_tsc_khz, cpu) = khz;
4168 for_each_possible_cpu(cpu)
4169 per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
4173 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4175 static int kvm_is_in_guest(void)
4177 return percpu_read(current_vcpu) != NULL;
4180 static int kvm_is_user_mode(void)
4184 if (percpu_read(current_vcpu))
4185 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
4187 return user_mode != 0;
4190 static unsigned long kvm_get_guest_ip(void)
4192 unsigned long ip = 0;
4194 if (percpu_read(current_vcpu))
4195 ip = kvm_rip_read(percpu_read(current_vcpu));
4200 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4201 .is_in_guest = kvm_is_in_guest,
4202 .is_user_mode = kvm_is_user_mode,
4203 .get_guest_ip = kvm_get_guest_ip,
4206 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4208 percpu_write(current_vcpu, vcpu);
4210 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4212 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4214 percpu_write(current_vcpu, NULL);
4216 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4218 int kvm_arch_init(void *opaque)
4221 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4224 printk(KERN_ERR "kvm: already loaded the other module\n");
4229 if (!ops->cpu_has_kvm_support()) {
4230 printk(KERN_ERR "kvm: no hardware support\n");
4234 if (ops->disabled_by_bios()) {
4235 printk(KERN_ERR "kvm: disabled by bios\n");
4240 r = kvm_mmu_module_init();
4244 kvm_init_msr_list();
4247 kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
4248 kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
4249 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4250 PT_DIRTY_MASK, PT64_NX_MASK, 0);
4254 perf_register_guest_info_callbacks(&kvm_guest_cbs);
4257 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4265 void kvm_arch_exit(void)
4267 perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4269 if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4270 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4271 CPUFREQ_TRANSITION_NOTIFIER);
4273 kvm_mmu_module_exit();
4276 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4278 ++vcpu->stat.halt_exits;
4279 if (irqchip_in_kernel(vcpu->kvm)) {
4280 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4283 vcpu->run->exit_reason = KVM_EXIT_HLT;
4287 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4289 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
4292 if (is_long_mode(vcpu))
4295 return a0 | ((gpa_t)a1 << 32);
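/*
 * Worked example (illustrative): outside long mode a guest passes a
 * 64-bit hypercall gpa in two 32-bit registers, so
 *
 *	hc_gpa(vcpu, 0x1000, 0x2) == 0x0000000200001000ULL
 *
 * while a long-mode guest supplies the full address in a0 and a1 is
 * ignored.
 */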
4298 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4300 u64 param, ingpa, outgpa, ret;
4301 uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4302 bool fast, longmode;
4306 * hypercall generates #UD from non-zero CPL or from real mode
4309 if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4310 kvm_queue_exception(vcpu, UD_VECTOR);
4314 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4315 longmode = is_long_mode(vcpu) && cs_l == 1;
4318 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4319 (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4320 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4321 (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4322 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4323 (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4325 #ifdef CONFIG_X86_64
4327 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4328 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4329 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4333 code = param & 0xffff;
4334 fast = (param >> 16) & 0x1;
4335 rep_cnt = (param >> 32) & 0xfff;
4336 rep_idx = (param >> 48) & 0xfff;
4338 trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4341 case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4342 kvm_vcpu_on_spin(vcpu);
4345 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4349 ret = res | (((u64)rep_done & 0xfff) << 32);
4351 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4353 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4354 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
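/*
 * Layout of the Hyper-V hypercall input value decoded above (this just
 * restates the shifts and masks in the code):
 *
 *	bits 15:0	call code
 *	bit  16		fast flag (register-based arguments)
 *	bits 43:32	rep count
 *	bits 59:48	rep start index
 *
 * The result value mirrors it: status in the low 16 bits and the
 * reps-completed count at bits 43:32, written back through one
 * register in long mode or split across RDX:RAX otherwise.
 */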
4360 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4362 unsigned long nr, a0, a1, a2, a3, ret;
4365 if (kvm_hv_hypercall_enabled(vcpu->kvm))
4366 return kvm_hv_hypercall(vcpu);
4368 nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4369 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4370 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4371 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4372 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
4374 trace_kvm_hypercall(nr, a0, a1, a2, a3);
4376 if (!is_long_mode(vcpu)) {
4384 if (kvm_x86_ops->get_cpl(vcpu) != 0) {
4390 case KVM_HC_VAPIC_POLL_IRQ:
4394 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
4401 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4402 ++vcpu->stat.hypercalls;
4405 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
4407 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
4409 char instruction[3];
4410 unsigned long rip = kvm_rip_read(vcpu);
4413 * Blow out the MMU to ensure that no other VCPU has an active mapping,
4414 * so that the updated hypercall appears atomically across all VCPUs.
4417 kvm_mmu_zap_all(vcpu->kvm);
4419 kvm_x86_ops->patch_hypercall(vcpu, instruction);
4421 return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
4424 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4426 struct desc_ptr dt = { limit, base };
4428 kvm_x86_ops->set_gdt(vcpu, &dt);
4431 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4433 struct desc_ptr dt = { limit, base };
4435 kvm_x86_ops->set_idt(vcpu, &dt);
4438 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4440 struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4441 int j, nent = vcpu->arch.cpuid_nent;
4443 e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4444 /* when no next entry is found, the current entry[i] is reselected */
4445 for (j = i + 1; ; j = (j + 1) % nent) {
4446 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
4447 if (ej->function == e->function) {
4448 ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4452 return 0; /* silence gcc, even though control never reaches here */
4455 /* find an entry with matching function, matching index (if needed), and that
4456 * should be read next (if it's stateful) */
4457 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4458 u32 function, u32 index)
4460 if (e->function != function)
4462 if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4464 if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
4465 !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
4470 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4471 u32 function, u32 index)
4474 struct kvm_cpuid_entry2 *best = NULL;
4476 for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
4477 struct kvm_cpuid_entry2 *e;
4479 e = &vcpu->arch.cpuid_entries[i];
4480 if (is_matching_cpuid_entry(e, function, index)) {
4481 if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4482 move_to_next_stateful_cpuid_entry(vcpu, i);
4487 * Both basic or both extended?
4489 if (((e->function ^ function) & 0x80000000) == 0)
4490 if (!best || e->function > best->function)
4495 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
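/*
 * Note on the fallback above (illustrative): the test
 * ((e->function ^ function) & 0x80000000) == 0 keeps the candidate in
 * the same CPUID range as the query, since basic leaves live below
 * 0x80000000 and extended leaves at or above it.  A lookup of, say,
 * 0x8000000a on a guest whose highest extended leaf is 0x80000008 thus
 * falls back to 0x80000008 and never to a basic leaf such as 0x1.
 */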
4497 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4499 struct kvm_cpuid_entry2 *best;
4501 best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
4502 if (!best || best->eax < 0x80000008)
4504 best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4506 return best->eax & 0xff;
4511 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4513 u32 function, index;
4514 struct kvm_cpuid_entry2 *best;
4516 function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4517 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4518 kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4519 kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4520 kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4521 kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4522 best = kvm_find_cpuid_entry(vcpu, function, index);
4524 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4525 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4526 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4527 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
4529 kvm_x86_ops->skip_emulated_instruction(vcpu);
4530 trace_kvm_cpuid(function,
4531 kvm_register_read(vcpu, VCPU_REGS_RAX),
4532 kvm_register_read(vcpu, VCPU_REGS_RBX),
4533 kvm_register_read(vcpu, VCPU_REGS_RCX),
4534 kvm_register_read(vcpu, VCPU_REGS_RDX));
4536 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
4539 * Check if userspace requested an interrupt window, and that the
4540 * interrupt window is open.
4542 * No need to exit to userspace if we already have an interrupt queued.
4544 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
4546 return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
4547 vcpu->run->request_interrupt_window &&
4548 kvm_arch_interrupt_allowed(vcpu));
4551 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
4553 struct kvm_run *kvm_run = vcpu->run;
4555 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
4556 kvm_run->cr8 = kvm_get_cr8(vcpu);
4557 kvm_run->apic_base = kvm_get_apic_base(vcpu);
4558 if (irqchip_in_kernel(vcpu->kvm))
4559 kvm_run->ready_for_interrupt_injection = 1;
4561 kvm_run->ready_for_interrupt_injection =
4562 kvm_arch_interrupt_allowed(vcpu) &&
4563 !kvm_cpu_has_interrupt(vcpu) &&
4564 !kvm_event_needs_reinjection(vcpu);
4567 static void vapic_enter(struct kvm_vcpu *vcpu)
4569 struct kvm_lapic *apic = vcpu->arch.apic;
4572 if (!apic || !apic->vapic_addr)
4575 page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4577 vcpu->arch.apic->vapic_page = page;
4580 static void vapic_exit(struct kvm_vcpu *vcpu)
4582 struct kvm_lapic *apic = vcpu->arch.apic;
4585 if (!apic || !apic->vapic_addr)
4588 idx = srcu_read_lock(&vcpu->kvm->srcu);
4589 kvm_release_page_dirty(apic->vapic_page);
4590 mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4591 srcu_read_unlock(&vcpu->kvm->srcu, idx);
4594 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4598 if (!kvm_x86_ops->update_cr8_intercept)
4601 if (!vcpu->arch.apic)
4604 if (!vcpu->arch.apic->vapic_addr)
4605 max_irr = kvm_lapic_find_highest_irr(vcpu);
4612 tpr = kvm_lapic_get_cr8(vcpu);
4614 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4617 static void inject_pending_event(struct kvm_vcpu *vcpu)
4619 /* try to reinject previous events if any */
4620 if (vcpu->arch.exception.pending) {
4621 trace_kvm_inj_exception(vcpu->arch.exception.nr,
4622 vcpu->arch.exception.has_error_code,
4623 vcpu->arch.exception.error_code);
4624 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4625 vcpu->arch.exception.has_error_code,
4626 vcpu->arch.exception.error_code,
4627 vcpu->arch.exception.reinject);
4631 if (vcpu->arch.nmi_injected) {
4632 kvm_x86_ops->set_nmi(vcpu);
4636 if (vcpu->arch.interrupt.pending) {
4637 kvm_x86_ops->set_irq(vcpu);
4641 /* try to inject new event if pending */
4642 if (vcpu->arch.nmi_pending) {
4643 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4644 vcpu->arch.nmi_pending = false;
4645 vcpu->arch.nmi_injected = true;
4646 kvm_x86_ops->set_nmi(vcpu);
4648 } else if (kvm_cpu_has_interrupt(vcpu)) {
4649 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
4650 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4652 kvm_x86_ops->set_irq(vcpu);
4657 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
4659 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
4660 !vcpu->guest_xcr0_loaded) {
4661 /* kvm_set_xcr() also depends on this */
4662 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
4663 vcpu->guest_xcr0_loaded = 1;
4667 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
4669 if (vcpu->guest_xcr0_loaded) {
4670 if (vcpu->arch.xcr0 != host_xcr0)
4671 xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
4672 vcpu->guest_xcr0_loaded = 0;
4676 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4679 bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
4680 vcpu->run->request_interrupt_window;
4682 if (vcpu->requests) {
4683 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
4684 kvm_mmu_unload(vcpu);
4685 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
4686 __kvm_migrate_timers(vcpu);
4687 if (kvm_check_request(KVM_REQ_KVMCLOCK_UPDATE, vcpu))
4688 kvm_write_guest_time(vcpu);
4689 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
4690 kvm_mmu_sync_roots(vcpu);
4691 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
4692 kvm_x86_ops->tlb_flush(vcpu);
4693 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
4694 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
4698 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
4699 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4703 if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
4704 vcpu->fpu_active = 0;
4705 kvm_x86_ops->fpu_deactivate(vcpu);
4709 r = kvm_mmu_reload(vcpu);
4715 kvm_x86_ops->prepare_guest_switch(vcpu);
4716 if (vcpu->fpu_active)
4717 kvm_load_guest_fpu(vcpu);
4718 kvm_load_guest_xcr0(vcpu);
4720 atomic_set(&vcpu->guest_mode, 1);
4723 local_irq_disable();
4725 if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
4726 || need_resched() || signal_pending(current)) {
4727 atomic_set(&vcpu->guest_mode, 0);
4735 inject_pending_event(vcpu);
4737 /* enable NMI/IRQ window open exits if needed */
4738 if (vcpu->arch.nmi_pending)
4739 kvm_x86_ops->enable_nmi_window(vcpu);
4740 else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4741 kvm_x86_ops->enable_irq_window(vcpu);
4743 if (kvm_lapic_enabled(vcpu)) {
4744 update_cr8_intercept(vcpu);
4745 kvm_lapic_sync_to_vapic(vcpu);
4748 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4752 if (unlikely(vcpu->arch.switch_db_regs)) {
4754 set_debugreg(vcpu->arch.eff_db[0], 0);
4755 set_debugreg(vcpu->arch.eff_db[1], 1);
4756 set_debugreg(vcpu->arch.eff_db[2], 2);
4757 set_debugreg(vcpu->arch.eff_db[3], 3);
4760 trace_kvm_entry(vcpu->vcpu_id);
4761 kvm_x86_ops->run(vcpu);
4764 * If the guest has used debug registers, at least dr7
4765 * will be disabled while returning to the host.
4766 * If we don't have active breakpoints in the host, we don't
4767 * care about the messed up debug address registers. But if
4768 * we have some of them active, restore the old state.
4770 if (hw_breakpoint_active())
4771 hw_breakpoint_restore();
4773 atomic_set(&vcpu->guest_mode, 0);
4780 * We must have an instruction between local_irq_enable() and
4781 * kvm_guest_exit(), so the timer interrupt isn't delayed by
4782 * the interrupt shadow. The stat.exits increment will do nicely.
4783 * But we need to prevent reordering, hence this barrier():
4791 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4794 * Profile KVM exit RIPs:
4796 if (unlikely(prof_on == KVM_PROFILING)) {
4797 unsigned long rip = kvm_rip_read(vcpu);
4798 profile_hit(KVM_PROFILING, (void *)rip);
4802 kvm_lapic_sync_from_vapic(vcpu);
4804 r = kvm_x86_ops->handle_exit(vcpu);
4810 static int __vcpu_run(struct kvm_vcpu *vcpu)
4813 struct kvm *kvm = vcpu->kvm;
4815 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
4816 pr_debug("vcpu %d received sipi with vector # %x\n",
4817 vcpu->vcpu_id, vcpu->arch.sipi_vector);
4818 kvm_lapic_reset(vcpu);
4819 r = kvm_arch_vcpu_reset(vcpu);
4822 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4825 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4830 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
4831 r = vcpu_enter_guest(vcpu);
4833 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4834 kvm_vcpu_block(vcpu);
4835 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4836 if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
4838 switch(vcpu->arch.mp_state) {
4839 case KVM_MP_STATE_HALTED:
4840 vcpu->arch.mp_state =
4841 KVM_MP_STATE_RUNNABLE;
4842 case KVM_MP_STATE_RUNNABLE:
4844 case KVM_MP_STATE_SIPI_RECEIVED:
4855 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4856 if (kvm_cpu_has_pending_timer(vcpu))
4857 kvm_inject_pending_timer_irqs(vcpu);
4859 if (dm_request_for_irq_injection(vcpu)) {
4861 vcpu->run->exit_reason = KVM_EXIT_INTR;
4862 ++vcpu->stat.request_irq_exits;
4864 if (signal_pending(current)) {
4866 vcpu->run->exit_reason = KVM_EXIT_INTR;
4867 ++vcpu->stat.signal_exits;
4869 if (need_resched()) {
4870 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4872 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4876 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4883 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4888 if (vcpu->sigset_active)
4889 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4891 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4892 kvm_vcpu_block(vcpu);
4893 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
4898 /* re-sync apic's tpr */
4899 if (!irqchip_in_kernel(vcpu->kvm))
4900 kvm_set_cr8(vcpu, kvm_run->cr8);
4902 if (vcpu->arch.pio.count || vcpu->mmio_needed ||
4903 vcpu->arch.emulate_ctxt.restart) {
4904 if (vcpu->mmio_needed) {
4905 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4906 vcpu->mmio_read_completed = 1;
4907 vcpu->mmio_needed = 0;
4909 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4910 r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
4911 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4912 if (r != EMULATE_DONE) {
4917 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4918 kvm_register_write(vcpu, VCPU_REGS_RAX,
4919 kvm_run->hypercall.ret);
4921 r = __vcpu_run(vcpu);
4924 post_kvm_run_save(vcpu);
4925 if (vcpu->sigset_active)
4926 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
4931 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4933 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
4934 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
4935 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
4936 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
4937 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
4938 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
4939 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
4940 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
4941 #ifdef CONFIG_X86_64
4942 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
4943 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
4944 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
4945 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
4946 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
4947 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
4948 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
4949 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
4952 regs->rip = kvm_rip_read(vcpu);
4953 regs->rflags = kvm_get_rflags(vcpu);
4958 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
4960 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
4961 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
4962 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
4963 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
4964 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
4965 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
4966 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
4967 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
4968 #ifdef CONFIG_X86_64
4969 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
4970 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
4971 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
4972 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
4973 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
4974 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
4975 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
4976 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
4979 kvm_rip_write(vcpu, regs->rip);
4980 kvm_set_rflags(vcpu, regs->rflags);
4982 vcpu->arch.exception.pending = false;
4987 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
4989 struct kvm_segment cs;
4991 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
4995 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
4997 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
4998 struct kvm_sregs *sregs)
5002 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5003 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5004 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5005 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5006 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5007 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5009 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5010 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5012 kvm_x86_ops->get_idt(vcpu, &dt);
5013 sregs->idt.limit = dt.size;
5014 sregs->idt.base = dt.address;
5015 kvm_x86_ops->get_gdt(vcpu, &dt);
5016 sregs->gdt.limit = dt.size;
5017 sregs->gdt.base = dt.address;
5019 sregs->cr0 = kvm_read_cr0(vcpu);
5020 sregs->cr2 = vcpu->arch.cr2;
5021 sregs->cr3 = vcpu->arch.cr3;
5022 sregs->cr4 = kvm_read_cr4(vcpu);
5023 sregs->cr8 = kvm_get_cr8(vcpu);
5024 sregs->efer = vcpu->arch.efer;
5025 sregs->apic_base = kvm_get_apic_base(vcpu);
5027 memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5029 if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5030 set_bit(vcpu->arch.interrupt.nr,
5031 (unsigned long *)sregs->interrupt_bitmap);
5036 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5037 struct kvm_mp_state *mp_state)
5039 mp_state->mp_state = vcpu->arch.mp_state;
5043 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5044 struct kvm_mp_state *mp_state)
5046 vcpu->arch.mp_state = mp_state->mp_state;
5050 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5051 bool has_error_code, u32 error_code)
5053 struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
5054 int cs_db, cs_l, ret;
5055 cache_all_regs(vcpu);
5057 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5059 vcpu->arch.emulate_ctxt.vcpu = vcpu;
5060 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
5061 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
5062 vcpu->arch.emulate_ctxt.mode =
5063 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5064 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
5065 ? X86EMUL_MODE_VM86 : cs_l
5066 ? X86EMUL_MODE_PROT64 : cs_db
5067 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
5068 memset(c, 0, sizeof(struct decode_cache));
5069 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
5071 ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
5072 tss_selector, reason, has_error_code,
5076 return EMULATE_FAIL;
5078 memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5079 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5080 kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5081 return EMULATE_DONE;
5083 EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int mmu_reset_needed = 0;
	int pending_vec, max_bits;
	struct desc_ptr dt;

	dt.size = sregs->idt.limit;
	dt.address = sregs->idt.base;
	kvm_x86_ops->set_idt(vcpu, &dt);
	dt.size = sregs->gdt.limit;
	dt.address = sregs->gdt.base;
	kvm_x86_ops->set_gdt(vcpu, &dt);

	vcpu->arch.cr2 = sregs->cr2;
	mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
	vcpu->arch.cr3 = sregs->cr3;

	kvm_set_cr8(vcpu, sregs->cr8);

	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
	kvm_x86_ops->set_efer(vcpu, sregs->efer);
	kvm_set_apic_base(vcpu, sregs->apic_base);

	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
	vcpu->arch.cr0 = sregs->cr0;

	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
	if (sregs->cr4 & X86_CR4_OSXSAVE)
		update_cpuid(vcpu);
	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
		load_pdptrs(vcpu, vcpu->arch.cr3);
		mmu_reset_needed = 1;
	}

	if (mmu_reset_needed)
		kvm_mmu_reset_context(vcpu);

	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
	pending_vec = find_first_bit(
		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
	if (pending_vec < max_bits) {
		kvm_queue_interrupt(vcpu, pending_vec, false);
		pr_debug("Set back pending irq %d\n", pending_vec);
		if (irqchip_in_kernel(vcpu->kvm))
			kvm_pic_clear_isr_ack(vcpu->kvm);
	}

	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);

	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);

	update_cr8_intercept(vcpu);

	/* Older userspace won't unhalt the vcpu on reset. */
	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
	    !is_protmode(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

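/*
 * Note on the check above: at reset an x86 CPU starts with
 * CS.selector = 0xf000, CS.base = 0xffff0000 and IP = 0xfff0, so the
 * first fetch is from base + IP = 0xfffffff0, the classic reset vector.
 * Seeing exactly that real-mode state arrive from userspace is taken as
 * "this VCPU was just reset", and the BSP is forced runnable even if an
 * older VMM forgot to unhalt it.
 */
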
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	unsigned long rflags;
	int i, r;

	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
		r = -EBUSY;
		if (vcpu->arch.exception.pending)
			goto out;
		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
			kvm_queue_exception(vcpu, DB_VECTOR);
		else
			kvm_queue_exception(vcpu, BP_VECTOR);
	}

	/*
	 * Read rflags as long as potentially injected trace flags are still
	 * filtered out.
	 */
	rflags = kvm_get_rflags(vcpu);

	vcpu->guest_debug = dbg->control;
	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
		for (i = 0; i < KVM_NR_DB_REGS; ++i)
			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
		vcpu->arch.switch_db_regs =
			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
	} else {
		for (i = 0; i < KVM_NR_DB_REGS; i++)
			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
	}

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
			get_segment_base(vcpu, VCPU_SREG_CS);

	/*
	 * Trigger an rflags update that will inject or remove the trace
	 * flags.
	 */
	kvm_set_rflags(vcpu, rflags);

	kvm_x86_ops->set_guest_debug(vcpu, dbg);

	r = 0;

out:
	return r;
}

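/*
 * Hypothetical userspace sketch (not part of this file): single-stepping
 * a guest via the ioctl this handler backs. Names follow <linux/kvm.h>;
 * error handling is elided.
 *
 *	struct kvm_guest_debug dbg = {
 *		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
 *		err(1, "KVM_SET_GUEST_DEBUG");
 *	ioctl(vcpu_fd, KVM_RUN, 0);	// returns with KVM_EXIT_DEBUG
 */
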
/*
 * Translate a guest virtual address to a guest physical address.
 */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	unsigned long vaddr = tr->linear_address;
	gpa_t gpa;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	tr->physical_address = gpa;
	tr->valid = gpa != UNMAPPED_GVA;
	tr->writeable = 1;
	tr->usermode = 0;

	return 0;
}

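/*
 * Hypothetical userspace sketch (not part of this file): exercising the
 * KVM_TRANSLATE ioctl served by the function above. guest_va is assumed
 * to be a guest-virtual address of interest; error handling is elided.
 *
 *	struct kvm_translation tr = { .linear_address = guest_va };
 *
 *	if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *		printf("gva %llx -> gpa %llx\n",
 *		       tr.linear_address, tr.physical_address);
 */
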
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fpu->fpr, fxsave->st_space, 128);
	fpu->fcw = fxsave->cwd;
	fpu->fsw = fxsave->swd;
	fpu->ftwx = fxsave->twd;
	fpu->last_opcode = fxsave->fop;
	fpu->last_ip = fxsave->rip;
	fpu->last_dp = fxsave->rdp;
	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	struct i387_fxsave_struct *fxsave =
			&vcpu->arch.guest_fpu.state->fxsave;

	memcpy(fxsave->st_space, fpu->fpr, 128);
	fxsave->cwd = fpu->fcw;
	fxsave->swd = fpu->fsw;
	fxsave->twd = fpu->ftwx;
	fxsave->fop = fpu->last_opcode;
	fxsave->rip = fpu->last_ip;
	fxsave->rdp = fpu->last_dp;
	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);

	return 0;
}

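/*
 * Hypothetical userspace sketch (not part of this file): a VMM saving
 * and restoring guest FPU state, e.g. for migration, round-trips it
 * through the two ioctls these handlers back. Error handling is elided.
 *
 *	struct kvm_fpu fpu;
 *
 *	ioctl(src_vcpu_fd, KVM_GET_FPU, &fpu);
 *	ioctl(dst_vcpu_fd, KVM_SET_FPU, &fpu);
 */
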
int fx_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = fpu_alloc(&vcpu->arch.guest_fpu);
	if (err)
		return err;

	fpu_finit(&vcpu->arch.guest_fpu);

	/*
	 * Ensure guest xcr0 is valid for loading
	 */
	vcpu->arch.xcr0 = XSTATE_FP;

	vcpu->arch.cr0 |= X86_CR0_ET;

	return 0;
}
EXPORT_SYMBOL_GPL(fx_init);

static void fx_free(struct kvm_vcpu *vcpu)
{
	fpu_free(&vcpu->arch.guest_fpu);
}

void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_fpu_loaded)
		return;

	/*
	 * Restore all possible states in the guest,
	 * and assume host would use all available bits.
	 * Guest xcr0 would be loaded later.
	 */
	kvm_put_guest_xcr0(vcpu);
	vcpu->guest_fpu_loaded = 1;
	unlazy_fpu(current);
	fpu_restore_checking(&vcpu->arch.guest_fpu);
	trace_kvm_fpu(1);
}

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
	kvm_put_guest_xcr0(vcpu);

	if (!vcpu->guest_fpu_loaded)
		return;

	vcpu->guest_fpu_loaded = 0;
	fpu_save_init(&vcpu->arch.guest_fpu);
	++vcpu->stat.fpu_reload;
	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
	trace_kvm_fpu(0);
}

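/*
 * The pair above implements lazy FPU switching: the guest FPU is only
 * loaded when the guest will actually touch it, and on the way out the
 * state is saved back and FPU deactivation is requested again. A rough
 * sketch of the call pattern (the real call sites live elsewhere in
 * this file):
 *
 *	kvm_load_guest_fpu(vcpu);	// before entering the guest
 *	...				// guest runs, may use FPU/SSE
 *	kvm_put_guest_fpu(vcpu);	// on a heavyweight exit
 */
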
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.time_page) {
		kvm_release_page_dirty(vcpu->arch.time_page);
		vcpu->arch.time_page = NULL;
	}

	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
						unsigned int id)
{
	return kvm_x86_ops->vcpu_create(kvm, id);
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int r;

	vcpu->arch.mtrr_state.have_fixed = 1;
	vcpu_load(vcpu);
	r = kvm_arch_vcpu_reset(vcpu);
	if (r == 0)
		r = kvm_mmu_setup(vcpu);
	vcpu_put(vcpu);
	if (r < 0)
		goto free_vcpu;

	return 0;
free_vcpu:
	kvm_x86_ops->vcpu_free(vcpu);
	return r;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);

	fx_free(vcpu);
	kvm_x86_ops->vcpu_free(vcpu);
}

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.nmi_pending = false;
	vcpu->arch.nmi_injected = false;

	vcpu->arch.switch_db_regs = 0;
	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
	vcpu->arch.dr6 = DR6_FIXED_1;
	vcpu->arch.dr7 = DR7_FIXED_1;

	return kvm_x86_ops->vcpu_reset(vcpu);
}

int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * Since this may be called from a hotplug notification,
	 * we can't get the CPU frequency directly.
	 */
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		int cpu = raw_smp_processor_id();
		per_cpu(cpu_tsc_khz, cpu) = 0;
	}

	kvm_shared_msr_cpu_online();

	return kvm_x86_ops->hardware_enable(garbage);
}

void kvm_arch_hardware_disable(void *garbage)
{
	kvm_x86_ops->hardware_disable(garbage);
	drop_user_return_notifiers(garbage);
}

int kvm_arch_hardware_setup(void)
{
	return kvm_x86_ops->hardware_setup();
}

void kvm_arch_hardware_unsetup(void)
{
	kvm_x86_ops->hardware_unsetup();
}

void kvm_arch_check_processor_compat(void *rtn)
{
	kvm_x86_ops->check_processor_compatibility(rtn);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct page *page;
	struct kvm *kvm;
	int r;

	BUG_ON(vcpu->kvm == NULL);
	kvm = vcpu->kvm;

	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else
		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page) {
		r = -ENOMEM;
		goto fail;
	}
	vcpu->arch.pio_data = page_address(page);

	r = kvm_mmu_create(vcpu);
	if (r < 0)
		goto fail_free_pio_data;

	if (irqchip_in_kernel(kvm)) {
		r = kvm_create_lapic(vcpu);
		if (r < 0)
			goto fail_mmu_destroy;
	}

	/* Four u64s per bank: MCi_CTL, MCi_STATUS, MCi_ADDR, MCi_MISC. */
	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
				       GFP_KERNEL);
	if (!vcpu->arch.mce_banks) {
		r = -ENOMEM;
		goto fail_free_lapic;
	}
	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;

	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
		goto fail_free_mce_banks;

	return 0;
fail_free_mce_banks:
	kfree(vcpu->arch.mce_banks);
fail_free_lapic:
	kvm_free_lapic(vcpu);
fail_mmu_destroy:
	kvm_mmu_destroy(vcpu);
fail_free_pio_data:
	free_page((unsigned long)vcpu->arch.pio_data);
fail:
	return r;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int idx;

	kfree(vcpu->arch.mce_banks);
	kvm_free_lapic(vcpu);
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	kvm_mmu_destroy(vcpu);
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	free_page((unsigned long)vcpu->arch.pio_data);
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);

	if (!kvm)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);

	rdtscll(kvm->arch.vm_init_tsc);

	return kvm;
}

static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/*
	 * Unpin any mmu pages first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_unload_vcpu_mmu(vcpu);
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
	kvm_free_all_assigned_devices(kvm);
	kvm_free_pit(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_iommu_unmap_guest(kvm);
	kfree(kvm->arch.vpic);
	kfree(kvm->arch.vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	if (kvm->arch.apic_access_page)
		put_page(kvm->arch.apic_access_page);
	if (kvm->arch.ept_identity_pagetable)
		put_page(kvm->arch.ept_identity_pagetable);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc)
{
	int npages = memslot->npages;
	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;

	/* Prevent internal slot pages from being moved by fork()/COW. */
	if (memslot->id >= KVM_MEMORY_SLOTS)
		map_flags = MAP_SHARED | MAP_ANONYMOUS;

	/*
	 * To keep backward compatibility with older userspace,
	 * x86 needs to handle the !user_alloc case.
	 */
	if (!user_alloc) {
		if (npages && !old.rmap) {
			unsigned long userspace_addr;

			down_write(&current->mm->mmap_sem);
			userspace_addr = do_mmap(NULL, 0,
						 npages * PAGE_SIZE,
						 PROT_READ | PROT_WRITE,
						 map_flags,
						 0);
			up_write(&current->mm->mmap_sem);

			if (IS_ERR((void *)userspace_addr))
				return PTR_ERR((void *)userspace_addr);

			memslot->userspace_addr = userspace_addr;
		}
	}

	return 0;
}

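/*
 * Note: the !user_alloc branch above serves the legacy
 * KVM_SET_MEMORY_REGION ABI, where the kernel rather than userspace
 * provides the backing memory: the new slot is populated with an
 * anonymous mmap() of npages * PAGE_SIZE bytes in the VMM's address
 * space, and internal slots are mapped MAP_SHARED so that fork()/COW
 * cannot move their pages out from under the guest.
 */
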
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int npages = mem->memory_size >> PAGE_SHIFT;

	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
		int ret;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm, old.userspace_addr,
				old.npages * PAGE_SIZE);
		up_write(&current->mm->mmap_sem);
		if (ret < 0)
			printk(KERN_WARNING
			       "kvm_vm_ioctl_set_memory_region: "
			       "failed to munmap memory\n");
	}

	spin_lock(&kvm->mmu_lock);
	if (!kvm->arch.n_requested_mmu_pages) {
		unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
	}

	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	kvm_mmu_zap_all(kvm);
	kvm_reload_remote_mmus(kvm);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
		|| vcpu->arch.nmi_pending ||
		(kvm_arch_interrupt_allowed(vcpu) &&
		 kvm_cpu_has_interrupt(vcpu));
}

void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		++vcpu->stat.halt_wakeup;
	}

	me = get_cpu();
	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
		if (atomic_xchg(&vcpu->guest_mode, 0))
			smp_send_reschedule(cpu);
	put_cpu();
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->interrupt_allowed(vcpu);
}

bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
	unsigned long current_rip = kvm_rip_read(vcpu) +
		get_segment_base(vcpu, VCPU_SREG_CS);

	return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);

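/*
 * Worked example for the comparison above: with CS.base = 0xffff0000
 * and RIP = 0xfff0 (the reset state), the linear RIP is
 * 0xffff0000 + 0xfff0 = 0xfffffff0. A singlestep_rip recorded at that
 * point therefore only matches again when both CS.base and RIP line up,
 * not when RIP alone happens to repeat under a different code segment.
 */
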
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
	unsigned long rflags;

	rflags = kvm_x86_ops->get_rflags(vcpu);
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		rflags &= ~X86_EFLAGS_TF;
	return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);

void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
		rflags |= X86_EFLAGS_TF;
	kvm_x86_ops->set_rflags(vcpu, rflags);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);

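/*
 * The pair above keeps a host-injected trap flag invisible to the
 * guest: kvm_get_rflags() masks out the X86_EFLAGS_TF that userspace
 * single-stepping injected, and kvm_set_rflags() re-injects it, but
 * only while the vcpu is still at the recorded singlestep_rip. A rough
 * sketch of the resulting idiom for callers in this file:
 *
 *	rflags = kvm_get_rflags(vcpu);	// debugger's TF already filtered
 *	rflags |= X86_EFLAGS_IF;	// example modification
 *	kvm_set_rflags(vcpu, rflags);	// TF re-added if still needed
 */
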
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);