void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
+ kvm_make_request(KVM_REQ_NMI, vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
- vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);
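Injecting an NMI now just raises KVM_REQ_NMI and lets the vcpu loop latch nmi_pending before guest entry (see the hunk further down that checks KVM_REQ_NMI), instead of writing the flag from a possibly foreign context. For reference, the request helpers these hunks rely on are roughly bit operations on vcpu->requests; a sketch matching the kvm_host.h of this era:

	static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
	{
		set_bit(req, &vcpu->requests);
	}

	/* returns true, and clears the bit, iff the request was pending */
	static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
	{
		if (test_bit(req, &vcpu->requests)) {
			clear_bit(req, &vcpu->requests);
			return true;
		}
		return false;
	}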
unsigned long flags;
s64 sdiff;
- spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
+ raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = data - native_read_tsc();
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_offset = offset;
kvm_x86_ops->write_tsc_offset(vcpu, offset);
- spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
+ raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
return 0;
}
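The lock conversion matters on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock; the TSC write path runs with interrupts disabled, so it needs a raw_spinlock_t, which always spins. The raw API mirrors the plain one it replaces; a minimal sketch (example_lock and the function are illustrative only):

	static DEFINE_RAW_SPINLOCK(example_lock);

	static void example_critical_section(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&example_lock, flags);
		/* keep this short and non-sleeping: no allocations,
		 * no sleeping locks, on -rt as well as mainline */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}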
+static void kvmclock_reset(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.time_page) {
+ kvm_release_page_dirty(vcpu->arch.time_page);
+ vcpu->arch.time_page = NULL;
+ }
+}
+
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
} else
return set_msr_hyperv(vcpu, msr, data);
break;
+ case MSR_IA32_BBL_CR_CTL3:
+ /* Drop writes to this legacy MSR -- see rdmsr
+ * counterpart for further detail.
+ */
+ pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
+ break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
} else
return get_msr_hyperv(vcpu, msr, pdata);
break;
+ case MSR_IA32_BBL_CR_CTL3:
+ /* This legacy MSR exists but isn't fully documented in current
+ * silicon. It is however accessed by winxp in very narrow
+ * scenarios where it sets bit #19, itself documented as
+ * a "reserved" bit. Make a best-effort attempt to return
+ * coherent read data here in case the guest interprets the
+ * rest of the register:
+ *
+ * L2 cache control register 3: 64GB range, 256KB size,
+ * enabled, latency 0x1, configured
+ */
+ data = 0xbe702111;
+ break;
default:
if (!ignore_msrs) {
pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
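A guest can only reach this MSR via a privileged rdmsr, which vmexits into the handler above. Purely for illustration, a hypothetical ring-0 probe of the register would look like this (the 0x11e index matches MSR_IA32_BBL_CR_CTL3 in msr-index.h; the function name is made up):

	#define MSR_IA32_BBL_CR_CTL3	0x0000011e

	static inline u64 probe_bbl_cr_ctl3(void)
	{
		u32 lo, hi;

		/* rdmsr: ecx selects the MSR, result lands in edx:eax */
		asm volatile("rdmsr" : "=a" (lo), "=d" (hi)
				     : "c" (MSR_IA32_BBL_CR_CTL3));
		return ((u64)hi << 32) | lo;
	}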
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
- printk(KERN_DEBUG "kvm: set_mce: "
- "injects mce exception while "
- "previous one is in progress!\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 0;
}
vcpu->arch.interrupt.pending = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
- if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
- kvm_pic_clear_isr_ack(vcpu->kvm);
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
kvm_x86_ops->set_interrupt_shadow(vcpu,
events->interrupt.shadow);
vcpu->arch.emulate_ctxt.have_exception = false;
vcpu->arch.emulate_ctxt.perm_ok = false;
+ vcpu->arch.emulate_ctxt.only_vendor_specific_insn
+ = emulation_type & EMULTYPE_TRAP_UD;
+
r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
- if (r == X86EMUL_PROPAGATE_FAULT)
- goto done;
trace_kvm_emulate_insn_start(vcpu);
-
- /* Only allow emulation of specific instructions on #UD
- * (namely VMMCALL, sysenter, sysexit, syscall)*/
- if (emulation_type & EMULTYPE_TRAP_UD) {
- if (!c->twobyte)
- return EMULATE_FAIL;
- switch (c->b) {
- case 0x01: /* VMMCALL */
- if (c->modrm_mod != 3 || c->modrm_rm != 1)
- return EMULATE_FAIL;
- break;
- case 0x34: /* sysenter */
- case 0x35: /* sysexit */
- if (c->modrm_mod != 0 || c->modrm_rm != 0)
- return EMULATE_FAIL;
- break;
- case 0x05: /* syscall */
- if (c->modrm_mod != 0 || c->modrm_rm != 0)
- return EMULATE_FAIL;
- break;
- default:
- return EMULATE_FAIL;
- }
-
- if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
- return EMULATE_FAIL;
- }
-
++vcpu->stat.insn_emulation;
if (r) {
+ if (emulation_type & EMULTYPE_TRAP_UD)
+ return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return handle_emulation_failure(vcpu);
}
-done:
if (vcpu->arch.emulate_ctxt.have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
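The open-coded #UD whitelist (VMMCALL, sysenter, sysexit, syscall) moves out of x86.c: the decode context now carries only_vendor_specific_insn, and the emulator's opcode table is expected to mark the few instructions a #UD intercept may emulate, so decode simply fails for everything else. The emulator-side check implied by this hunk is roughly the sketch below; the VendorSpecific flag name and value are assumptions based on the emulate.c of this era:

	#define VendorSpecific	(1 << 22)	/* opcode-table attribute */

	/* near the end of decode: an insn that arrived via a #UD trap
	 * may only be emulated if its opcode is flagged vendor-specific */
	if (ctxt->only_vendor_specific_insn && !(c->d & VendorSpecific))
		return -1;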
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
- spin_lock(&kvm_lock);
+ raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
send_ipi = 1;
}
}
- spin_unlock(&kvm_lock);
+ raw_spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
r = 1;
goto out;
}
+ if (kvm_check_request(KVM_REQ_NMI, vcpu))
+ vcpu->arch.nmi_pending = true;
}
r = kvm_mmu_reload(vcpu);
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
- atomic_set(&vcpu->guest_mode, 1);
- smp_wmb();
+ vcpu->mode = IN_GUEST_MODE;
+
+ /* We should set ->mode before checking ->requests;
+ * see the comment in make_all_cpus_request.
+ */
+ smp_mb();
local_irq_disable();
- if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
+ if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
- atomic_set(&vcpu->guest_mode, 0);
+ vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
kvm_get_msr(vcpu, MSR_IA32_TSC, &vcpu->arch.last_guest_tsc);
- atomic_set(&vcpu->guest_mode, 0);
+ vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
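The full smp_mb() after setting ->mode pairs with a barrier on the requester side: the vcpu thread stores ->mode and then loads ->requests, while a requester stores a request bit and then loads ->mode, so at least one side observes the other's store and a request can never be posted without either being noticed before entry or triggering a kick. A condensed sketch of the requester side, after the pattern in make_all_cpus_request() in virt/kvm/kvm_main.c:

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_make_request(req, vcpu);
		cpu = vcpu->cpu;

		/* set ->requests bit before we read ->mode */
		smp_mb();

		if (cpus != NULL && cpu != -1 && cpu != me &&
		    kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
			cpumask_set_cpu(cpu, cpus);	/* IPI this vcpu */
	}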
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
- int pending_vec, max_bits;
+ int pending_vec, max_bits, idx;
struct desc_ptr dt;
dt.size = sregs->idt.limit;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
update_cpuid(vcpu);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
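load_pdptrs() reads guest page-table memory through the memslot array, which is published under kvm->srcu, so the caller must be inside an SRCU read-side critical section; the KVM_SET_SREGS path previously wasn't. The general pattern is:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	/* safe to dereference kvm->memslots and read guest memory */
	srcu_read_unlock(&kvm->srcu, idx);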
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
- if (irqchip_in_kernel(vcpu->kvm))
- kvm_pic_clear_isr_ack(vcpu->kvm);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ kvmclock_reset(vcpu);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
+ kvmclock_reset(vcpu);
+
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
- spin_lock_init(&kvm->arch.tsc_write_lock);
+ raw_spin_lock_init(&kvm->arch.tsc_write_lock);
return 0;
}
me = get_cpu();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
- if (atomic_xchg(&vcpu->guest_mode, 0))
+ if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
smp_send_reschedule(cpu);
put_cpu();
}
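kvm_vcpu_exiting_guest_mode() replaces the atomic_xchg on the old guest_mode flag: it atomically flips IN_GUEST_MODE to EXITING_GUEST_MODE and returns the previous state, so exactly one kicker wins the race to send the reschedule IPI and a vcpu already marked as exiting is not IPI'd again. Its definition is essentially one cmpxchg (a sketch, per the kvm_host.h changes in this series):

	static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
	{
		return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
	}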