3 * Local APIC virtualization
5 * Copyright (C) 2006 Qumranet, Inc.
6 * Copyright (C) 2007 Novell
7 * Copyright (C) 2007 Intel
8 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
11 * Dor Laor <dor.laor@qumranet.com>
12 * Gregory Haskins <ghaskins@novell.com>
13 * Yaozu (Eddie) Dong <eddie.dong@intel.com>
15 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
17 * This work is licensed under the terms of the GNU GPL, version 2. See
18 * the COPYING file in the top-level directory.
21 #include <linux/kvm_host.h>
22 #include <linux/kvm.h>
24 #include <linux/highmem.h>
25 #include <linux/smp.h>
26 #include <linux/hrtimer.h>
28 #include <linux/module.h>
29 #include <linux/math64.h>
30 #include <linux/slab.h>
31 #include <asm/processor.h>
34 #include <asm/current.h>
35 #include <asm/apicdef.h>
36 #include <linux/atomic.h>
37 #include <linux/jump_label.h>
38 #include "kvm_cache_regs.h"
45 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
47 #define mod_64(x, y) ((x) % (y))
55 #define APIC_BUS_CYCLE_NS 1
57 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
58 #define apic_debug(fmt, arg...)
60 #define APIC_LVT_NUM 6
61 /* 0x14 is the version for Xeon and Pentium (SDM 8.4.8) */
62 #define APIC_VERSION (0x14UL | ((APIC_LVT_NUM - 1) << 16))
63 #define LAPIC_MMIO_LENGTH (1 << 12)
64 /* the following defines are not in apicdef.h */
65 #define APIC_SHORT_MASK 0xc0000
66 #define APIC_DEST_NOSHORT 0x0
67 #define APIC_DEST_MASK 0x800
68 #define MAX_APIC_VECTOR 256
70 #define VEC_POS(v) ((v) & (32 - 1))
71 #define REG_POS(v) (((v) >> 5) << 4)
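/*
 * IRR, ISR and TMR are 256-bit bitmaps kept as eight 32-bit registers
 * spaced 16 bytes apart in the APIC page.  For a vector v, VEC_POS(v)
 * is the bit position within the 32-bit word and REG_POS(v) is the
 * byte offset of that word; e.g. vector 0x31 is bit 17 of the word at
 * offset 0x10.
 */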
73 static unsigned int min_timer_period_us = 500;
74 module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
76 static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
78 *((u32 *) (apic->regs + reg_off)) = val;
81 static inline int apic_test_and_set_vector(int vec, void *bitmap)
83 return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
86 static inline int apic_test_and_clear_vector(int vec, void *bitmap)
88 return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
91 static inline int apic_test_vector(int vec, void *bitmap)
93 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
96 static inline void apic_set_vector(int vec, void *bitmap)
98 set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
101 static inline void apic_clear_vector(int vec, void *bitmap)
103 clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
106 static inline int __apic_test_and_set_vector(int vec, void *bitmap)
108 return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
111 static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
113 return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
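/*
 * These deferred static keys count the vCPUs whose local APIC is
 * hardware- or software-disabled; while a count is zero the
 * corresponding enabled check can be patched out of the hot path.
 */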
116 struct static_key_deferred apic_hw_disabled __read_mostly;
117 struct static_key_deferred apic_sw_disabled __read_mostly;
119 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
121 if ((kvm_apic_get_reg(apic, APIC_SPIV) ^ val) & APIC_SPIV_APIC_ENABLED) {
122 if (val & APIC_SPIV_APIC_ENABLED)
123 static_key_slow_dec_deferred(&apic_sw_disabled);
125 static_key_slow_inc(&apic_sw_disabled.key);
127 apic_set_reg(apic, APIC_SPIV, val);
130 static inline int apic_enabled(struct kvm_lapic *apic)
132 return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
136 (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)
139 (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
140 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
142 static inline int kvm_apic_id(struct kvm_lapic *apic)
144 return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
147 static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
149 return !(kvm_apic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
152 static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
154 return kvm_apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
157 static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
159 return ((kvm_apic_get_reg(apic, APIC_LVTT) &
160 apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
163 static inline int apic_lvtt_period(struct kvm_lapic *apic)
165 return ((kvm_apic_get_reg(apic, APIC_LVTT) &
166 apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
169 static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
171 return ((kvm_apic_get_reg(apic, APIC_LVTT) &
172 apic->lapic_timer.timer_mode_mask) ==
173 APIC_LVT_TIMER_TSCDEADLINE);
176 static inline int apic_lvt_nmi_mode(u32 lvt_val)
178 return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
181 void kvm_apic_set_version(struct kvm_vcpu *vcpu)
183 struct kvm_lapic *apic = vcpu->arch.apic;
184 struct kvm_cpuid_entry2 *feat;
185 u32 v = APIC_VERSION;
187 if (!kvm_vcpu_has_lapic(vcpu))
190 feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
191 if (feat && (feat->ecx & (1 << (X86_FEATURE_X2APIC & 31))))
192 v |= APIC_LVR_DIRECTED_EOI;
193 apic_set_reg(apic, APIC_LVR, v);
196 static inline int apic_x2apic_mode(struct kvm_lapic *apic)
198 return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
201 static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
202 LVT_MASK, /* partial LVTT mask; timer mode mask added at runtime */
203 LVT_MASK | APIC_MODE_MASK, /* LVTTHMR */
204 LVT_MASK | APIC_MODE_MASK, /* LVTPC */
205 LINT_MASK, LINT_MASK, /* LVT0-1 */
206 LVT_MASK /* LVTERR */
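/*
 * Scan a 256-bit vector bitmap (IRR or ISR) from the highest 32-bit
 * word down and return the highest vector that is set, or -1 if the
 * bitmap is empty.
 */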
209 static int find_highest_vector(void *bitmap)
212 int word_offset = MAX_APIC_VECTOR >> 5;
214 while ((word_offset != 0) && (word[(--word_offset) << 2] == 0))
217 if (likely(!word_offset && !word[0]))
220 return fls(word[word_offset << 2]) - 1 + (word_offset << 5);
223 static u8 count_vectors(void *bitmap)
228 for (word_offset = 0; word_offset < MAX_APIC_VECTOR >> 5; ++word_offset)
229 count += hweight32(word[word_offset << 2]);
233 static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
235 apic->irr_pending = true;
236 return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
239 static inline int apic_search_irr(struct kvm_lapic *apic)
241 return find_highest_vector(apic->regs + APIC_IRR);
244 static inline int apic_find_highest_irr(struct kvm_lapic *apic)
248 if (!apic->irr_pending)
251 result = apic_search_irr(apic);
252 ASSERT(result == -1 || result >= 16);
257 static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
259 apic->irr_pending = false;
260 apic_clear_vector(vec, apic->regs + APIC_IRR);
261 if (apic_search_irr(apic) != -1)
262 apic->irr_pending = true;
265 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
267 if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
269 BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
271 * ISR (in service register) bit is set when injecting an interrupt.
272 * The highest vector is injected. Thus the latest bit set matches
273 * the highest bit in ISR.
275 apic->highest_isr_cache = vec;
278 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
280 if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
282 BUG_ON(apic->isr_count < 0);
283 apic->highest_isr_cache = -1;
286 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
290 /* This may race with setting of irr in __apic_accept_irq() and
291 * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
292 * will cause vmexit immediately and the value will be recalculated
293 * on the next vmentry.
295 if (!kvm_vcpu_has_lapic(vcpu))
297 highest_irr = apic_find_highest_irr(vcpu->arch.apic);
302 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
303 int vector, int level, int trig_mode);
305 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
307 struct kvm_lapic *apic = vcpu->arch.apic;
309 return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
310 irq->level, irq->trig_mode);
313 static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
316 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
320 static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
323 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
327 static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
329 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
332 static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
335 if (pv_eoi_get_user(vcpu, &val) < 0)
336 apic_debug("Can't read EOI MSR value: 0x%llx\n",
337 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
341 static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
343 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
344 apic_debug("Can't set EOI MSR value: 0x%llx\n",
345 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
348 __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
351 static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
353 if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
354 apic_debug("Can't clear EOI MSR value: 0x%llx\n",
355 (unsigned long long)vcpu->arch.pv_eoi.msr_val);
358 __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
361 static inline int apic_find_highest_isr(struct kvm_lapic *apic)
364 if (!apic->isr_count)
366 if (likely(apic->highest_isr_cache != -1))
367 return apic->highest_isr_cache;
369 result = find_highest_vector(apic->regs + APIC_ISR);
370 ASSERT(result == -1 || result >= 16);
375 static void apic_update_ppr(struct kvm_lapic *apic)
377 u32 tpr, isrv, ppr, old_ppr;
380 old_ppr = kvm_apic_get_reg(apic, APIC_PROCPRI);
381 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI);
382 isr = apic_find_highest_isr(apic);
383 isrv = (isr != -1) ? isr : 0;
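/*
 * PPR is the higher of the task priority and the priority class of the
 * highest in-service vector: if the TPR class (bits 7:4) is at least
 * that of the ISR vector, PPR is the full TPR, otherwise it is the ISR
 * priority class with the sub-class bits cleared.
 */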
385 if ((tpr & 0xf0) >= (isrv & 0xf0))
390 apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
391 apic, ppr, isr, isrv);
393 if (old_ppr != ppr) {
394 apic_set_reg(apic, APIC_PROCPRI, ppr);
396 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
400 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
402 apic_set_reg(apic, APIC_TASKPRI, tpr);
403 apic_update_ppr(apic);
406 int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
408 return dest == 0xff || kvm_apic_id(apic) == dest;
411 int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
416 if (apic_x2apic_mode(apic)) {
417 logical_id = kvm_apic_get_reg(apic, APIC_LDR);
418 return logical_id & mda;
421 logical_id = GET_APIC_LOGICAL_ID(kvm_apic_get_reg(apic, APIC_LDR));
423 switch (kvm_apic_get_reg(apic, APIC_DFR)) {
425 if (logical_id & mda)
428 case APIC_DFR_CLUSTER:
429 if (((logical_id >> 4) == (mda >> 0x4))
430 && (logical_id & mda & 0xf))
434 apic_debug("Bad DFR vcpu %d: %08x\n",
435 apic->vcpu->vcpu_id, kvm_apic_get_reg(apic, APIC_DFR));
442 int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
443 int short_hand, int dest, int dest_mode)
446 struct kvm_lapic *target = vcpu->arch.apic;
448 apic_debug("target %p, source %p, dest 0x%x, "
449 "dest_mode 0x%x, short_hand 0x%x\n",
450 target, source, dest, dest_mode, short_hand);
453 switch (short_hand) {
454 case APIC_DEST_NOSHORT:
457 result = kvm_apic_match_physical_addr(target, dest);
460 result = kvm_apic_match_logical_addr(target, dest);
463 result = (target == source);
465 case APIC_DEST_ALLINC:
468 case APIC_DEST_ALLBUT:
469 result = (target != source);
472 apic_debug("kvm: apic: Bad dest shorthand value %x\n",
481 * Add a pending IRQ into lapic.
482 * Return 1 if successfully added and 0 if discarded.
484 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
485 int vector, int level, int trig_mode)
488 struct kvm_vcpu *vcpu = apic->vcpu;
490 switch (delivery_mode) {
492 vcpu->arch.apic_arb_prio++;
494 /* FIXME add logic for vcpu on reset */
495 if (unlikely(!apic_enabled(apic)))
499 apic_debug("level trig mode for vector %d", vector);
500 apic_set_vector(vector, apic->regs + APIC_TMR);
502 apic_clear_vector(vector, apic->regs + APIC_TMR);
504 result = !apic_test_and_set_irr(vector, apic);
505 trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
506 trig_mode, vector, !result);
509 apic_debug("level trig mode repeatedly for "
510 "vector %d", vector);
514 kvm_make_request(KVM_REQ_EVENT, vcpu);
519 apic_debug("Ignoring delivery mode 3\n");
523 apic_debug("Ignoring guest SMI\n");
528 kvm_inject_nmi(vcpu);
533 if (!trig_mode || level) {
535 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
536 kvm_make_request(KVM_REQ_EVENT, vcpu);
539 apic_debug("Ignoring de-assert INIT to vcpu %d\n",
544 case APIC_DM_STARTUP:
545 apic_debug("SIPI to vcpu %d vector 0x%02x\n",
546 vcpu->vcpu_id, vector);
547 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
549 vcpu->arch.sipi_vector = vector;
550 vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
551 kvm_make_request(KVM_REQ_EVENT, vcpu);
558 * Should only be called by kvm_apic_local_deliver() with LVT0,
559 * before NMI watchdog was enabled. Already handled by
560 * kvm_apic_accept_pic_intr().
565 printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
572 int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
574 return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
577 static int apic_set_eoi(struct kvm_lapic *apic)
579 int vector = apic_find_highest_isr(apic);
581 trace_kvm_eoi(apic, vector);
584 * Not every EOI write has a corresponding bit set in the ISR;
585 * one example is when the kernel checks the timer in setup_IO_APIC.
590 apic_clear_isr(vector, apic);
591 apic_update_ppr(apic);
593 if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
594 kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
596 if (apic_test_vector(vector, apic->regs + APIC_TMR))
597 trigger_mode = IOAPIC_LEVEL_TRIG;
599 trigger_mode = IOAPIC_EDGE_TRIG;
600 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
602 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
606 static void apic_send_ipi(struct kvm_lapic *apic)
608 u32 icr_low = kvm_apic_get_reg(apic, APIC_ICR);
609 u32 icr_high = kvm_apic_get_reg(apic, APIC_ICR2);
610 struct kvm_lapic_irq irq;
612 irq.vector = icr_low & APIC_VECTOR_MASK;
613 irq.delivery_mode = icr_low & APIC_MODE_MASK;
614 irq.dest_mode = icr_low & APIC_DEST_MASK;
615 irq.level = icr_low & APIC_INT_ASSERT;
616 irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
617 irq.shorthand = icr_low & APIC_SHORT_MASK;
618 if (apic_x2apic_mode(apic))
619 irq.dest_id = icr_high;
621 irq.dest_id = GET_APIC_DEST_FIELD(icr_high);
623 trace_kvm_apic_ipi(icr_low, irq.dest_id);
625 apic_debug("icr_high 0x%x, icr_low 0x%x, "
626 "short_hand 0x%x, dest 0x%x, trig_mode 0x%x, level 0x%x, "
627 "dest_mode 0x%x, delivery_mode 0x%x, vector 0x%x\n",
628 icr_high, icr_low, irq.shorthand, irq.dest_id,
629 irq.trig_mode, irq.level, irq.dest_mode, irq.delivery_mode,
632 kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq);
635 static u32 apic_get_tmcct(struct kvm_lapic *apic)
641 ASSERT(apic != NULL);
643 /* if initial count is 0, current count should also be 0 */
644 if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
647 remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
648 if (ktime_to_ns(remaining) < 0)
649 remaining = ktime_set(0, 0);
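/*
 * Convert the time left on the hrtimer back into timer ticks: the
 * current count is the remaining nanoseconds (modulo one period for
 * periodic mode) divided by the length of one tick, i.e. the bus
 * cycle length times the configured divider.
 */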
651 ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
652 tmcct = div64_u64(ns,
653 (APIC_BUS_CYCLE_NS * apic->divide_count));
658 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
660 struct kvm_vcpu *vcpu = apic->vcpu;
661 struct kvm_run *run = vcpu->run;
663 kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
664 run->tpr_access.rip = kvm_rip_read(vcpu);
665 run->tpr_access.is_write = write;
668 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
670 if (apic->vcpu->arch.tpr_access_reporting)
671 __report_tpr_access(apic, write);
674 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
678 if (offset >= LAPIC_MMIO_LENGTH)
683 if (apic_x2apic_mode(apic))
684 val = kvm_apic_id(apic);
686 val = kvm_apic_id(apic) << 24;
689 apic_debug("Access APIC ARBPRI register which is for P6\n");
692 case APIC_TMCCT: /* Timer CCR */
693 if (apic_lvtt_tscdeadline(apic))
696 val = apic_get_tmcct(apic);
699 apic_update_ppr(apic);
700 val = kvm_apic_get_reg(apic, offset);
703 report_tpr_access(apic, false);
706 val = kvm_apic_get_reg(apic, offset);
713 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
715 return container_of(dev, struct kvm_lapic, dev);
718 static int apic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
721 unsigned char alignment = offset & 0xf;
723 /* this bitmask has a bit cleared for each reserved register */
724 static const u64 rmask = 0x43ff01ffffffe70cULL;
726 if ((alignment + len) > 4) {
727 apic_debug("KVM_APIC_READ: alignment error %x %d\n",
732 if (offset > 0x3f0 || !(rmask & (1ULL << (offset >> 4)))) {
733 apic_debug("KVM_APIC_READ: read reserved register %x\n",
738 result = __apic_read(apic, offset & ~0xf);
740 trace_kvm_apic_read(offset, result);
746 memcpy(data, (char *)&result + alignment, len);
749 printk(KERN_ERR "Local APIC read with len = %x, "
750 "should be 1,2, or 4 instead\n", len);
756 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
758 return kvm_apic_hw_enabled(apic) &&
759 addr >= apic->base_address &&
760 addr < apic->base_address + LAPIC_MMIO_LENGTH;
763 static int apic_mmio_read(struct kvm_io_device *this,
764 gpa_t address, int len, void *data)
766 struct kvm_lapic *apic = to_lapic(this);
767 u32 offset = address - apic->base_address;
769 if (!apic_mmio_in_range(apic, address))
772 apic_reg_read(apic, offset, len, data);
777 static void update_divide_count(struct kvm_lapic *apic)
779 u32 tmp1, tmp2, tdcr;
781 tdcr = kvm_apic_get_reg(apic, APIC_TDCR);
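/*
 * The divide configuration register encodes the divider in bits 0, 1
 * and 3; the arithmetic below maps those bits to a power of two
 * between 1 and 128 (0b1011 means divide by 1, 0b0000 divide by 2,
 * and so on).
 */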
783 tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
784 apic->divide_count = 0x1 << (tmp2 & 0x7);
786 apic_debug("timer divide count is 0x%x\n",
790 static void start_apic_timer(struct kvm_lapic *apic)
793 atomic_set(&apic->lapic_timer.pending, 0);
795 if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
796 /* lapic timer in oneshot or periodic mode */
797 now = apic->lapic_timer.timer.base->get_time();
798 apic->lapic_timer.period = (u64)kvm_apic_get_reg(apic, APIC_TMICT)
799 * APIC_BUS_CYCLE_NS * apic->divide_count;
801 if (!apic->lapic_timer.period)
804 * Do not allow the guest to program periodic timers with small
805 * interval, since the hrtimers are not throttled by the host
808 if (apic_lvtt_period(apic)) {
809 s64 min_period = min_timer_period_us * 1000LL;
811 if (apic->lapic_timer.period < min_period) {
813 "kvm: vcpu %i: requested %lld ns "
814 "lapic timer period limited to %lld ns\n",
816 apic->lapic_timer.period, min_period);
817 apic->lapic_timer.period = min_period;
821 hrtimer_start(&apic->lapic_timer.timer,
822 ktime_add_ns(now, apic->lapic_timer.period),
825 apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
827 "timer initial count 0x%x, period %lldns, "
828 "expire @ 0x%016" PRIx64 ".\n", __func__,
829 APIC_BUS_CYCLE_NS, ktime_to_ns(now),
830 kvm_apic_get_reg(apic, APIC_TMICT),
831 apic->lapic_timer.period,
832 ktime_to_ns(ktime_add_ns(now,
833 apic->lapic_timer.period)));
834 } else if (apic_lvtt_tscdeadline(apic)) {
835 /* lapic timer in tsc deadline mode */
836 u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
838 struct kvm_vcpu *vcpu = apic->vcpu;
839 unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
842 if (unlikely(!tscdeadline || !this_tsc_khz))
845 local_irq_save(flags);
847 now = apic->lapic_timer.timer.base->get_time();
848 guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
849 if (likely(tscdeadline > guest_tsc)) {
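/*
 * Convert the distance to the deadline from TSC ticks to nanoseconds:
 * ns = ticks * 1000000 / tsc_khz, since tsc_khz is the TSC frequency
 * in cycles per millisecond.
 */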
850 ns = (tscdeadline - guest_tsc) * 1000000ULL;
851 do_div(ns, this_tsc_khz);
853 hrtimer_start(&apic->lapic_timer.timer,
854 ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
856 local_irq_restore(flags);
860 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
862 int nmi_wd_enabled = apic_lvt_nmi_mode(kvm_apic_get_reg(apic, APIC_LVT0));
864 if (apic_lvt_nmi_mode(lvt0_val)) {
865 if (!nmi_wd_enabled) {
866 apic_debug("Receive NMI setting on APIC_LVT0 "
867 "for cpu %d\n", apic->vcpu->vcpu_id);
868 apic->vcpu->kvm->arch.vapics_in_nmi_mode++;
870 } else if (nmi_wd_enabled)
871 apic->vcpu->kvm->arch.vapics_in_nmi_mode--;
874 static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
878 trace_kvm_apic_write(reg, val);
881 case APIC_ID: /* Local APIC ID */
882 if (!apic_x2apic_mode(apic))
883 apic_set_reg(apic, APIC_ID, val);
889 report_tpr_access(apic, true);
890 apic_set_tpr(apic, val & 0xff);
898 if (!apic_x2apic_mode(apic))
899 apic_set_reg(apic, APIC_LDR, val & APIC_LDR_MASK);
905 if (!apic_x2apic_mode(apic))
906 apic_set_reg(apic, APIC_DFR, val | 0x0FFFFFFF);
913 if (kvm_apic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
914 mask |= APIC_SPIV_DIRECTED_EOI;
915 apic_set_spiv(apic, val & mask);
916 if (!(val & APIC_SPIV_APIC_ENABLED)) {
920 for (i = 0; i < APIC_LVT_NUM; i++) {
921 lvt_val = kvm_apic_get_reg(apic,
922 APIC_LVTT + 0x10 * i);
923 apic_set_reg(apic, APIC_LVTT + 0x10 * i,
924 lvt_val | APIC_LVT_MASKED);
926 atomic_set(&apic->lapic_timer.pending, 0);
932 /* No delay here, so we always clear the pending bit */
933 apic_set_reg(apic, APIC_ICR, val & ~(1 << 12));
938 if (!apic_x2apic_mode(apic))
940 apic_set_reg(apic, APIC_ICR2, val);
944 apic_manage_nmi_watchdog(apic, val);
949 /* TODO: Check vector */
950 if (!kvm_apic_sw_enabled(apic))
951 val |= APIC_LVT_MASKED;
953 val &= apic_lvt_mask[(reg - APIC_LVTT) >> 4];
954 apic_set_reg(apic, reg, val);
959 if ((kvm_apic_get_reg(apic, APIC_LVTT) &
960 apic->lapic_timer.timer_mode_mask) !=
961 (val & apic->lapic_timer.timer_mode_mask))
962 hrtimer_cancel(&apic->lapic_timer.timer);
964 if (!kvm_apic_sw_enabled(apic))
965 val |= APIC_LVT_MASKED;
966 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
967 apic_set_reg(apic, APIC_LVTT, val);
971 if (apic_lvtt_tscdeadline(apic))
974 hrtimer_cancel(&apic->lapic_timer.timer);
975 apic_set_reg(apic, APIC_TMICT, val);
976 start_apic_timer(apic);
981 apic_debug("KVM_WRITE:TDCR %x\n", val);
982 apic_set_reg(apic, APIC_TDCR, val);
983 update_divide_count(apic);
987 if (apic_x2apic_mode(apic) && val != 0) {
988 apic_debug("KVM_WRITE:ESR not zero %x\n", val);
994 if (apic_x2apic_mode(apic)) {
995 apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
1004 apic_debug("Local APIC Write to read-only register %x\n", reg);
1008 static int apic_mmio_write(struct kvm_io_device *this,
1009 gpa_t address, int len, const void *data)
1011 struct kvm_lapic *apic = to_lapic(this);
1012 unsigned int offset = address - apic->base_address;
1015 if (!apic_mmio_in_range(apic, address))
1019 * APIC registers must be aligned on a 128-bit boundary.
1020 * 32/64/128-bit registers must be accessed through 32-bit loads and stores.
1023 if (len != 4 || (offset & 0xf)) {
1024 /* Don't shout loud, $infamous_os would cause only noise. */
1025 apic_debug("apic write: bad size=%d %lx\n", len, (long)address);
1031 /* EOI writes are far too frequent to log */
1032 if (offset != APIC_EOI)
1033 apic_debug("%s: offset 0x%x with length 0x%x, and value is "
1034 "0x%x\n", __func__, offset, len, val);
1036 apic_reg_write(apic, offset & 0xff0, val);
1041 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
1043 if (kvm_vcpu_has_lapic(vcpu))
1044 apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
1046 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
1048 void kvm_free_lapic(struct kvm_vcpu *vcpu)
1050 struct kvm_lapic *apic = vcpu->arch.apic;
1052 if (!vcpu->arch.apic)
1055 hrtimer_cancel(&apic->lapic_timer.timer);
1057 if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
1058 static_key_slow_dec_deferred(&apic_hw_disabled);
1060 if (!(kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_APIC_ENABLED))
1061 static_key_slow_dec_deferred(&apic_sw_disabled);
1064 free_page((unsigned long)apic->regs);
1070 *----------------------------------------------------------------------
1072 *----------------------------------------------------------------------
1075 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
1077 struct kvm_lapic *apic = vcpu->arch.apic;
1079 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
1080 apic_lvtt_period(apic))
1083 return apic->lapic_timer.tscdeadline;
1086 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
1088 struct kvm_lapic *apic = vcpu->arch.apic;
1090 if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
1091 apic_lvtt_period(apic))
1094 hrtimer_cancel(&apic->lapic_timer.timer);
1095 apic->lapic_timer.tscdeadline = data;
1096 start_apic_timer(apic);
1099 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
1101 struct kvm_lapic *apic = vcpu->arch.apic;
1103 if (!kvm_vcpu_has_lapic(vcpu))
1106 apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
1107 | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
1110 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
1114 if (!kvm_vcpu_has_lapic(vcpu))
1117 tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
1119 return (tpr & 0xf0) >> 4;
1122 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
1124 struct kvm_lapic *apic = vcpu->arch.apic;
1127 value |= MSR_IA32_APICBASE_BSP;
1128 vcpu->arch.apic_base = value;
1132 /* update jump label if enable bit changes */
1133 if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
1134 if (value & MSR_IA32_APICBASE_ENABLE)
1135 static_key_slow_dec_deferred(&apic_hw_disabled);
1137 static_key_slow_inc(&apic_hw_disabled.key);
1140 if (!kvm_vcpu_is_bsp(apic->vcpu))
1141 value &= ~MSR_IA32_APICBASE_BSP;
1143 vcpu->arch.apic_base = value;
1144 if (apic_x2apic_mode(apic)) {
1145 u32 id = kvm_apic_id(apic);
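/*
 * x2APIC logical destinations are cluster based: the low 16 bits of
 * the LDR carry one bit per CPU within a cluster, selected by the low
 * nibble of the APIC ID, and the upper bits identify the cluster.
 */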
1146 u32 ldr = ((id & ~0xf) << 16) | (1 << (id & 0xf));
1147 apic_set_reg(apic, APIC_LDR, ldr);
1149 apic->base_address = apic->vcpu->arch.apic_base &
1150 MSR_IA32_APICBASE_BASE;
1152 /* with FSB interrupt delivery, APIC functionality can be restarted */
1153 apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
1154 "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
1158 void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1160 struct kvm_lapic *apic;
1163 apic_debug("%s\n", __func__);
1166 apic = vcpu->arch.apic;
1167 ASSERT(apic != NULL);
1169 /* Stop the timer in case it's a reset to an active apic */
1170 hrtimer_cancel(&apic->lapic_timer.timer);
1172 apic_set_reg(apic, APIC_ID, vcpu->vcpu_id << 24);
1173 kvm_apic_set_version(apic->vcpu);
1175 for (i = 0; i < APIC_LVT_NUM; i++)
1176 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1177 apic_set_reg(apic, APIC_LVT0,
1178 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1180 apic_set_reg(apic, APIC_DFR, 0xffffffffU);
1181 apic_set_spiv(apic, 0xff);
1182 apic_set_reg(apic, APIC_TASKPRI, 0);
1183 apic_set_reg(apic, APIC_LDR, 0);
1184 apic_set_reg(apic, APIC_ESR, 0);
1185 apic_set_reg(apic, APIC_ICR, 0);
1186 apic_set_reg(apic, APIC_ICR2, 0);
1187 apic_set_reg(apic, APIC_TDCR, 0);
1188 apic_set_reg(apic, APIC_TMICT, 0);
1189 for (i = 0; i < 8; i++) {
1190 apic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
1191 apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
1192 apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
1194 apic->irr_pending = false;
1195 apic->isr_count = 0;
1196 apic->highest_isr_cache = -1;
1197 update_divide_count(apic);
1198 atomic_set(&apic->lapic_timer.pending, 0);
1199 if (kvm_vcpu_is_bsp(vcpu))
1200 kvm_lapic_set_base(vcpu,
1201 vcpu->arch.apic_base | MSR_IA32_APICBASE_BSP);
1202 vcpu->arch.pv_eoi.msr_val = 0;
1203 apic_update_ppr(apic);
1205 vcpu->arch.apic_arb_prio = 0;
1206 vcpu->arch.apic_attention = 0;
1208 apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
1209 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
1210 vcpu, kvm_apic_id(apic),
1211 vcpu->arch.apic_base, apic->base_address);
1215 *----------------------------------------------------------------------
1217 *----------------------------------------------------------------------
1220 static bool lapic_is_periodic(struct kvm_lapic *apic)
1222 return apic_lvtt_period(apic);
1225 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
1227 struct kvm_lapic *apic = vcpu->arch.apic;
1229 if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
1230 apic_lvt_enabled(apic, APIC_LVTT))
1231 return atomic_read(&apic->lapic_timer.pending);
1236 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
1238 u32 reg = kvm_apic_get_reg(apic, lvt_type);
1239 int vector, mode, trig_mode;
1241 if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
1242 vector = reg & APIC_VECTOR_MASK;
1243 mode = reg & APIC_MODE_MASK;
1244 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
1245 return __apic_accept_irq(apic, mode, vector, 1, trig_mode);
1250 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
1252 struct kvm_lapic *apic = vcpu->arch.apic;
1255 kvm_apic_local_deliver(apic, APIC_LVT0);
1258 static const struct kvm_io_device_ops apic_mmio_ops = {
1259 .read = apic_mmio_read,
1260 .write = apic_mmio_write,
1263 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
1265 struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
1266 struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
1267 struct kvm_vcpu *vcpu = apic->vcpu;
1268 wait_queue_head_t *q = &vcpu->wq;
1271 * There is a race window between reading and incrementing, but we do
1272 * not care about potentially losing timer events in the !reinject
1273 * case anyway. Note: KVM_REQ_PENDING_TIMER is implicitly checked
1274 * in vcpu_enter_guest.
1276 if (!atomic_read(&ktimer->pending)) {
1277 atomic_inc(&ktimer->pending);
1278 /* FIXME: this code should not know anything about vcpus */
1279 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1282 if (waitqueue_active(q))
1283 wake_up_interruptible(q);
1285 if (lapic_is_periodic(apic)) {
1286 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
1287 return HRTIMER_RESTART;
1289 return HRTIMER_NORESTART;
1292 int kvm_create_lapic(struct kvm_vcpu *vcpu)
1294 struct kvm_lapic *apic;
1296 ASSERT(vcpu != NULL);
1297 apic_debug("apic_init %d\n", vcpu->vcpu_id);
1299 apic = kzalloc(sizeof(*apic), GFP_KERNEL);
1303 vcpu->arch.apic = apic;
1305 apic->regs = (void *)get_zeroed_page(GFP_KERNEL);
1307 printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
1309 goto nomem_free_apic;
1313 hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
1315 apic->lapic_timer.timer.function = apic_timer_fn;
1318 * APIC is created enabled. This will prevent kvm_lapic_set_base from
1319 * thinking that the APIC state has changed.
1321 vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
1322 kvm_lapic_set_base(vcpu,
1323 APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE);
1325 static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */
1326 kvm_lapic_reset(vcpu);
1327 kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
1336 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
1338 struct kvm_lapic *apic = vcpu->arch.apic;
1341 if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
1344 apic_update_ppr(apic);
1345 highest_irr = apic_find_highest_irr(apic);
1346 if ((highest_irr == -1) ||
1347 ((highest_irr & 0xF0) <= kvm_apic_get_reg(apic, APIC_PROCPRI)))
1352 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
1354 u32 lvt0 = kvm_apic_get_reg(vcpu->arch.apic, APIC_LVT0);
1357 if (!kvm_apic_hw_enabled(vcpu->arch.apic))
1359 if ((lvt0 & APIC_LVT_MASKED) == 0 &&
1360 GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
1365 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
1367 struct kvm_lapic *apic = vcpu->arch.apic;
1369 if (!kvm_vcpu_has_lapic(vcpu))
1372 if (atomic_read(&apic->lapic_timer.pending) > 0) {
1373 if (kvm_apic_local_deliver(apic, APIC_LVTT))
1374 atomic_dec(&apic->lapic_timer.pending);
1378 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
1380 int vector = kvm_apic_has_interrupt(vcpu);
1381 struct kvm_lapic *apic = vcpu->arch.apic;
1386 apic_set_isr(vector, apic);
1387 apic_update_ppr(apic);
1388 apic_clear_irr(vector, apic);
1392 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
1394 struct kvm_lapic *apic = vcpu->arch.apic;
1396 kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
1397 kvm_apic_set_version(vcpu);
1398 apic_set_spiv(apic, kvm_apic_get_reg(apic, APIC_SPIV));
1400 apic_update_ppr(apic);
1401 hrtimer_cancel(&apic->lapic_timer.timer);
1402 update_divide_count(apic);
1403 start_apic_timer(apic);
1404 apic->irr_pending = true;
1405 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
1406 apic->highest_isr_cache = -1;
1407 kvm_make_request(KVM_REQ_EVENT, vcpu);
1410 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
1412 struct hrtimer *timer;
1414 if (!kvm_vcpu_has_lapic(vcpu))
1417 timer = &vcpu->arch.apic->lapic_timer.timer;
1418 if (hrtimer_cancel(timer))
1419 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
1423 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
1425 * Detect whether guest triggered PV EOI since the
1426 * last entry. If yes, set EOI on the guest's behalf.
1427 * Clear PV EOI in guest memory in any case.
1429 static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
1430 struct kvm_lapic *apic)
1435 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
1436 * and KVM_PV_EOI_ENABLED in guest memory as follows:
1438 * KVM_APIC_PV_EOI_PENDING is unset:
1439 * -> host disabled PV EOI.
1440 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
1441 * -> host enabled PV EOI, guest did not execute EOI yet.
1442 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
1443 * -> host enabled PV EOI, guest executed EOI.
1445 BUG_ON(!pv_eoi_enabled(vcpu));
1446 pending = pv_eoi_get_pending(vcpu);
1448 * Clear pending bit in any case: it will be set again on vmentry.
1449 * While this might not be ideal from a performance point of view,
1450 * this makes sure pv eoi is only enabled when we know it's safe.
1452 pv_eoi_clr_pending(vcpu);
1455 vector = apic_set_eoi(apic);
1456 trace_kvm_pv_eoi(apic, vector);
1459 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1464 if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
1465 apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
1467 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1470 vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
1471 data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
1472 kunmap_atomic(vapic);
1474 apic_set_tpr(vcpu->arch.apic, data & 0xff);
1478 * apic_sync_pv_eoi_to_guest - called before vmentry
1480 * Detect whether it's safe to enable PV EOI and, if so, set it.
1483 static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
1484 struct kvm_lapic *apic)
1486 if (!pv_eoi_enabled(vcpu) ||
1487 /* IRR set or many bits in ISR: could be nested. */
1488 apic->irr_pending ||
1489 /* Cache not set: could be safe but we don't bother. */
1490 apic->highest_isr_cache == -1 ||
1491 /* Need EOI to update ioapic. */
1492 kvm_ioapic_handles_vector(vcpu->kvm, apic->highest_isr_cache)) {
1494 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
1495 * so we need not do anything here.
1500 pv_eoi_set_pending(apic->vcpu);
1503 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1506 int max_irr, max_isr;
1507 struct kvm_lapic *apic = vcpu->arch.apic;
1510 apic_sync_pv_eoi_to_guest(vcpu, apic);
1512 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1515 tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff;
1516 max_irr = apic_find_highest_irr(apic);
1519 max_isr = apic_find_highest_isr(apic);
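/*
 * Pack the layout the vapic page expects: byte 0 holds the TPR,
 * byte 1 the priority class of the highest in-service vector and
 * byte 3 the highest pending vector from the IRR.
 */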
1522 data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
1524 vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
1525 *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
1526 kunmap_atomic(vapic);
1529 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1531 vcpu->arch.apic->vapic_addr = vapic_addr;
1533 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1535 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1538 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1540 struct kvm_lapic *apic = vcpu->arch.apic;
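/*
 * Each x2APIC MSR maps to the xAPIC register at MMIO offset
 * (msr - APIC_BASE_MSR) * 16, so the regular register read/write
 * helpers can be reused for MSR accesses.
 */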
1541 u32 reg = (msr - APIC_BASE_MSR) << 4;
1543 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1546 /* if this is ICR write vector before command */
1548 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1549 return apic_reg_write(apic, reg, (u32)data);
1552 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
1554 struct kvm_lapic *apic = vcpu->arch.apic;
1555 u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;
1557 if (!irqchip_in_kernel(vcpu->kvm) || !apic_x2apic_mode(apic))
1560 if (apic_reg_read(apic, reg, 4, &low))
1563 apic_reg_read(apic, APIC_ICR2, 4, &high);
1565 *data = (((u64)high) << 32) | low;
1570 int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
1572 struct kvm_lapic *apic = vcpu->arch.apic;
1574 if (!kvm_vcpu_has_lapic(vcpu))
1577 /* if this is ICR write vector before command */
1578 if (reg == APIC_ICR)
1579 apic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
1580 return apic_reg_write(apic, reg, (u32)data);
1583 int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
1585 struct kvm_lapic *apic = vcpu->arch.apic;
1588 if (!kvm_vcpu_has_lapic(vcpu))
1591 if (apic_reg_read(apic, reg, 4, &low))
1593 if (reg == APIC_ICR)
1594 apic_reg_read(apic, APIC_ICR2, 4, &high);
1596 *data = (((u64)high) << 32) | low;
1601 int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
1603 u64 addr = data & ~KVM_MSR_ENABLED;
1604 if (!IS_ALIGNED(addr, 4))
1607 vcpu->arch.pv_eoi.msr_val = data;
1608 if (!pv_eoi_enabled(vcpu))
1610 return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1614 void kvm_lapic_init(void)
1616 /* do not patch jump label more than once per second */
1617 jump_label_rate_limit(&apic_hw_disabled, HZ);
1618 jump_label_rate_limit(&apic_sw_disabled, HZ);