// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "lapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
/* 32-bit x86 has no native 64-bit modulo, so open-code it via div64_u64() */
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION			(0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16))
#define LAPIC_MMIO_LENGTH		(1 << 12)
/* the following define is not in apicdef.h */
#define MAX_APIC_VECTOR			256
#define APIC_VECTORS_PER_REG		32
static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN	100	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX	10000	/* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT	1000
#define LAPIC_TIMER_ADVANCE_NS_MAX	5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP	8
static inline int apic_test_vector(int vec, void *bitmap)
{
	return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}
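
/*
 * Worked example (assuming the VEC_POS/REG_POS definitions in lapic.h):
 * the 256 vectors are stored 32 per 16-byte register, so for vec = 0x31 (49),
 * REG_POS(49) = (49 >> 5) << 4 = 0x10 (the second register) and
 * VEC_POS(49) = 49 & 31 = 17 (bit 17 within that register).
 */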
bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return apic_test_vector(vector, apic->regs + APIC_ISR) ||
		apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
	return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
	return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
	return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}
#define LVT_MASK	\
	(APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK	\
	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
	return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return pi_inject_timer && kvm_vcpu_apicv_active(vcpu);
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops.set_hv_timer
	       && !(kvm_mwait_in_guest(vcpu->kvm) ||
		    kvm_can_post_timer_interrupt(vcpu));
}
EXPORT_SYMBOL_GPL(kvm_can_use_hv_timer);

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
	return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}
static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
		u32 dest_id, struct kvm_lapic ***cluster, u16 *mask) {
	switch (map->mode) {
	case KVM_APIC_MODE_X2APIC: {
		u32 offset = (dest_id >> 16) * 16;
		u32 max_apic_id = map->max_apic_id;

		if (offset <= max_apic_id) {
			u8 cluster_size = min(max_apic_id - offset + 1, 16U);

			offset = array_index_nospec(offset, map->max_apic_id + 1);
			*cluster = &map->phys_map[offset];
			*mask = dest_id & (0xffff >> (16 - cluster_size));
		} else {
			*mask = 0;
		}

		return true;
		}
	case KVM_APIC_MODE_XAPIC_FLAT:
		*cluster = map->xapic_flat_map;
		*mask = dest_id & 0xff;
		return true;
	case KVM_APIC_MODE_XAPIC_CLUSTER:
		*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
		*mask = dest_id & 0xf;
		return true;
	default:
		/* Not optimized. */
		return false;
	}
}
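
/*
 * Worked example for the x2APIC case above: a logical dest_id of 0x00020008
 * selects cluster 2, so offset = 2 * 16 = 32 and *cluster points at
 * phys_map[32]; the low half 0x0008 selects bit 3, i.e. the APIC with ID 35.
 */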
static void kvm_apic_map_free(struct rcu_head *rcu)
{
	struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

	kvfree(map);
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock_held.
 */
enum {
	CLEAN,
	UPDATE_IN_PROGRESS,
	DIRTY
};
void kvm_recalculate_apic_map(struct kvm *kvm)
{
	struct kvm_apic_map *new, *old = NULL;
	struct kvm_vcpu *vcpu;
	int i;
	u32 max_id = 255; /* enough space for any xAPIC ID */

	/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
	if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
		return;

	WARN_ONCE(!irqchip_in_kernel(kvm),
		  "Dirty APIC map without an in-kernel local APIC");

	mutex_lock(&kvm->arch.apic_map_lock);
	/*
	 * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map
	 * (if clean) or the APIC registers (if dirty).
	 */
	if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
				   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
		/* Someone else has updated the map. */
		mutex_unlock(&kvm->arch.apic_map_lock);
		return;
	}

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_apic_present(vcpu))
			max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

	new = kvzalloc(sizeof(struct kvm_apic_map) +
	                   sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
			   GFP_KERNEL_ACCOUNT);

	if (!new)
		goto out;

	new->max_apic_id = max_id;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvm_lapic *apic = vcpu->arch.apic;
		struct kvm_lapic **cluster;
		u16 mask;
		u32 ldr;
		u8 xapic_id;
		u32 x2apic_id;

		if (!kvm_apic_present(vcpu))
			continue;

		xapic_id = kvm_xapic_id(apic);
		x2apic_id = kvm_x2apic_id(apic);

		/* Hotplug hack: see kvm_apic_match_physical_addr(), ... */
		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
				x2apic_id <= new->max_apic_id)
			new->phys_map[x2apic_id] = apic;
		/*
		 * ... xAPIC ID of VCPUs with APIC ID > 0xff will wrap-around,
		 * prevent them from masking VCPUs with APIC ID <= 0xff.
		 */
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

		if (!kvm_apic_sw_enabled(apic))
			continue;

		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
			new->mode |= KVM_APIC_MODE_X2APIC;
		} else if (ldr) {
			ldr = GET_APIC_LOGICAL_ID(ldr);
			if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
				new->mode |= KVM_APIC_MODE_XAPIC_FLAT;
			else
				new->mode |= KVM_APIC_MODE_XAPIC_CLUSTER;
		}

		if (!kvm_apic_map_get_logical_dest(new, ldr, &cluster, &mask))
			continue;

		if (mask)
			cluster[ffs(mask) - 1] = apic;
	}
out:
	old = rcu_dereference_protected(kvm->arch.apic_map,
			lockdep_is_held(&kvm->arch.apic_map_lock));
	rcu_assign_pointer(kvm->arch.apic_map, new);
	/*
	 * Write kvm->arch.apic_map before clearing apic->apic_map_dirty.
	 * If another update has come in, leave it DIRTY.
	 */
	atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
			       UPDATE_IN_PROGRESS, CLEAN);
	mutex_unlock(&kvm->arch.apic_map_lock);

	if (old)
		call_rcu(&old->rcu, kvm_apic_map_free);

	kvm_make_scan_ioapic_request(kvm);
}
static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
	bool enabled = val & APIC_SPIV_APIC_ENABLED;

	kvm_lapic_set_reg(apic, APIC_SPIV, val);

	if (enabled != apic->sw_enabled) {
		apic->sw_enabled = enabled;
		if (enabled)
			static_branch_slow_dec_deferred(&apic_sw_disabled);
		else
			static_branch_inc(&apic_sw_disabled.key);

		atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	}

	/* Check if there are APF page ready requests pending */
	if (enabled)
		kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}
static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
	kvm_lapic_set_reg(apic, APIC_ID, id << 24);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
	kvm_lapic_set_reg(apic, APIC_LDR, id);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
	kvm_lapic_set_reg(apic, APIC_DFR, val);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
	return ((id >> 4) << 16) | (1 << (id & 0xf));
}
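
/*
 * Worked example: for x2APIC ID 0x23 the derived LDR is
 * ((0x23 >> 4) << 16) | (1 << (0x23 & 0xf)) = 0x00020008, i.e. cluster 2,
 * logical bit 3, matching the decode in kvm_apic_map_get_logical_dest().
 */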
static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
	u32 ldr = kvm_apic_calc_x2apic_ldr(id);

	WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

	kvm_lapic_set_reg(apic, APIC_ID, id);
	kvm_lapic_set_reg(apic, APIC_LDR, ldr);
	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
	return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
	return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
	return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}
void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 v = APIC_VERSION;

	if (!lapic_in_kernel(vcpu))
		return;

	/*
	 * KVM emulates the 82093AA datasheet (with its in-kernel IOAPIC
	 * implementation), which doesn't have an EOI register; some buggy
	 * OSes (e.g. Windows with the Hyper-V role) disable EOI broadcast
	 * in the LAPIC without checking the IOAPIC version first, so
	 * level-triggered interrupts would never get EOIed in the IOAPIC.
	 */
	if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
	    !ioapic_in_kernel(vcpu->kvm))
		v |= APIC_LVR_DIRECTED_EOI;
	kvm_lapic_set_reg(apic, APIC_LVR, v);
}
static const unsigned int apic_lvt_mask[KVM_APIC_LVT_NUM] = {
	LVT_MASK,		/* part LVTT mask, timer mode mask added at runtime */
	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
	LINT_MASK, LINT_MASK,	/* LVT0-1 */
	LVT_MASK		/* LVTERR */
};
static int find_highest_vector(void *bitmap)
{
	int vec;
	u32 *reg;

	for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
	     vec >= 0; vec -= APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		if (*reg)
			return __fls(*reg) + vec;
	}

	return -1;
}

static u8 count_vectors(void *bitmap)
{
	int vec;
	u32 *reg;
	u8 count = 0;

	for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
		reg = bitmap + REG_POS(vec);
		count += hweight32(*reg);
	}

	return count;
}
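
/*
 * Worked example: if only vector 0x45 (69) is set, the scan above finds it
 * in the register at REG_POS(64) = 0x20 as bit 5, so __fls(*reg) + vec
 * yields 5 + 64 = 69.
 */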
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
	u32 i, vec;
	u32 pir_val, irr_val, prev_irr_val;
	int max_updated_irr;

	max_updated_irr = -1;
	*max_irr = -1;

	for (i = vec = 0; i <= 7; i++, vec += 32) {
		pir_val = READ_ONCE(pir[i]);
		irr_val = *((u32 *)(regs + APIC_IRR + i * 0x10));
		if (pir_val) {
			prev_irr_val = irr_val;
			irr_val |= xchg(&pir[i], 0);
			*((u32 *)(regs + APIC_IRR + i * 0x10)) = irr_val;
			if (prev_irr_val != irr_val) {
				max_updated_irr =
					__fls(irr_val ^ prev_irr_val) + vec;
			}
		}
		if (irr_val)
			*max_irr = __fls(irr_val) + vec;
	}

	return ((max_updated_irr != -1) &&
		(max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);
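
/*
 * Worked example for __kvm_apic_update_irr(): if pir[1] == 0x100 (vector 40
 * posted) and the corresponding IRR word was empty, the xchg() moves the bit
 * into the IRR and max_updated_irr becomes __fls(0x100) + 32 = 40.  The
 * function returns true only when that newly set vector is also the highest
 * vector pending overall.
 */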
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __kvm_apic_update_irr(pir, apic->regs, max_irr);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
	return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that irr_pending is just a hint. It will always be true
	 * with virtual interrupt delivery enabled.
	 */
	if (!apic->irr_pending)
		return -1;

	result = apic_search_irr(apic);
	ASSERT(result == -1 || result >= 16);

	return result;
}
static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	vcpu = apic->vcpu;

	if (unlikely(vcpu->arch.apicv_active)) {
		/* need to update RVI */
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		static_call(kvm_x86_hwapic_irr_update)(vcpu,
				apic_find_highest_irr(apic));
	} else {
		apic->irr_pending = false;
		kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
		if (apic_search_irr(apic) != -1)
			apic->irr_pending = true;
	}
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
	apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);
static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;

	if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * With APIC virtualization enabled, all caching is disabled
	 * because the processor can modify ISR under the hood.  Instead
	 * just set SVI.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		static_call(kvm_x86_hwapic_isr_update)(vcpu, vec);
	else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/*
		 * The ISR (in-service register) bit is set when injecting
		 * an interrupt.  The highest vector is injected, so the
		 * latest bit set matches the highest bit in the ISR.
		 */
		apic->highest_isr_cache = vec;
	}
}
static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
	int result;

	/*
	 * Note that isr_count is always 1, and highest_isr_cache
	 * is always -1, with APIC virtualization enabled.
	 */
	if (!apic->isr_count)
		return -1;
	if (likely(apic->highest_isr_cache != -1))
		return apic->highest_isr_cache;

	result = find_highest_vector(apic->regs + APIC_ISR);
	ASSERT(result == -1 || result >= 16);

	return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
	struct kvm_vcpu *vcpu;
	if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
		return;

	vcpu = apic->vcpu;

	/*
	 * We do get here for APIC virtualization enabled if the guest
	 * uses the Hyper-V APIC enlightenment.  In this case we may need
	 * to trigger a new interrupt delivery by writing the SVI field;
	 * on the other hand isr_count and highest_isr_cache are unused
	 * and must be left alone.
	 */
	if (unlikely(vcpu->arch.apicv_active))
		static_call(kvm_x86_hwapic_isr_update)(vcpu,
						apic_find_highest_isr(apic));
	else {
		--apic->isr_count;
		BUG_ON(apic->isr_count < 0);
		apic->highest_isr_cache = -1;
	}
}
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
	/*
	 * This may race with setting of irr in __apic_accept_irq() and the
	 * value returned may be wrong, but kvm_vcpu_kick() in
	 * __apic_accept_irq() will cause a vmexit immediately and the value
	 * will be recalculated on the next vmentry.
	 */
	return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
		     struct dest_map *dest_map)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
			irq->level, irq->trig_mode, dest_map);
}
static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
			 struct kvm_lapic_irq *irq, u32 min)
{
	int i, count = 0;
	struct kvm_vcpu *vcpu;

	if (min > map->max_apic_id)
		return 0;

	for_each_set_bit(i, ipi_bitmap,
		min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
		if (map->phys_map[min + i]) {
			vcpu = map->phys_map[min + i]->vcpu;
			count += kvm_apic_set_irq(vcpu, irq, NULL);
		}
	}

	return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
		    unsigned long ipi_bitmap_high, u32 min,
		    unsigned long icr, int op_64_bit)
{
	struct kvm_apic_map *map;
	struct kvm_lapic_irq irq = {0};
	int cluster_size = op_64_bit ? 64 : 32;
	int count;

	if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
		return -KVM_EINVAL;

	irq.vector = icr & APIC_VECTOR_MASK;
	irq.delivery_mode = icr & APIC_MODE_MASK;
	irq.level = (icr & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr & APIC_INT_LEVELTRIG;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	count = -EOPNOTSUPP;
	if (likely(map)) {
		count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
		min += cluster_size;
		count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
	}

	rcu_read_unlock();
	return count;
}
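
/*
 * Layout note for the PV IPI hypercall handled above: bit i of
 * ipi_bitmap_low targets APIC ID min + i, and ipi_bitmap_high continues at
 * min + cluster_size (64 IDs per bitmap for a 64-bit guest, 32 otherwise).
 */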
static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
				      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
				     sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
	u8 val;

	if (pv_eoi_get_user(vcpu, &val) < 0) {
		printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return false;
	}
	return val & KVM_PV_EOI_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
		printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
	if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
		printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
			   (unsigned long long)vcpu->arch.pv_eoi.msr_val);
		return;
	}
	__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
	int highest_irr;
	if (apic->vcpu->arch.apicv_active)
		highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
	else
		highest_irr = apic_find_highest_irr(apic);
	if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
		return -1;
	return highest_irr;
}
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
	u32 tpr, isrv, ppr, old_ppr;
	int isr;

	old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
	isr = apic_find_highest_isr(apic);
	isrv = (isr != -1) ? isr : 0;

	if ((tpr & 0xf0) >= (isrv & 0xf0))
		ppr = tpr & 0xff;
	else
		ppr = isrv & 0xf0;

	*new_ppr = ppr;
	if (ppr != old_ppr)
		kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

	return ppr < old_ppr;
}
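
/*
 * Worked example: with TPR = 0x40 and vector 0x51 in service,
 * isrv & 0xf0 = 0x50 exceeds tpr & 0xf0 = 0x40, so the PPR becomes 0x50 and
 * only pending vectors above priority class 5 can be delivered.
 */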
static void apic_update_ppr(struct kvm_lapic *apic)
{
	u32 ppr;

	if (__apic_update_ppr(apic, &ppr) &&
	    apic_has_interrupt_for_ppr(apic, ppr) != -1)
		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
	apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
	kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
	apic_update_ppr(apic);
}
static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
	return mda == (apic_x2apic_mode(apic) ?
			X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
	if (kvm_apic_broadcast(apic, mda))
		return true;

	if (apic_x2apic_mode(apic))
		return mda == kvm_x2apic_id(apic);

	/*
	 * Hotplug hack: Make LAPIC in xAPIC mode also accept interrupts as if
	 * it were in x2APIC mode.  Hotplugged VCPUs start in xAPIC mode and
	 * this allows unique addressing of VCPUs with APIC ID over 0xff.
	 * The 0xff condition is needed because the xAPIC ID is writeable.
	 */
	if (kvm_x2apic_id(apic) > 0xff && mda == kvm_x2apic_id(apic))
		return true;

	return mda == kvm_xapic_id(apic);
}
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
	u32 logical_id;

	if (kvm_apic_broadcast(apic, mda))
		return true;

	logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

	if (apic_x2apic_mode(apic))
		return ((logical_id >> 16) == (mda >> 16))
		       && (logical_id & mda & 0xffff) != 0;

	logical_id = GET_APIC_LOGICAL_ID(logical_id);

	switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
	case APIC_DFR_FLAT:
		return (logical_id & mda) != 0;
	case APIC_DFR_CLUSTER:
		return ((logical_id >> 4) == (mda >> 4))
		       && (logical_id & mda & 0xf) != 0;
	default:
		return false;
	}
}
/* The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to LAPICs
 *    in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
		struct kvm_lapic *source, struct kvm_lapic *target)
{
	bool ipi = source != NULL;

	if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
	    !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
		return X2APIC_BROADCAST;

	return dest_id;
}
bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
			 int shorthand, unsigned int dest, int dest_mode)
{
	struct kvm_lapic *target = vcpu->arch.apic;
	u32 mda = kvm_apic_mda(vcpu, dest, source, target);

	ASSERT(target);
	switch (shorthand) {
	case APIC_DEST_NOSHORT:
		if (dest_mode == APIC_DEST_PHYSICAL)
			return kvm_apic_match_physical_addr(target, mda);
		else
			return kvm_apic_match_logical_addr(target, mda);
	case APIC_DEST_SELF:
		return target == source;
	case APIC_DEST_ALLINC:
		return true;
	case APIC_DEST_ALLBUT:
		return target != source;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
		       const unsigned long *bitmap, u32 bitmap_size)
{
	u32 mod;
	int i, idx = -1;

	mod = vector % dest_vcpus;

	for (i = 0; i <= mod; i++) {
		idx = find_next_bit(bitmap, bitmap_size, idx + 1);
		BUG_ON(idx == bitmap_size);
	}

	return idx;
}
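
/*
 * Worked example: for vector 0x61 (97) hashed across 4 destinations,
 * mod = 97 % 4 = 1, so the loop above returns the index of the second set
 * bit in the destination bitmap.
 */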
static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
	if (!kvm->arch.disabled_lapic_found) {
		kvm->arch.disabled_lapic_found = true;
		pr_info_ratelimited(
			"Disabled LAPIC found during irq injection\n");
	}
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
		struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
	if (kvm->arch.x2apic_broadcast_quirk_disabled) {
		if ((irq->dest_id == APIC_BROADCAST &&
				map->mode != KVM_APIC_MODE_X2APIC))
			return true;
		if (irq->dest_id == X2APIC_BROADCAST)
			return true;
	} else {
		bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);
		if (irq->dest_id == (x2apic_ipi ?
		                     X2APIC_BROADCAST : APIC_BROADCAST))
			return true;
	}

	return false;
}
/* Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
		struct kvm_lapic **src, struct kvm_lapic_irq *irq,
		struct kvm_apic_map *map, struct kvm_lapic ***dst,
		unsigned long *bitmap)
{
	int i, lowest;

	if (irq->shorthand == APIC_DEST_SELF && src) {
		*dst = src;
		*bitmap = 1;
		return true;
	} else if (irq->shorthand)
		return false;

	if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
		return false;

	if (irq->dest_mode == APIC_DEST_PHYSICAL) {
		if (irq->dest_id > map->max_apic_id) {
			*bitmap = 0;
		} else {
			u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);
			*dst = &map->phys_map[dest_id];
			*bitmap = 1;
		}
		return true;
	}

	*bitmap = 0;
	if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
				(u16 *)bitmap))
		return false;

	if (!kvm_lowest_prio_delivery(irq))
		return true;

	if (!kvm_vector_hashing_enabled()) {
		lowest = -1;
		for_each_set_bit(i, bitmap, 16) {
			if (!(*dst)[i])
				continue;
			if (lowest < 0)
				lowest = i;
			else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
						(*dst)[lowest]->vcpu) < 0)
				lowest = i;
		}
	} else {
		if (!*bitmap)
			return true;

		lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
				bitmap, 16);
		if (!(*dst)[lowest]) {
			kvm_apic_disabled_lapic_found(kvm);
			*bitmap = 0;
			return true;
		}
	}

	*bitmap = (lowest >= 0) ? 1 << lowest : 0;

	return true;
}
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
		struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	int i;
	bool ret;

	*r = -1;

	if (irq->shorthand == APIC_DEST_SELF) {
		*r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
		return true;
	}

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
	if (ret) {
		*r = 0;
		for_each_set_bit(i, &bitmap, 16) {
			if (!dst[i])
				continue;
			*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * This routine tries to handle interrupts in posted mode, here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *	1. For lowest-priority interrupts, store all the possible
 *	   destination vCPUs in an array.
 *	2. Use "guest vector % max number of destination vCPUs" to find
 *	   the right destination vCPU in the array for the lowest-priority
 *	   interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
			struct kvm_vcpu **dest_vcpu)
{
	struct kvm_apic_map *map;
	unsigned long bitmap;
	struct kvm_lapic **dst = NULL;
	bool ret = false;

	if (irq->shorthand)
		return false;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
			hweight16(bitmap) == 1) {
		unsigned long i = find_first_bit(&bitmap, 16);

		if (dst[i]) {
			*dest_vcpu = dst[i]->vcpu;
			ret = true;
		}
	}

	rcu_read_unlock();
	return ret;
}
/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
			     int vector, int level, int trig_mode,
			     struct dest_map *dest_map)
{
	int result = 0;
	struct kvm_vcpu *vcpu = apic->vcpu;

	trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
				  trig_mode, vector);
	switch (delivery_mode) {
	case APIC_DM_LOWEST:
		vcpu->arch.apic_arb_prio++;
		fallthrough;
	case APIC_DM_FIXED:
		if (unlikely(trig_mode && !level))
			break;

		/* FIXME add logic for vcpu on reset */
		if (unlikely(!apic_enabled(apic)))
			break;

		result = 1;

		if (dest_map) {
			__set_bit(vcpu->vcpu_id, dest_map->map);
			dest_map->vectors[vcpu->vcpu_id] = vector;
		}

		if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
			if (trig_mode)
				kvm_lapic_set_vector(vector,
						     apic->regs + APIC_TMR);
			else
				kvm_lapic_clear_vector(vector,
						       apic->regs + APIC_TMR);
		}

		if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
			kvm_lapic_set_irr(vector, apic);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_REMRD:
		result = 1;
		vcpu->arch.pv.pv_unhalted = 1;
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_SMI:
		result = 1;
		kvm_make_request(KVM_REQ_SMI, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_NMI:
		result = 1;
		kvm_inject_nmi(vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_INIT:
		if (!trig_mode || level) {
			result = 1;
			/* assumes that there are only KVM_APIC_INIT/SIPI */
			apic->pending_events = (1UL << KVM_APIC_INIT);
			kvm_make_request(KVM_REQ_EVENT, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		break;

	case APIC_DM_STARTUP:
		result = 1;
		apic->sipi_vector = vector;
		/* make sure sipi_vector is visible for the receiver */
		smp_wmb();
		set_bit(KVM_APIC_SIPI, &apic->pending_events);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
		break;

	case APIC_DM_EXTINT:
		/*
		 * Should only be called by kvm_apic_local_deliver() with LVT0,
		 * before NMI watchdog was enabled. Already handled by
		 * kvm_apic_accept_pic_intr().
		 */
		break;

	default:
		printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
		       delivery_mode);
		break;
	}
	return result;
}
/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * out the destination vcpus array and set the bitmap or it traverses to
 * each available vcpu to identify the same.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
			      unsigned long *vcpu_bitmap)
{
	struct kvm_lapic **dest_vcpu = NULL;
	struct kvm_lapic *src = NULL;
	struct kvm_apic_map *map;
	struct kvm_vcpu *vcpu;
	unsigned long bitmap;
	int i, vcpu_idx;
	bool ret;

	rcu_read_lock();
	map = rcu_dereference(kvm->arch.apic_map);

	ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
					  &bitmap);
	if (ret) {
		for_each_set_bit(i, &bitmap, 16) {
			if (!dest_vcpu[i])
				continue;
			vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
			__set_bit(vcpu_idx, vcpu_bitmap);
		}
	} else {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (!kvm_apic_present(vcpu))
				continue;
			if (!kvm_apic_match_dest(vcpu, NULL,
						 irq->shorthand,
						 irq->dest_id,
						 irq->dest_mode))
				continue;
			__set_bit(i, vcpu_bitmap);
		}
	}
	rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
	return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}
static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
	return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
	int trigger_mode;

	/* Eoi the ioapic only if the ioapic doesn't own the vector. */
	if (!kvm_ioapic_handles_vector(apic, vector))
		return;

	/* Request a KVM exit to inform the userspace IOAPIC. */
	if (irqchip_split(apic->vcpu->kvm)) {
		apic->vcpu->arch.pending_ioapic_eoi = vector;
		kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
		return;
	}

	if (apic_test_vector(vector, apic->regs + APIC_TMR))
		trigger_mode = IOAPIC_LEVEL_TRIG;
	else
		trigger_mode = IOAPIC_EDGE_TRIG;

	kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}
static int apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	trace_kvm_eoi(apic, vector);

	/*
	 * Not every EOI write has a corresponding ISR bit set; one example
	 * is when the kernel checks the timer in setup_IO_APIC().
	 */
	if (vector == -1)
		return vector;

	apic_clear_isr(vector, apic);
	apic_update_ppr(apic);

	if (to_hv_vcpu(apic->vcpu) &&
	    test_bit(vector, to_hv_synic(apic->vcpu)->vec_bitmap))
		kvm_hv_synic_send_eoi(apic->vcpu, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
	return vector;
}

/*
 * This interface assumes a trap-like exit, which has already finished
 * the desired side effects, including vISR and vPPR update.
 */
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	trace_kvm_eoi(apic, vector);

	kvm_ioapic_send_eoi(apic, vector);
	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}
EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
{
	struct kvm_lapic_irq irq;

	irq.vector = icr_low & APIC_VECTOR_MASK;
	irq.delivery_mode = icr_low & APIC_MODE_MASK;
	irq.dest_mode = icr_low & APIC_DEST_MASK;
	irq.level = (icr_low & APIC_INT_ASSERT) != 0;
	irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
	irq.shorthand = icr_low & APIC_SHORT_MASK;
	irq.msi_redir_hint = false;
	if (apic_x2apic_mode(apic))
		irq.dest_id = icr_high;
	else
		irq.dest_id = GET_APIC_DEST_FIELD(icr_high);

	trace_kvm_apic_ipi(icr_low, irq.dest_id);

	kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
}
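
/*
 * Worked example: icr_low = 0x00004032 decodes as vector 0x32, fixed
 * delivery, physical destination, edge-triggered, level asserted; in xAPIC
 * mode icr_high = 0x01000000 yields dest_id 1 (bits 31:24), while x2APIC
 * uses the full 32-bit icr_high as the destination.
 */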
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
	ktime_t remaining, now;
	s64 ns;
	u32 tmcct;

	ASSERT(apic != NULL);

	/* if initial count is 0, current count should also be 0 */
	if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
		apic->lapic_timer.period == 0)
		return 0;

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
	tmcct = div64_u64(ns,
			 (APIC_BUS_CYCLE_NS * apic->divide_count));

	return tmcct;
}
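
/*
 * Worked example (assuming APIC_BUS_CYCLE_NS == 1, per lapic.h): with
 * divide_count = 16 and 800000 ns left until target_expiration, the current
 * count reads back as 800000 / (1 * 16) = 50000 bus cycles.
 */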
static void __report_tpr_access(struct kvm_lapic *apic, bool write)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_run *run = vcpu->run;

	kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
	run->tpr_access.rip = kvm_rip_read(vcpu);
	run->tpr_access.is_write = write;
}

static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
{
	if (apic->vcpu->arch.tpr_access_reporting)
		__report_tpr_access(apic, write);
}
static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
{
	u32 val = 0;

	if (offset >= LAPIC_MMIO_LENGTH)
		return 0;

	switch (offset) {
	case APIC_ARBPRI:
		break;

	case APIC_TMCCT:	/* Timer CCR */
		if (apic_lvtt_tscdeadline(apic))
			return 0;

		val = apic_get_tmcct(apic);
		break;
	case APIC_PROCPRI:
		apic_update_ppr(apic);
		val = kvm_lapic_get_reg(apic, offset);
		break;
	case APIC_TASKPRI:
		report_tpr_access(apic, false);
		fallthrough;
	default:
		val = kvm_lapic_get_reg(apic, offset);
		break;
	}

	return val;
}

static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_lapic, dev);
}
#define APIC_REG_MASK(reg)	(1ull << ((reg) >> 4))
#define APIC_REGS_MASK(first, count) \
	(APIC_REG_MASK(first) * ((1ull << (count)) - 1))
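
/*
 * Worked example: APIC_REG_MASK(APIC_ID) is 1ull << (0x20 >> 4) = bit 2, and
 * APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) with APIC_ISR = 0x100 and 8
 * registers covers bits 16-23, i.e. offsets 0x100-0x170.
 */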
int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
		       void *data)
{
	unsigned char alignment = offset & 0xf;
	u32 result;
	/* this bitmask has a bit cleared for each reserved register */
	u64 valid_reg_mask =
		APIC_REG_MASK(APIC_ID) |
		APIC_REG_MASK(APIC_LVR) |
		APIC_REG_MASK(APIC_TASKPRI) |
		APIC_REG_MASK(APIC_PROCPRI) |
		APIC_REG_MASK(APIC_LDR) |
		APIC_REG_MASK(APIC_DFR) |
		APIC_REG_MASK(APIC_SPIV) |
		APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
		APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
		APIC_REG_MASK(APIC_ESR) |
		APIC_REG_MASK(APIC_ICR) |
		APIC_REG_MASK(APIC_ICR2) |
		APIC_REG_MASK(APIC_LVTT) |
		APIC_REG_MASK(APIC_LVTTHMR) |
		APIC_REG_MASK(APIC_LVTPC) |
		APIC_REG_MASK(APIC_LVT0) |
		APIC_REG_MASK(APIC_LVT1) |
		APIC_REG_MASK(APIC_LVTERR) |
		APIC_REG_MASK(APIC_TMICT) |
		APIC_REG_MASK(APIC_TMCCT) |
		APIC_REG_MASK(APIC_TDCR);

	/* ARBPRI is not valid on x2APIC */
	if (!apic_x2apic_mode(apic))
		valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);

	if (alignment + len > 4)
		return 1;

	if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
		return 1;

	result = __apic_read(apic, offset & ~0xf);

	trace_kvm_apic_read(offset, result);

	switch (len) {
	case 1:
	case 2:
	case 4:
		memcpy(data, (char *)&result + alignment, len);
		break;
	default:
		printk(KERN_ERR "Local APIC read with len = %x, "
		       "should be 1, 2, or 4 instead\n", len);
		break;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_read);
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
	return addr >= apic->base_address &&
		addr < apic->base_address + LAPIC_MMIO_LENGTH;
}

static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			   gpa_t address, int len, void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	u32 offset = address - apic->base_address;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		memset(data, 0xff, len);
		return 0;
	}

	kvm_lapic_reg_read(apic, offset, len, data);

	return 0;
}
static void update_divide_count(struct kvm_lapic *apic)
{
	u32 tmp1, tmp2, tdcr;

	tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
	tmp1 = tdcr & 0xf;
	tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
	apic->divide_count = 0x1 << (tmp2 & 0x7);
}
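
/*
 * Worked example of the TDCR decode above: TDCR = 0x0 gives tmp2 = 1 and a
 * divide_count of 2; TDCR = 0x8 gives tmp2 = 5 and divide-by-32; TDCR = 0xb
 * gives tmp2 = 8, which wraps to 1 << 0 = divide-by-1, matching the SDM
 * encoding.
 */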
static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
{
	/*
	 * Do not allow the guest to program periodic timers with small
	 * interval, since the hrtimers are not throttled by the host
	 * scheduler.
	 */
	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		s64 min_period = min_timer_period_us * 1000LL;

		if (apic->lapic_timer.period < min_period) {
			pr_info_ratelimited(
			    "kvm: vcpu %i: requested %lld ns "
			    "lapic timer period limited to %lld ns\n",
			    apic->vcpu->vcpu_id,
			    apic->lapic_timer.period, min_period);
			apic->lapic_timer.period = min_period;
		}
	}
}
static void cancel_hv_timer(struct kvm_lapic *apic);

static void cancel_apic_timer(struct kvm_lapic *apic)
{
	hrtimer_cancel(&apic->lapic_timer.timer);
	preempt_disable();
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	preempt_enable();
}

static void apic_update_lvtt(struct kvm_lapic *apic)
{
	u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
			apic->lapic_timer.timer_mode_mask;

	if (apic->lapic_timer.timer_mode != timer_mode) {
		if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
				APIC_LVT_TIMER_TSCDEADLINE)) {
			cancel_apic_timer(apic);
			kvm_lapic_set_reg(apic, APIC_TMICT, 0);
			apic->lapic_timer.period = 0;
			apic->lapic_timer.tscdeadline = 0;
		}
		apic->lapic_timer.timer_mode = timer_mode;
		limit_periodic_timer_frequency(apic);
	}
}
/*
 * On APICv, this test will cause a busy wait
 * during a higher-priority task.
 */

static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);

	if (kvm_apic_hw_enabled(apic)) {
		int vec = reg & APIC_VECTOR_MASK;
		void *bitmap = apic->regs + APIC_ISR;

		if (vcpu->arch.apicv_active)
			bitmap = apic->regs + APIC_IRR;

		if (apic_test_vector(vec, bitmap))
			return true;
	}
	return false;
}
static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
{
	u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;

	/*
	 * If the guest TSC is running at a different ratio than the host, then
	 * convert the delay to nanoseconds to achieve an accurate delay.  Note
	 * that __delay() uses delay_tsc whenever the hardware has TSC, thus
	 * always for VMX enabled hardware.
	 */
	if (vcpu->arch.tsc_scaling_ratio == kvm_default_tsc_scaling_ratio) {
		__delay(min(guest_cycles,
			nsec_to_cycles(vcpu, timer_advance_ns)));
	} else {
		u64 delay_ns = guest_cycles * 1000000ULL;
		do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
		ndelay(min_t(u32, delay_ns, timer_advance_ns));
	}
}
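
/*
 * Worked example of the scaled path above: 4000 guest cycles on a vCPU with
 * virtual_tsc_khz = 2000000 (2 GHz) convert to 4000 * 1000000 / 2000000 =
 * 2000 ns of host-side busy wait.
 */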
static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
					      s64 advance_expire_delta)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
	u64 ns;

	/* Do not adjust for tiny fluctuations or large random spikes. */
	if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
	    abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
		return;

	/* too early */
	if (advance_expire_delta < 0) {
		ns = -advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	} else {
	/* too late */
		ns = advance_expire_delta * 1000000ULL;
		do_div(ns, vcpu->arch.virtual_tsc_khz);
		timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
	}

	if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
		timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
	apic->lapic_timer.timer_advance_ns = timer_advance_ns;
}
static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u64 guest_tsc, tsc_deadline;

	tsc_deadline = apic->lapic_timer.expired_tscdeadline;
	apic->lapic_timer.expired_tscdeadline = 0;
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	apic->lapic_timer.advance_expire_delta = guest_tsc - tsc_deadline;

	if (lapic_timer_advance_dynamic) {
		adjust_lapic_timer_advance(vcpu, apic->lapic_timer.advance_expire_delta);
		/*
		 * If the timer fired early, reread the TSC to account for the
		 * overhead of the above adjustment to avoid waiting longer
		 * than is necessary.
		 */
		if (guest_tsc < tsc_deadline)
			guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	}

	if (guest_tsc < tsc_deadline)
		__wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
}

void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu) &&
	    vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
	    vcpu->arch.apic->lapic_timer.timer_advance_ns &&
	    lapic_timer_int_injected(vcpu))
		__kvm_wait_lapic_expire(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	kvm_apic_local_deliver(apic, APIC_LVTT);
	if (apic_lvtt_tscdeadline(apic)) {
		ktimer->tscdeadline = 0;
	} else if (apic_lvtt_oneshot(apic)) {
		ktimer->tscdeadline = 0;
		ktimer->target_expiration = 0;
	}
}
static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
{
	struct kvm_vcpu *vcpu = apic->vcpu;
	struct kvm_timer *ktimer = &apic->lapic_timer;

	if (atomic_read(&apic->lapic_timer.pending))
		return;

	if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
		ktimer->expired_tscdeadline = ktimer->tscdeadline;

	if (!from_timer_fn && vcpu->arch.apicv_active) {
		WARN_ON(kvm_get_running_vcpu() != vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
		/*
		 * Ensure the guest's timer has truly expired before posting an
		 * interrupt.  Open code the relevant checks to avoid querying
		 * lapic_timer_int_injected(), which will be false since the
		 * interrupt isn't yet injected.  Waiting until after injecting
		 * is not an option since that won't help a posted interrupt.
		 */
		if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
		    vcpu->arch.apic->lapic_timer.timer_advance_ns)
			__kvm_wait_lapic_expire(vcpu);
		kvm_apic_inject_pending_timer_irqs(apic);
		return;
	}

	atomic_inc(&apic->lapic_timer.pending);
	kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
	if (from_timer_fn)
		kvm_vcpu_kick(vcpu);
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
	u64 ns = 0;
	ktime_t expire;
	struct kvm_vcpu *vcpu = apic->vcpu;
	unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
	unsigned long flags;
	ktime_t now;

	if (unlikely(!tscdeadline || !this_tsc_khz))
		return;

	local_irq_save(flags);

	now = ktime_get();
	guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());

	ns = (tscdeadline - guest_tsc) * 1000000ULL;
	do_div(ns, this_tsc_khz);

	if (likely(tscdeadline > guest_tsc) &&
	    likely(ns > apic->lapic_timer.timer_advance_ns)) {
		expire = ktime_add_ns(now, ns);
		expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
		hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
	} else
		apic_timer_expired(apic, false);

	local_irq_restore(flags);
}
static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
{
	return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
}
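
/*
 * Worked example (assuming APIC_BUS_CYCLE_NS == 1, per lapic.h): TMICT =
 * 100000 with divide_count = 2 programs a 100000 * 1 * 2 = 200000 ns
 * (200 us) period.
 */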
static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
{
	ktime_t now, remaining;
	u64 ns_remaining_old, ns_remaining_new;

	apic->lapic_timer.period =
			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
	limit_periodic_timer_frequency(apic);

	now = ktime_get();
	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
	if (ktime_to_ns(remaining) < 0)
		remaining = 0;

	ns_remaining_old = ktime_to_ns(remaining);
	ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
	                                   apic->divide_count, old_divisor);

	apic->lapic_timer.tscdeadline +=
		nsec_to_cycles(apic->vcpu, ns_remaining_new) -
		nsec_to_cycles(apic->vcpu, ns_remaining_old);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
}
static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
{
	ktime_t now;
	u64 tscl = rdtsc();
	u64 deadline;

	now = ktime_get();
	apic->lapic_timer.period =
			tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));

	if (!apic->lapic_timer.period) {
		apic->lapic_timer.tscdeadline = 0;
		return false;
	}

	limit_periodic_timer_frequency(apic);
	deadline = apic->lapic_timer.period;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
		if (unlikely(count_reg != APIC_TMICT)) {
			deadline = tmict_to_ns(apic,
				     kvm_lapic_get_reg(apic, count_reg));
			if (unlikely(deadline <= 0))
				deadline = apic->lapic_timer.period;
			else if (unlikely(deadline > apic->lapic_timer.period)) {
				pr_info_ratelimited(
				    "kvm: vcpu %i: requested lapic timer restore with "
				    "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
				    "Using initial count to start timer.\n",
				    apic->vcpu->vcpu_id,
				    count_reg,
				    kvm_lapic_get_reg(apic, count_reg),
				    deadline, apic->lapic_timer.period);
				kvm_lapic_set_reg(apic, count_reg, 0);
				deadline = apic->lapic_timer.period;
			}
		}
	}

	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, deadline);
	apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);

	return true;
}
static void advance_periodic_target_expiration(struct kvm_lapic *apic)
{
	ktime_t now = ktime_get();
	u64 tscl = rdtsc();
	ktime_t delta;

	/*
	 * Synchronize both deadlines to the same time source or
	 * differences in the periods (caused by differences in the
	 * underlying clocks or numerical approximation errors) will
	 * cause the two to drift apart over time as the errors
	 * accumulate.
	 */
	apic->lapic_timer.target_expiration =
		ktime_add_ns(apic->lapic_timer.target_expiration,
				apic->lapic_timer.period);
	delta = ktime_sub(apic->lapic_timer.target_expiration, now);
	apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
		nsec_to_cycles(apic->vcpu, delta);
}
static void start_sw_period(struct kvm_lapic *apic)
{
	if (!apic->lapic_timer.period)
		return;

	if (ktime_after(ktime_get(),
			apic->lapic_timer.target_expiration)) {
		apic_timer_expired(apic, false);

		if (apic_lvtt_oneshot(apic))
			return;

		advance_periodic_target_expiration(apic);
	}

	hrtimer_start(&apic->lapic_timer.timer,
		apic->lapic_timer.target_expiration,
		HRTIMER_MODE_ABS_HARD);
}

bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
{
	if (!lapic_in_kernel(vcpu))
		return false;

	return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
}
EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
static void cancel_hv_timer(struct kvm_lapic *apic)
{
	WARN_ON(preemptible());
	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
	apic->lapic_timer.hv_timer_in_use = false;
}

static bool start_hv_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;
	struct kvm_vcpu *vcpu = apic->vcpu;
	bool expired;

	WARN_ON(preemptible());
	if (!kvm_can_use_hv_timer(vcpu))
		return false;

	if (!ktimer->tscdeadline)
		return false;

	if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
		return false;

	ktimer->hv_timer_in_use = true;
	hrtimer_cancel(&ktimer->timer);

	/*
	 * To simplify handling the periodic timer, leave the hv timer running
	 * even if the deadline timer has expired, i.e. rely on the resulting
	 * VM-Exit to recompute the periodic timer's target expiration.
	 */
	if (!apic_lvtt_period(apic)) {
		/*
		 * Cancel the hv timer if the sw timer fired while the hv timer
		 * was being programmed, or if the hv timer itself expired.
		 */
		if (atomic_read(&ktimer->pending)) {
			cancel_hv_timer(apic);
		} else if (expired) {
			apic_timer_expired(apic, false);
			cancel_hv_timer(apic);
		}
	}

	trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);

	return true;
}
static void start_sw_timer(struct kvm_lapic *apic)
{
	struct kvm_timer *ktimer = &apic->lapic_timer;

	WARN_ON(preemptible());
	if (apic->lapic_timer.hv_timer_in_use)
		cancel_hv_timer(apic);
	if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
		return;

	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
		start_sw_period(apic);
	else if (apic_lvtt_tscdeadline(apic))
		start_sw_tscdeadline(apic);
	trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
}

static void restart_apic_timer(struct kvm_lapic *apic)
{
	preempt_disable();

	if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
		goto out;

	if (!start_hv_timer(apic))
		start_sw_timer(apic);
out:
	preempt_enable();
}
void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* If the preempt notifier has already run, it also called apic_timer_expired */
	if (!apic->lapic_timer.hv_timer_in_use)
		goto out;
	WARN_ON(rcuwait_active(&vcpu->wait));
	apic_timer_expired(apic, false);
	cancel_hv_timer(apic);

	if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
		advance_periodic_target_expiration(apic);
		restart_apic_timer(apic);
	}
out:
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);

void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
	restart_apic_timer(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);

void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	preempt_disable();
	/* Possibly the TSC deadline timer is not enabled yet */
	if (apic->lapic_timer.hv_timer_in_use)
		start_sw_timer(apic);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);

void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	WARN_ON(!apic->lapic_timer.hv_timer_in_use);
	restart_apic_timer(apic);
}
static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
{
	atomic_set(&apic->lapic_timer.pending, 0);

	if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
	    && !set_target_expiration(apic, count_reg))
		return;

	restart_apic_timer(apic);
}

static void start_apic_timer(struct kvm_lapic *apic)
{
	__start_apic_timer(apic, APIC_TMICT);
}

static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
{
	bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);

	if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
		apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
		if (lvt0_in_nmi_mode) {
			atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
		} else
			atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
	}
}
int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
{
	int ret = 0;

	trace_kvm_apic_write(reg, val);

	switch (reg) {
	case APIC_ID:		/* Local APIC ID */
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_xapic_id(apic, val >> 24);
		else
			ret = 1;
		break;

	case APIC_TASKPRI:
		report_tpr_access(apic, true);
		apic_set_tpr(apic, val & 0xff);
		break;

	case APIC_EOI:
		apic_set_eoi(apic);
		break;

	case APIC_LDR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
		else
			ret = 1;
		break;

	case APIC_DFR:
		if (!apic_x2apic_mode(apic))
			kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
		else
			ret = 1;
		break;

	case APIC_SPIV: {
		u32 mask = 0x3ff;
		if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
			mask |= APIC_SPIV_DIRECTED_EOI;
		apic_set_spiv(apic, val & mask);
		if (!(val & APIC_SPIV_APIC_ENABLED)) {
			int i;
			u32 lvt_val;

			for (i = 0; i < KVM_APIC_LVT_NUM; i++) {
				lvt_val = kvm_lapic_get_reg(apic,
						       APIC_LVTT + 0x10 * i);
				kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i,
					     lvt_val | APIC_LVT_MASKED);
			}
			apic_update_lvtt(apic);
			atomic_set(&apic->lapic_timer.pending, 0);

		}
		break;
	}
	case APIC_ICR:
		/* No delay here, so we always clear the pending bit */
		val &= ~(1 << 12);
		kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
		kvm_lapic_set_reg(apic, APIC_ICR, val);
		break;

	case APIC_ICR2:
		if (!apic_x2apic_mode(apic))
			val &= 0xff000000;
		kvm_lapic_set_reg(apic, APIC_ICR2, val);
		break;

	case APIC_LVT0:
		apic_manage_nmi_watchdog(apic, val);
		fallthrough;
	case APIC_LVTTHMR:
	case APIC_LVTPC:
	case APIC_LVT1:
	case APIC_LVTERR: {
		/* TODO: Check vector */
		size_t size;
		u32 index;

		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		size = ARRAY_SIZE(apic_lvt_mask);
		index = array_index_nospec(
				(reg - APIC_LVTT) >> 4, size);
		val &= apic_lvt_mask[index];
		kvm_lapic_set_reg(apic, reg, val);

		break;
	}

	case APIC_LVTT:
		if (!kvm_apic_sw_enabled(apic))
			val |= APIC_LVT_MASKED;
		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
		kvm_lapic_set_reg(apic, APIC_LVTT, val);
		apic_update_lvtt(apic);
		break;

	case APIC_TMICT:
		if (apic_lvtt_tscdeadline(apic))
			break;

		cancel_apic_timer(apic);
		kvm_lapic_set_reg(apic, APIC_TMICT, val);
		start_apic_timer(apic);
		break;

	case APIC_TDCR: {
		uint32_t old_divisor = apic->divide_count;

		kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
		update_divide_count(apic);
		if (apic->divide_count != old_divisor &&
				apic->lapic_timer.period) {
			hrtimer_cancel(&apic->lapic_timer.timer);
			update_target_expiration(apic, old_divisor);
			restart_apic_timer(apic);
		}
		break;
	}
	case APIC_ESR:
		if (apic_x2apic_mode(apic) && val != 0)
			ret = 1;
		break;

	case APIC_SELF_IPI:
		if (apic_x2apic_mode(apic)) {
			kvm_lapic_reg_write(apic, APIC_ICR,
					    APIC_DEST_SELF | (val & APIC_VECTOR_MASK));
		} else
			ret = 1;
		break;
	default:
		ret = 1;
		break;
	}

	kvm_recalculate_apic_map(apic->vcpu->kvm);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_lapic_reg_write);
static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			    gpa_t address, int len, const void *data)
{
	struct kvm_lapic *apic = to_lapic(this);
	unsigned int offset = address - apic->base_address;
	u32 val;

	if (!apic_mmio_in_range(apic, address))
		return -EOPNOTSUPP;

	if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
		if (!kvm_check_has_quirk(vcpu->kvm,
					 KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
			return -EOPNOTSUPP;

		return 0;
	}

	/*
	 * APIC registers must be aligned on a 128-bit boundary, and
	 * 32/64/128-bit registers must be accessed through 32-bit loads
	 * and stores.  Refer to SDM 8.4.1.
	 */
	if (len != 4 || (offset & 0xf))
		return 0;

	val = *(u32*)data;

	kvm_lapic_reg_write(apic, offset & 0xff0, val);

	return 0;
}
void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
{
	kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
}
EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);

/* emulate APIC access in a trap manner */
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
{
	u32 val = 0;

	/* hw has done the conditional check and inst decode */
	offset &= 0xff0;

	kvm_lapic_reg_read(vcpu->arch.apic, offset, 4, &val);

	/* TODO: optimize to just emulate side effect w/o one more write */
	kvm_lapic_reg_write(vcpu->arch.apic, offset, val);
}
EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
void kvm_free_lapic(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!vcpu->arch.apic)
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
		static_branch_slow_dec_deferred(&apic_hw_disabled);

	if (!apic->sw_enabled)
		static_branch_slow_dec_deferred(&apic_sw_disabled);

	if (apic->regs)
		free_page((unsigned long)apic->regs);

	kfree(apic);
}
/*
 *----------------------------------------------------------------------
 * LAPIC interface
 *----------------------------------------------------------------------
 */
u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return 0;

	return apic->lapic_timer.tscdeadline;
}

void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
		return;

	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.tscdeadline = data;
	start_apic_timer(apic);
}
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
		     | (kvm_lapic_get_reg(apic, APIC_TASKPRI) & 4));
}

u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
{
	u64 tpr;

	tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);

	return (tpr & 0xf0) >> 4;
}
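
/*
 * Worked example: CR8 holds TPR bits 7:4, so a guest MOV to CR8 of 5 sets
 * TASKPRI to 0x50 (plus the low bit preserved by kvm_lapic_set_tpr() above),
 * and a TASKPRI of 0x5f reads back as CR8 = 5.
 */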
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
{
	u64 old_value = vcpu->arch.apic_base;
	struct kvm_lapic *apic = vcpu->arch.apic;

	vcpu->arch.apic_base = value;

	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
		kvm_update_cpuid_runtime(vcpu);

	if (!apic)
		return;

	/* update jump label if enable bit changes */
	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
		if (value & MSR_IA32_APICBASE_ENABLE) {
			kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
			static_branch_slow_dec_deferred(&apic_hw_disabled);
			/* Check if there are APF page ready requests pending */
			kvm_make_request(KVM_REQ_APF_READY, vcpu);
		} else {
			static_branch_inc(&apic_hw_disabled.key);
			atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
		}
	}

	if (((old_value ^ value) & X2APIC_ENABLE) && (value & X2APIC_ENABLE))
		kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);

	if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE))
		static_call(kvm_x86_set_virtual_apic_mode)(vcpu);

	apic->base_address = apic->vcpu->arch.apic_base &
			     MSR_IA32_APICBASE_BASE;

	if ((value & MSR_IA32_APICBASE_ENABLE) &&
	     apic->base_address != APIC_DEFAULT_PHYS_BASE)
		pr_warn_once("APIC base relocation is unsupported by KVM");
}
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (vcpu->arch.apicv_active) {
		/* irr_pending is always true when apicv is activated. */
		apic->irr_pending = true;
		apic->isr_count = 1;
	} else {
		apic->irr_pending = (apic_search_irr(apic) != -1);
		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
	}
}
EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);
void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int i;

	if (!init_event) {
		vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE |
		                       MSR_IA32_APICBASE_ENABLE;
		if (kvm_vcpu_is_reset_bsp(vcpu))
			vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
	}

	if (!apic)
		return;

	/* Stop the timer in case it's a reset to an active apic */
	hrtimer_cancel(&apic->lapic_timer.timer);

	if (!init_event) {
		apic->base_address = APIC_DEFAULT_PHYS_BASE;

		kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
	}
	kvm_apic_set_version(apic->vcpu);

	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
	apic_update_lvtt(apic);
	if (kvm_vcpu_is_reset_bsp(vcpu) &&
	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
		kvm_lapic_set_reg(apic, APIC_LVT0,
			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));

	kvm_apic_set_dfr(apic, 0xffffffffU);
	apic_set_spiv(apic, 0xff);
	kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
	if (!apic_x2apic_mode(apic))
		kvm_apic_set_ldr(apic, 0);
	kvm_lapic_set_reg(apic, APIC_ESR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR, 0);
	kvm_lapic_set_reg(apic, APIC_ICR2, 0);
	kvm_lapic_set_reg(apic, APIC_TDCR, 0);
	kvm_lapic_set_reg(apic, APIC_TMICT, 0);
	for (i = 0; i < 8; i++) {
		kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
		kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
	}
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	update_divide_count(apic);
	atomic_set(&apic->lapic_timer.pending, 0);

	vcpu->arch.pv_eoi.msr_val = 0;
	apic_update_ppr(apic);
	if (vcpu->arch.apicv_active) {
		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call(kvm_x86_hwapic_irr_update)(vcpu, -1);
		static_call(kvm_x86_hwapic_isr_update)(vcpu, -1);
	}

	vcpu->arch.apic_arb_prio = 0;
	vcpu->arch.apic_attention = 0;

	kvm_recalculate_apic_map(vcpu->kvm);
}
/*
 *----------------------------------------------------------------------
 * timer interface
 *----------------------------------------------------------------------
 */

static bool lapic_is_periodic(struct kvm_lapic *apic)
{
	return apic_lvtt_period(apic);
}

int apic_has_pending_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
		return atomic_read(&apic->lapic_timer.pending);

	return 0;
}

int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
{
	u32 reg = kvm_lapic_get_reg(apic, lvt_type);
	int vector, mode, trig_mode;

	if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
		vector = reg & APIC_VECTOR_MASK;
		mode = reg & APIC_MODE_MASK;
		trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
		return __apic_accept_irq(apic, mode, vector, 1, trig_mode,
					 NULL);
	}
	return 0;
}

void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (apic)
		kvm_apic_local_deliver(apic, APIC_LVT0);
}

static const struct kvm_io_device_ops apic_mmio_ops = {
	.read     = apic_mmio_read,
	.write    = apic_mmio_write,
};

static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
{
	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);

	apic_timer_expired(apic, true);

	if (lapic_is_periodic(apic)) {
		advance_periodic_target_expiration(apic);
		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
		return HRTIMER_RESTART;
	} else
		return HRTIMER_NORESTART;
}

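/*
 * Editor's example (not upstream text): for a guest that programs a
 * periodic LVT timer with a 1 ms period, every expiry pushes the
 * hrtimer forward by ktimer->period nanoseconds and returns
 * HRTIMER_RESTART, so the timer keeps firing without the guest having
 * to rewrite TMICT.
 */
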
int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
{
	struct kvm_lapic *apic;

	ASSERT(vcpu != NULL);

	apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
	if (!apic)
		goto nomem;

	vcpu->arch.apic = apic;

	apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
	if (!apic->regs) {
		printk(KERN_ERR "malloc apic regs error for vcpu %x\n",
		       vcpu->vcpu_id);
		goto nomem_free_apic;
	}
	apic->vcpu = vcpu;

	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_ABS_HARD);
	apic->lapic_timer.timer.function = apic_timer_fn;
	if (timer_advance_ns == -1) {
		apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
		lapic_timer_advance_dynamic = true;
	} else {
		apic->lapic_timer.timer_advance_ns = timer_advance_ns;
		lapic_timer_advance_dynamic = false;
	}

	static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
	kvm_iodevice_init(&apic->dev, &apic_mmio_ops);

	return 0;
nomem_free_apic:
	kfree(apic);
	vcpu->arch.apic = NULL;
nomem:
	return -ENOMEM;
}

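/*
 * Editor's note (not upstream text): HRTIMER_MODE_ABS_HARD requests
 * expiry in hard interrupt context even on kernels with forced-threaded
 * interrupts (PREEMPT_RT), keeping the timer path's latency bounded.
 */
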
int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (!kvm_apic_present(vcpu))
		return -1;

	__apic_update_ppr(apic, &ppr);
	return apic_has_interrupt_for_ppr(apic, ppr);
}
EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);

int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
{
	u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
	int r = 0;

	if (!kvm_apic_hw_enabled(vcpu->arch.apic))
		r = 1;
	if ((lvt0 & APIC_LVT_MASKED) == 0 &&
	    GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
		r = 1;
	return r;
}

void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (atomic_read(&apic->lapic_timer.pending) > 0) {
		kvm_apic_inject_pending_timer_irqs(apic);
		atomic_set(&apic->lapic_timer.pending, 0);
	}
}

int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
	int vector = kvm_apic_has_interrupt(vcpu);
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 ppr;

	if (vector == -1)
		return -1;

	/*
	 * We get here even with APIC virtualization enabled, if doing
	 * nested virtualization and L1 runs with the "acknowledge interrupt
	 * on exit" mode. Then we cannot inject the interrupt via RVI,
	 * because the process would deliver it through the IDT.
	 */

	apic_clear_irr(vector, apic);
	if (to_hv_vcpu(vcpu) && test_bit(vector, to_hv_synic(vcpu)->auto_eoi_bitmap)) {
		/*
		 * For auto-EOI interrupts, there might be another pending
		 * interrupt above PPR, so check whether to raise another
		 * KVM_REQ_EVENT.
		 */
		apic_update_ppr(apic);
	} else {
		/*
		 * For normal interrupts, PPR has been raised and there cannot
		 * be a higher-priority pending interrupt---except if there was
		 * a concurrent interrupt injection, but that would have
		 * triggered KVM_REQ_EVENT already.
		 */
		apic_set_isr(vector, apic);
		__apic_update_ppr(apic, &ppr);
	}

	return vector;
}

static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
				struct kvm_lapic_state *s, bool set)
{
	if (apic_x2apic_mode(vcpu->arch.apic)) {
		u32 *id = (u32 *)(s->regs + APIC_ID);
		u32 *ldr = (u32 *)(s->regs + APIC_LDR);

		if (vcpu->kvm->arch.x2apic_format) {
			if (*id != vcpu->vcpu_id)
				return -EINVAL;
		} else {
			if (set)
				*id >>= 24;
			else
				*id <<= 24;
		}

		/* In x2APIC mode, the LDR is fixed and based on the id */
		if (set)
			*ldr = kvm_apic_calc_x2apic_ldr(*id);
	}

	return 0;
}

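/*
 * Conversion example (editor's note, not upstream text): without the
 * 32-bit-ID x2APIC format, userspace exchanges the APIC ID in xAPIC
 * style, i.e. in bits 31:24 of the register.  For vcpu_id 3 the saved
 * image holds 0x03000000; on set (KVM_SET_LAPIC) it is shifted down to
 * 3, and shifted back up on get (KVM_GET_LAPIC).
 */
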
int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

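/*
 * Editor's usage sketch (hypothetical userspace, not part of this
 * file): the register page round-trips through the KVM_GET_LAPIC and
 * KVM_SET_LAPIC vCPU ioctls:
 *
 *	struct kvm_lapic_state state;
 *
 *	ioctl(vcpu_fd, KVM_GET_LAPIC, &state);	// snapshot, TMCCT filled in
 *	ioctl(vcpu_fd, KVM_SET_LAPIC, &state);	// restore, timer restarted
 */
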
int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	hrtimer_cancel(&apic->lapic_timer.timer);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	apic->highest_isr_cache = -1;
	if (vcpu->arch.apicv_active) {
		static_call(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call(kvm_x86_hwapic_irr_update)(vcpu,
				apic_find_highest_irr(apic));
		static_call(kvm_x86_hwapic_isr_update)(vcpu,
				apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
		kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	bool pending;
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));
	pending = pv_eoi_get_pending(vcpu);
	/*
	 * Clear pending bit in any case: it will be set again on vmentry.
	 * While this might not be ideal from performance point of view,
	 * this makes sure pv eoi is only enabled when we know it's safe.
	 */
	pv_eoi_clr_pending(vcpu);
	if (pending)
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}

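/*
 * Worked example (editor's note, not upstream text): the host arms
 * KVM_APIC_PV_EOI_PENDING before entry; a guest that EOIs through the
 * PV path clears KVM_PV_EOI_ENABLED in its memory, so on the next exit
 * "pending" reads false here and apic_set_eoi() completes the EOI on
 * the guest's behalf without a separate EOI exit.
 */
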
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
			       sizeof(u32));
}

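/*
 * Layout example (editor's note, not upstream text): the 32-bit vAPIC
 * word packs the TPR in byte 0, the high nibble of the highest
 * in-service vector in byte 1, and the highest pending IRR vector in
 * byte 3.  E.g. tpr = 0x20, max_isr = 0x31, max_irr = 0x41 yields
 * data = 0x41003020.
 */
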
int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_ICR2)
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

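/*
 * Offset example (editor's note, not upstream text): x2APIC MSRs start
 * at APIC_BASE_MSR (0x800) and each one maps to a 16-byte-spaced MMIO
 * register, hence the "<< 4".  A write to MSR 0x830 (the ICR) lands on
 * reg = (0x830 - 0x800) << 4 = 0x300 = APIC_ICR.
 */
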
int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4, low, high = 0;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	if (reg == APIC_DFR || reg == APIC_ICR2)
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;

	if (!lapic_in_kernel(vcpu))
		return 1;

	/* if this is ICR write vector before command */
	if (reg == APIC_ICR)
		kvm_lapic_reg_write(apic, APIC_ICR2, (u32)(data >> 32));
	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 low, high = 0;

	if (!lapic_in_kernel(vcpu))
		return 1;

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;
	if (reg == APIC_ICR)
		kvm_lapic_reg_read(apic, APIC_ICR2, 4, &high);

	*data = (((u64)high) << 32) | low;

	return 0;
}

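/*
 * Context note (editor's, not upstream text): these two helpers back
 * the Hyper-V synthetic APIC MSRs (HV_X64_MSR_EOI/ICR/TPR), which
 * mirror the corresponding APIC registers but are reachable as MSRs
 * even from xAPIC mode.
 */
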
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	vcpu->arch.pv_eoi.msr_val = data;
	if (!pv_eoi_enabled(vcpu))
		return 0;

	if (addr == ghc->gpa && len <= ghc->len)
		new_len = ghc->len;
	else
		new_len = len;

	return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}

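/*
 * Layout example (editor's note, not upstream text): in
 * MSR_KVM_PV_EOI_EN the low bit (KVM_MSR_ENABLED) switches the feature
 * on and the remaining bits hold the 4-byte-aligned guest address of
 * the PV EOI flag word, e.g. data = 0x12345670 | KVM_MSR_ENABLED.
 */
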
int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;
	unsigned long pe;

	if (!lapic_in_kernel(vcpu))
		return 0;

	/*
	 * Read pending events before calling the check_events
	 * callback.
	 */
	pe = smp_load_acquire(&apic->pending_events);
	if (!pe)
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * If an event has happened and caused a vmexit,
		 * we know INITs are latched and therefore
		 * we will not incorrectly deliver an APIC
		 * event instead of a vmexit.
		 */
	}

	/*
	 * INITs are latched while CPU is in specific states
	 * (SMM, VMX root mode, SVM with GIF=0).
	 * Because a CPU cannot be in these states immediately
	 * after it has processed an INIT signal (and thus in
	 * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
	 * and leave the INIT pending.
	 */
	if (kvm_vcpu_latch_init(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		if (test_bit(KVM_APIC_SIPI, &pe))
			clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_bit(KVM_APIC_INIT, &pe)) {
		clear_bit(KVM_APIC_INIT, &apic->pending_events);
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_bit(KVM_APIC_SIPI, &pe)) {
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			kvm_x86_ops.vcpu_deliver_sipi_vector(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}

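/*
 * Sequence example (editor's note, not upstream text): in the classic
 * INIT-SIPI-SIPI AP bring-up, the INIT above parks the target vCPU in
 * KVM_MP_STATE_INIT_RECEIVED; the first SIPI delivers
 * apic->sipi_vector (the AP starts at physical address vector << 12)
 * and makes the vCPU RUNNABLE, so the second SIPI is ignored here.
 */
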
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	static_key_deferred_flush(&apic_sw_disabled);
}