/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#define KVM_MMIO_SIZE 8
/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2
/*
 * For a normal pfn, the highest 12 bits should be zero, so we can mask
 * bits 62 ~ 52 to indicate an error pfn, and mask bit 63 to indicate a
 * noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)
/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}
/*
 * error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not backed by any slot, or the translation failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}
/* noslot pfns indicate that the gfn is not backed by any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
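/*
 * Illustrative sketch (not part of the original header; the helper name
 * is hypothetical): a caller of a gfn->pfn translation distinguishes the
 * three possible outcomes with the predicates above.
 */
static inline int example_classify_pfn(pfn_t pfn)
{
	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn is not backed by any memslot */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* gfn is in a slot, but translation failed */
	return 0;		/* valid host pfn */
}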
#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}
#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_REQ_PMU               16
#define KVM_REQ_PMI               17
#define KVM_REQ_WATCHDOG          18
#define KVM_REQ_MASTERCLOCK_UPDATE 19
#define KVM_REQ_MCLOCK_INPROGRESS 20
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

struct kvm;
struct kvm_vcpu;
extern struct kmem_cache *kvm_vcpu_cache;
struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_NR_BUSES
};
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
		    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
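/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): an emulated MMIO store is first offered to in-kernel devices
 * on the MMIO bus; a nonzero return from kvm_io_bus_write() means no
 * device claimed the address and the access must be completed by
 * userspace instead.
 */
static inline bool example_mmio_write_in_kernel(struct kvm *kvm, gpa_t addr,
						int len, const void *val)
{
	return kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, val) == 0;
}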
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	struct kvm_arch_async_pf arch;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};
/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int vcpu_id;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int guest_fpu_loaded, guest_xcr0_loaded;
	wait_queue_head_t wq;
	struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
	int mmio_read_completed;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		struct list_head queue;
		struct list_head done;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization:
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or has its cpu relax intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	struct kvm_vcpu_arch arch;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so as not to exceed such limits.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long flags;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 id;
};
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
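/*
 * Worked example (illustrative): a 500-page memslot on a 64-bit host
 * needs ALIGN(500, 64) / 8 = 512 / 8 = 64 bytes of dirty bitmap, i.e.
 * one bit per page, rounded up to a whole long.
 */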
struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level);
	struct hlist_node link;
};
#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
	struct kvm_kernel_irq_routing_entry *rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif
#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

/*
 * Note: memslots are not sorted by id anymore; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	int id_to_index[KVM_MEM_SLOTS_NUM];
};
struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots;
	struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
	u32 bsp_vcpu_id;
#endif
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * The update side is protected by irq_lock and,
	 * if configured, irqfds.lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
	struct hlist_head mask_notifier_list;
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
};
#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...) \
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	smp_rmb();
	return kvm->vcpus[i];
}
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)
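/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): kvm_for_each_vcpu() walks the online vcpus and stops at the
 * first NULL slot.
 */
static inline int example_count_vcpus_with_requests(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i, n = 0;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->requests)
			n++;
	return n;
}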
#define kvm_for_each_memslot(memslot, slots) \
	for (memslot = &slots->memslots[0]; \
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages; \
	     memslot++)
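/*
 * Illustrative sketch (hypothetical helper): kvm_for_each_memslot()
 * visits only slots in use, since it stops at the first slot with
 * npages == 0 (used slots are kept at the front of the array).
 */
static inline unsigned long example_total_guest_pages(struct kvm_memslots *slots)
{
	struct kvm_memory_slot *memslot;
	unsigned long pages = 0;

	kvm_for_each_memslot(memslot, slots)
		pages += memslot->npages;
	return pages;
}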
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return rcu_dereference_check(kvm->memslots,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}
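/*
 * Illustrative sketch (hypothetical helper): readers dereference
 * kvm->memslots under kvm->srcu, which is exactly what the
 * rcu_dereference_check() in kvm_memslots() above verifies.
 */
static inline unsigned long example_slot_npages(struct kvm *kvm, int id)
{
	unsigned long npages;
	int idx;

	idx = srcu_read_lock(&kvm->srcu);
	npages = id_to_memslot(kvm_memslots(kvm), id)->npages;
	srcu_read_unlock(&kvm->srcu, idx);

	return npages;
}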
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
			  int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
			    int user_alloc);
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_memory_slot old,
				struct kvm_userspace_memory_region *mem,
				int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);
int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
			    int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
		       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);

void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     gfn_t gfn);
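/*
 * Illustrative sketch (hypothetical helper): kvm_read_guest() copies
 * from guest physical memory into a kernel buffer; a nonzero return
 * means part of the range was not covered by any memslot or the copy
 * faulted.
 */
static inline int example_read_guest_u32(struct kvm *kvm, gpa_t gpa, u32 *val)
{
	return kvm_read_guest(kvm, gpa, val, sizeof(*val));
}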
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
			struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc);
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);

void *kvm_kvzalloc(unsigned long size);
void kvm_kvfree(const void *addr);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_mmio_pfn(pfn_t pfn);
struct kvm_irq_ack_notifier {
	struct hlist_node link;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
struct kvm_assigned_dev_kernel {
	struct kvm_irq_ack_notifier ack_notifier;
	struct list_head list;
	unsigned int entries_nr;
	bool host_irq_disabled;
	struct msix_entry *host_msix_entries;
	struct msix_entry *guest_msix_entries;
	unsigned long irq_requested_type;
	spinlock_t intx_lock;
	spinlock_t intx_mask_lock;
	struct pci_saved_state *pci_saved_state;
};
struct kvm_irq_mask_notifier {
	void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
	struct hlist_node link;
};
void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
				    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
			     bool mask);
#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
				   union kvm_ioapic_redirect_entry *entry,
				   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY	0x1
#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
	return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
				    struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
				      struct kvm_assigned_dev_kernel *assigned_dev)
{
	return 0;
}
#endif /* CONFIG_IOMMU_API */
static inline void __guest_enter(void)
{
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system(current);
	current->flags |= PF_VCPU;
}

static inline void __guest_exit(void)
{
	/*
	 * This is running in ioctl context so we can avoid
	 * the call to vtime_account() with its unnecessary idle check.
	 */
	vtime_account_system(current);
	current->flags &= ~PF_VCPU;
}
#ifdef CONFIG_CONTEXT_TRACKING
extern void guest_enter(void);
extern void guest_exit(void);

#else /* !CONFIG_CONTEXT_TRACKING */
static inline void guest_enter(void)
{
	__guest_enter();
}

static inline void guest_exit(void)
{
	__guest_exit();
}
#endif /* !CONFIG_CONTEXT_TRACKING */
static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	BUG_ON(preemptible());

	local_irq_save(flags);
	guest_enter();
	local_irq_restore(flags);

	/* KVM does not hold any references to rcu protected data when it
	 * switches CPU into guest mode.  In fact switching to guest mode
	 * is very similar to exiting to userspace from an rcu point of
	 * view.  In addition, a CPU may stay in guest mode for quite a
	 * long time (up to one time slice).  Let's treat guest mode as a
	 * quiescent state, just like we do with user-mode execution.
	 */
	rcu_virt_note_context_switch(smp_processor_id());
}
static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit();
	local_irq_restore(flags);
}
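/*
 * Illustrative calling pattern (sketch; arch code differs in detail):
 * a vcpu-run loop brackets the actual guest entry with these helpers,
 * with preemption already disabled so the BUG_ON() in kvm_guest_enter()
 * holds:
 *
 *	preempt_disable();
 *	kvm_guest_enter();
 *	... enter the guest ...
 *	kvm_guest_exit();
 *	preempt_enable();
 */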
/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	struct kvm_memory_slot *memslot;

	kvm_for_each_memslot(memslot, slots)
		if (gfn >= memslot->base_gfn &&
		    gfn < memslot->base_gfn + memslot->npages)
			return memslot;

	return NULL;
}
static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}
static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}
static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}
static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}
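/*
 * Worked example (illustrative): for a slot with base_gfn = 0x100 and
 * userspace_addr = 0x7f0000000000, gfn 0x105 maps to
 * hva = 0x7f0000000000 + 5 * PAGE_SIZE = 0x7f0000005000 (4 KiB pages),
 * and hva_to_gfn_memslot() inverts this back to gfn 0x105.
 */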
static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}
static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
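/*
 * Worked example (illustrative): with 4 KiB pages (PAGE_SHIFT == 12),
 * gfn_to_gpa(0x1234) == 0x1234000 and gpa_to_gfn(0x1234fff) == 0x1234;
 * all three conversions above are pure shifts by PAGE_SHIFT.
 */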
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so it can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
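/*
 * Illustrative calling pattern (sketch of an arch page-fault path; not
 * part of the original header):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	(may sleep, may race with invalidates)
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;		(the pfn may have been invalidated)
 *	... install the mapping ...
 *	spin_unlock(&kvm->mmu_lock);
 */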
#ifdef KVM_CAP_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif
#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQCHIP
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}
static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
					  struct kvm_irq_routing_table *irq_rt)
{
	rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);

#else

static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }

#endif
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
				  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
						unsigned long arg)
{
	return 0;
}

#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
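/*
 * Illustrative sketch (hypothetical helper): the producer side sets a
 * request bit and kicks the vcpu; the consumer side tests and clears it
 * with kvm_check_request() at the top of the vcpu-run loop, e.g.
 * "if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) ... flush ...".
 */
static inline void example_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);	/* make the vcpu notice promptly */
}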
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
	return true;
}

#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */