/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#define KVM_MMIO_SIZE 8

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_IMMEDIATE_EXIT    15
#define KVM_USERSPACE_IRQ_SOURCE_ID 0

extern struct kmem_cache *kvm_vcpu_cache;

struct kvm_io_range {
        gpa_t addr;
        int len;
        struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 300

struct kvm_io_bus {
        int dev_count;
        struct kvm_io_range range[NR_IOBUS_DEVS];
};
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                     int len, const void *val);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
                    void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
                            int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
                              struct kvm_io_device *dev);
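
/*
 * Illustrative registration sequence for an in-kernel MMIO device; "dev"
 * here is a hypothetical struct kvm_io_device embedded in a driver, not
 * something defined by this header. Registration is done under slots_lock:
 *
 *      mutex_lock(&kvm->slots_lock);
 *      ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, len, dev);
 *      mutex_unlock(&kvm->slots_lock);
 */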
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
        struct work_struct work;
        struct list_head link;
        struct list_head queue;
        struct kvm_vcpu *vcpu;
        struct kvm_arch_async_pf arch;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
struct kvm_vcpu {
        struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
#endif
        int vcpu_id;
        int mode;
        unsigned long requests;
        unsigned long guest_debug;

        int guest_fpu_loaded, guest_xcr0_loaded;
        wait_queue_head_t wq;
        struct kvm_vcpu_stat stat;

#ifdef CONFIG_HAS_IOMEM
        int mmio_read_completed;
        unsigned char mmio_data[KVM_MMIO_SIZE];
        gpa_t mmio_phys_addr;
#endif

#ifdef CONFIG_KVM_ASYNC_PF
        struct {
                struct list_head queue;
                struct list_head done;
        } async_pf;
#endif

        struct kvm_vcpu_arch arch;
};
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
        return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
/*
 * Some of the bitops functions do not support arbitrarily long bitmaps;
 * this limit must be chosen so that per-slot bitmaps stay within their
 * range.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
struct kvm_lpage_info {
        unsigned long rmap_pde;
        int write_count;
};
struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long *dirty_bitmap;
        unsigned long *dirty_bitmap_head;
        unsigned long nr_dirty_pages;
        struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
        unsigned long userspace_addr;
        int id;
};
static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
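
/*
 * Worked example: a slot with npages = 33 needs ALIGN(33, 64) / 8 = 8
 * bytes of dirty bitmap on a 64-bit host (BITS_PER_LONG == 64), i.e. the
 * page count is rounded up to a whole number of longs.
 */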
struct kvm_kernel_irq_routing_entry {
        u32 gsi;
        int (*set)(struct kvm_kernel_irq_routing_entry *e,
                   struct kvm *kvm, int irq_source_id, int level);
        struct hlist_node link;
};
#ifdef __KVM_HAVE_IOAPIC

struct kvm_irq_routing_table {
        int chip[KVM_NR_IRQCHIPS][KVM_IOAPIC_NUM_PINS];
        struct kvm_kernel_irq_routing_entry *rt_entries;
        /*
         * Array indexed by gsi. Each entry contains the list of irq chips
         * the gsi is connected to.
         */
        struct hlist_head map[0];
};

#else

struct kvm_irq_routing_table {};

#endif
#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif
/*
 * Note: memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
        struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
        /* The mapping table from slot id to the index in memslots[]. */
        int id_to_index[KVM_MEM_SLOTS_NUM];
};
struct kvm {
        spinlock_t mmu_lock;
        struct mutex slots_lock;
        struct mm_struct *mm; /* userspace process tied to this vm */
        struct kvm_memslots *memslots;
        struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
        u32 bsp_vcpu_id;
        struct kvm_vcpu *bsp_vcpu;
#endif
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        atomic_t online_vcpus;
        int last_boosted_vcpu;
        struct list_head vm_list;
        struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
        struct {
                spinlock_t lock;
                struct list_head items;
        } irqfds;
        struct list_head ioeventfds;
#endif
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
        atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
        struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
        spinlock_t ring_lock;
        struct list_head coalesced_zones;
#endif
        struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
        /*
         * Update side is protected by irq_lock and,
         * if configured, irqfds.lock.
         */
        struct kvm_irq_routing_table __rcu *irq_routing;
        struct hlist_head mask_notifier_list;
        struct hlist_head irq_ack_notifier_list;
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
        struct mmu_notifier mmu_notifier;
        unsigned long mmu_notifier_seq;
        long mmu_notifier_count;
#endif
};
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...) \
        pr_err_ratelimited("kvm: %i: cpu%i " fmt, \
                           current->tgid, (vcpu)->vcpu_id, ## __VA_ARGS__)

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
        /* ensure vcpus[i] is visible once online_vcpus is observed */
        smp_rmb();
        return kvm->vcpus[i];
}
#define kvm_for_each_vcpu(idx, vcpup, kvm) \
        for (idx = 0; \
             idx < atomic_read(&kvm->online_vcpus) && \
             (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
             idx++)
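
/*
 * Illustrative use, e.g. kicking every online vcpu of a VM:
 *
 *      struct kvm_vcpu *vcpu;
 *      int i;
 *
 *      kvm_for_each_vcpu(i, vcpu, kvm)
 *              kvm_vcpu_kick(vcpu);
 */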
#define kvm_for_each_memslot(memslot, slots) \
        for (memslot = &slots->memslots[0]; \
             memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
             memslot++)
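
/*
 * Illustrative use, e.g. summing the pages backing a VM. The loop
 * condition above stops at the first empty slot, which works because
 * update_memslots() keeps used slots sorted in front of empty ones:
 *
 *      struct kvm_memory_slot *memslot;
 *      unsigned long pages = 0;
 *
 *      kvm_for_each_memslot(memslot, slots)
 *              pages += memslot->npages;
 */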
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
             struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new);
static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
        return rcu_dereference_check(kvm->memslots,
                        srcu_read_lock_held(&kvm->srcu)
                        || lockdep_is_held(&kvm->slots_lock));
}
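
/*
 * A minimal sketch of the read side implied by the check above; readers
 * hold kvm->srcu, writers hold kvm->slots_lock:
 *
 *      int idx = srcu_read_lock(&kvm->srcu);
 *      struct kvm_memslots *slots = kvm_memslots(kvm);
 *      ... use slots ...
 *      srcu_read_unlock(&kvm->srcu, idx);
 */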
static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
        int index = slots->id_to_index[id];
        struct kvm_memory_slot *slot;

        slot = &slots->memslots[index];

        WARN_ON(slot->id != id);
        return slot;
}
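
/*
 * Example: since memslots[] is no longer indexed by slot id (see the note
 * above struct kvm_memslots), lookups by id must go through this helper:
 *
 *      struct kvm_memory_slot *slot = id_to_memslot(kvm_memslots(kvm), id);
 */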
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
extern struct page *bad_page;
extern struct page *fault_page;

extern pfn_t bad_pfn;
extern pfn_t fault_pfn;

int is_error_page(struct page *page);
int is_error_pfn(pfn_t pfn);
int is_hwpoison_pfn(pfn_t pfn);
int is_fault_pfn(pfn_t pfn);
int is_noslot_pfn(pfn_t pfn);
int is_invalid_pfn(pfn_t pfn);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
                          int user_alloc);
int __kvm_set_memory_region(struct kvm *kvm,
                            struct kvm_userspace_memory_region *mem,
                            int user_alloc);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc);
void kvm_disable_largepages(void);
void kvm_arch_flush_shadow(struct kvm *kvm);
int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
                            int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);
pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr);
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
                       bool write_fault, bool *writable);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm *kvm,
                         struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_release_pfn_dirty(pfn_t pfn);
void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
                          unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
                         int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
                    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                           void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa);
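
/*
 * Sketch of the cached access pattern: initialize the cache once for a
 * fixed guest address, then access it repeatedly without a memslot
 * lookup on every call ("ghc" and "val" are hypothetical locals):
 *
 *      kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa);
 *      kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */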
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
                             gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
                      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log);

int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc);
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
int kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);

void kvm_free_physmem(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif
int kvm_arch_init_vm(struct kvm *kvm);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_free_all_assigned_devices(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

int kvm_is_mmio_pfn(pfn_t pfn);
struct kvm_irq_ack_notifier {
        struct hlist_node link;
        unsigned gsi;
        void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
struct kvm_assigned_dev_kernel {
        struct kvm_irq_ack_notifier ack_notifier;
        struct list_head list;
        unsigned int entries_nr;
        bool host_irq_disabled;
        struct msix_entry *host_msix_entries;
        struct msix_entry *guest_msix_entries;
        unsigned long irq_requested_type;
        spinlock_t intx_lock;
        struct pci_saved_state *pci_saved_state;
};
struct kvm_irq_mask_notifier {
        void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
        int irq;
        struct hlist_node link;
};

void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
                                    struct kvm_irq_mask_notifier *kimn);
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
                                      struct kvm_irq_mask_notifier *kimn);
void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
                             bool mask);
#ifdef __KVM_HAVE_IOAPIC
void kvm_get_intr_delivery_bitmask(struct kvm_ioapic *ioapic,
                                   union kvm_ioapic_redirect_entry *entry,
                                   unsigned long *deliver_bitmask);
#endif
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
                int irq_source_id, int level);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
/* For vcpu->arch.iommu_flags */
#define KVM_IOMMU_CACHE_COHERENCY 0x1
#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
int kvm_iommu_map_guest(struct kvm *kvm);
int kvm_iommu_unmap_guest(struct kvm *kvm);
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev);
int kvm_deassign_device(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *assigned_dev);
#else /* CONFIG_IOMMU_API */
static inline int kvm_iommu_map_pages(struct kvm *kvm,
                                      struct kvm_memory_slot *slot)
{
        return 0;
}

static inline int kvm_iommu_map_guest(struct kvm *kvm)
{
        return -ENODEV;
}

static inline int kvm_iommu_unmap_guest(struct kvm *kvm)
{
        return 0;
}

static inline int kvm_assign_device(struct kvm *kvm,
                                    struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}

static inline int kvm_deassign_device(struct kvm *kvm,
                                      struct kvm_assigned_dev_kernel *assigned_dev)
{
        return 0;
}
#endif /* CONFIG_IOMMU_API */
static inline void kvm_guest_enter(void)
{
        BUG_ON(preemptible());
        account_system_vtime(current);
        current->flags |= PF_VCPU;
        /*
         * KVM does not hold any references to rcu protected data when it
         * switches CPU into guest mode. In fact switching to guest mode
         * is very similar to exiting to userspace from an rcu point of
         * view. In addition a CPU may stay in guest mode for quite a long
         * time (up to one time slice). Let's treat guest mode as a
         * quiescent state, just like we do with user-mode execution.
         */
        rcu_virt_note_context_switch(smp_processor_id());
}
static inline void kvm_guest_exit(void)
{
        account_system_vtime(current);
        current->flags &= ~PF_VCPU;
}
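
/*
 * A simplified sketch of how an arch vcpu-run loop brackets the actual
 * guest entry with these helpers (details vary per architecture; note
 * that kvm_guest_enter() requires preemption to be disabled):
 *
 *      local_irq_disable();
 *      kvm_guest_enter();
 *      ... enter the guest ...
 *      local_irq_enable();
 *      kvm_guest_exit();
 */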
static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
        return gfn_to_memslot(kvm, gfn)->id;
}

static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
                                               gfn_t gfn)
{
        return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
        return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
        return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
        return (hpa_t)pfn << PAGE_SHIFT;
}
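
/*
 * Worked example of the shift arithmetic above, assuming the usual 4 KiB
 * pages (PAGE_SHIFT == 12): gfn_to_gpa(0x1234) == 0x1234000, and
 * gpa_to_gfn(0x1234567) == 0x1234 (the low 12 offset bits are dropped).
 */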
static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}
struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
        struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
static inline int mmu_notifier_retry(struct kvm_vcpu *vcpu, unsigned long mmu_seq)
{
        if (unlikely(vcpu->kvm->mmu_notifier_count))
                return 1;
        /*
         * Both reads happen under the mmu_lock and both values are
         * modified under mmu_lock, so there's no need for an smp_rmb()
         * in between; otherwise mmu_notifier_count would have to be
         * read before mmu_notifier_seq, see the
         * mmu_notifier_invalidate_range_end() write side.
         */
        if (vcpu->kvm->mmu_notifier_seq != mmu_seq)
                return 1;
        return 0;
}
#endif
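
/*
 * Sketch of the retry protocol this check supports, as used from an arch
 * page-fault path (simplified from the real arch code):
 *
 *      mmu_seq = kvm->mmu_notifier_seq;
 *      smp_rmb();
 *      pfn = gfn_to_pfn(kvm, gfn);             (may sleep)
 *      spin_lock(&kvm->mmu_lock);
 *      if (mmu_notifier_retry(vcpu, mmu_seq))
 *              goto out_unlock;                (a notifier ran, retry)
 *      ... install the mapping ...
 *      spin_unlock(&kvm->mmu_lock);
 */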
#ifdef CONFIG_HAVE_KVM_IRQCHIP

#define KVM_MAX_IRQ_ROUTES 1024

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *entries,
                        unsigned nr, unsigned flags);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif
#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *, struct kvm_irq_routing_table *);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
        return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm,
                                          struct kvm_irq_routing_table *irq_rt)
{
        rcu_assign_pointer(kvm->irq_routing, irq_rt);
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
static inline bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
{
        return vcpu->kvm->bsp_vcpu_id == vcpu->vcpu_id;
}
#endif
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT

long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg);

#else

static inline long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                                unsigned long arg)
{
        return -ENOTTY;
}

#endif
static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (test_bit(req, &vcpu->requests)) {
                clear_bit(req, &vcpu->requests);
                return true;
        } else {
                return false;
        }
}
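
/*
 * Typical request flow (illustrative): a producer raises a request and
 * kicks the vcpu, and the vcpu loop consumes it before the next guest
 * entry:
 *
 *      kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *      kvm_vcpu_kick(vcpu);
 *
 * and on the vcpu side:
 *
 *      if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *              ... flush the guest TLB (arch-specific) ...
 */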