/*
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#define KVM_MAX_VCPUS 4
#define KVM_ALIAS_SLOTS 4
#define KVM_MEMORY_SLOTS 8
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
#define KVM_NUM_MMU_PAGES 1024
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
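/*
 * Illustrative sketch (not part of the original header): the shadow-page
 * budget is roughly KVM_PERMILLE_MMU_PAGES per thousand guest pages, with
 * KVM_MIN_ALLOC_MMU_PAGES as a floor.  For a 512 MB guest (131072 4K pages)
 * that is about 131072 * 20 / 1000 = 2621 shadow pages, well above the
 * 64-page minimum:
 *
 *	nr_mmu_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
 *	nr_mmu_pages = max(nr_mmu_pages, (unsigned int)KVM_MIN_ALLOC_MMU_PAGES);
 */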
#define KVM_MAX_CPUID_ENTRIES 40

#define KVM_PIO_PAGE_OFFSET 1
/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH 0
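/*
 * Usage sketch (assumed, not taken from this header): a request bit is
 * raised with set_bit() and consumed with test_and_clear_bit() on
 * vcpu->requests before the vcpu re-enters the guest, roughly as below
 * (flush_this_vcpu_tlb() is a hypothetical stand-in for the arch hook):
 *
 *	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
 *	...
 *	if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
 *		flush_this_vcpu_tlb(vcpu);
 */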
#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
	u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
	struct hlist_node link;
/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 *   bits 17:19 - common access permissions for all ptes in this shadow page
 */
union kvm_mmu_page_role {
	unsigned quadrant : 2;
	unsigned pad_for_nice_hex_output : 6;
	unsigned metaphysical : 1;
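	/*
	 * Worked example (illustrative, not from the original source): with
	 * the layout described above, a bottom-level shadow page for a
	 * 4-level guest packs 4 into bits 0:3 and 1 into bits 4:7, giving a
	 * role word of 0x14.  The six pad bits exist only so that the
	 * metaphysical and access bits land at bit 16 and above, keeping a
	 * printed %x value easy to decode by eye - the "nice hex output" in
	 * the pad field's name.
	 */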
	struct list_head link;
	struct hlist_node hash_link;
	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	/* hold the gfn of each spte inside spt */
	unsigned long slot_bitmap; /* One bit set per slot which has memory
				    * in this shadow page.
				    */
	int multimapped;         /* More than one parent_pte? */
	int root_count;          /* Currently serving as active root */
	u64 *parent_pte;               /* !multimapped */
	struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
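	/*
	 * Illustrative note (assumption, not from the original header): with
	 * a single parent, parent_pte points directly at the one parent spte;
	 * once a second parent appears, multimapped is set and the parents
	 * are kept in kvm_pte_chain blocks hanging off parent_ptes, so a
	 * reverse walk might look like:
	 *
	 *	struct kvm_pte_chain *chain;
	 *	struct hlist_node *node;
	 *	int i;
	 *
	 *	hlist_for_each_entry(chain, node, &page->parent_ptes, link)
	 *		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
	 *			if (chain->parent_ptes[i])
	 *				... visit chain->parent_ptes[i] ...
	 */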
extern struct kmem_cache *kvm_vcpu_cache;
/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit).  The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
	void (*new_cr3)(struct kvm_vcpu *vcpu);
	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
	void (*free)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
	void (*prefetch_page)(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *page);
	int shadow_root_level;
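/*
 * Usage sketch (assumption, not part of the original header): callers go
 * through these function pointers so the same call sites work in every
 * paging mode, e.g. a fault or translation path might simply do
 *
 *	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
 *	gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
 *
 * with the pointers swapped out when the guest changes paging mode.
 */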
#define KVM_NR_MEM_OBJS 40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	void *objects[KVM_NR_MEM_OBJS];
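	/*
	 * Illustrative sketch (the "nobjs" count and the minimum level are
	 * assumptions, not shown in this excerpt): before fault handling
	 * starts, each cache is topped up so later allocations cannot fail
	 * mid-walk, roughly
	 *
	 *	while (cache->nobjs < min)
	 *		cache->objects[cache->nobjs++] =
	 *			kmem_cache_zalloc(cachep, GFP_KERNEL);
	 *
	 * and the fault path then hands out cache->objects[--cache->nobjs].
	 */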
struct kvm_guest_debug {

struct kvm_pio_request {
	struct page *guest_pages[2];
	unsigned guest_page_offset;
struct kvm_vcpu_stat {
	u32 irq_window_exits;
	u32 request_irq_exits;
	u32 host_state_reload;
	u32 insn_emulation_fail;
/*
 * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * so until then it will suffice.  At least it's abstracted so we can change
 * it later if necessary.
 */
#define NR_IOBUS_DEVS 6
	struct kvm_io_device *devs[NR_IOBUS_DEVS];
void kvm_io_bus_init(struct kvm_io_bus *bus);
void kvm_io_bus_destroy(struct kvm_io_bus *bus);
struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr);
void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
			     struct kvm_io_device *dev);
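/*
 * Dispatch sketch (assumption, not from this header): emulated MMIO and PIO
 * accesses are routed with a linear scan of the registered devices, along
 * the lines of
 *
 *	struct kvm_io_device *dev;
 *
 *	dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, gpa);
 *	if (dev)
 *		... hand the access to dev ...
 *	else
 *		... exit to userspace and let it emulate the access ...
 *
 * which stays cheap as long as NR_IOBUS_DEVS remains as small as it is here.
 */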
#ifdef CONFIG_HAS_IOMEM
#define KVM_VCPU_MMIO			\
	int mmio_read_completed;	\
	unsigned char mmio_data[8];	\
	gpa_t mmio_phys_addr;

#define KVM_VCPU_MMIO
#define KVM_VCPU_COMM					\
	struct preempt_notifier preempt_notifier;	\
	struct mutex mutex;				\
	struct kvm_run *run;				\
	unsigned long requests;				\
	struct kvm_guest_debug guest_debug;		\
	int guest_fpu_loaded;				\
	wait_queue_head_t wq;				\
	struct kvm_vcpu_stat stat;			\
struct kvm_mem_alias {
	unsigned long npages;

struct kvm_memory_slot {
	unsigned long npages;
	unsigned long *dirty_bitmap;
	unsigned long userspace_addr;
	u32 mmu_shadow_zapped;
	u32 remote_tlb_flush;
	struct mutex lock; /* protects everything except vcpus */
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_mem_alias aliases[KVM_ALIAS_SLOTS];
	struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
					KVM_PRIVATE_MEM_SLOTS];
	/*
	 * Hash table of struct kvm_mmu_page.
	 */
	struct list_head active_mmu_pages;
	unsigned int n_free_mmu_pages;
	unsigned int n_requested_mmu_pages;
	unsigned int n_alloc_mmu_pages;
	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	struct list_head vm_list;
	struct kvm_io_bus mmio_bus;
	struct kvm_io_bus pio_bus;
	struct kvm_pic *vpic;
	struct kvm_ioapic *vioapic;
	int round_robin_prev_vcpu;
	unsigned int tss_addr;
	struct page *apic_access_page;
	struct kvm_vm_stat stat;
static inline struct kvm_pic *pic_irqchip(struct kvm *kvm)

static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)

static inline int irqchip_in_kernel(struct kvm *kvm)
{
	return pic_irqchip(kvm) != NULL;
}
/* The guest did something we don't support. */
#define pr_unimpl(vcpu, fmt, ...)					\
	if (printk_ratelimit())						\
		printk(KERN_ERR "kvm: %i: cpu%i " fmt,			\
		       current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)
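/*
 * Example use of the helpers above (hypothetical call site, not from this
 * header): the ratelimit check keeps a misbehaving guest from flooding the
 * host log, e.g.
 *
 *	pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
 */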
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

void decache_vcpus_on_cpu(int cpu);

int kvm_init(void *opaque, unsigned int vcpu_size,
	     struct module *module);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);

extern struct page *bad_page;

int is_error_page(struct page *page);
int kvm_is_error_hva(unsigned long addr);
int kvm_set_memory_region(struct kvm *kvm,
			  struct kvm_userspace_memory_region *mem,
int __kvm_set_memory_region(struct kvm *kvm,
			    struct kvm_userspace_memory_region *mem,
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_resched(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_flush_remote_tlbs(struct kvm *kvm);
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);

int kvm_dev_ioctl_check_extension(long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);
void kvm_arch_destroy_vm(struct kvm *kvm);
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_arch_hardware_enable(void *garbage);
void kvm_arch_hardware_disable(void *garbage);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);

void kvm_free_physmem(struct kvm *kvm);

struct kvm *kvm_arch_create_vm(void);
void kvm_arch_destroy_vm(struct kvm *kvm);
static inline void kvm_guest_enter(void)
{
	account_system_vtime(current);
	current->flags |= PF_VCPU;
}

static inline void kvm_guest_exit(void)
{
	account_system_vtime(current);
	current->flags &= ~PF_VCPU;
}
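/*
 * Usage sketch (assumption, not from this header): the vcpu loop brackets
 * the actual guest run with these helpers so that time spent in the guest
 * is accounted to the task as guest time rather than ordinary system time:
 *
 *	kvm_guest_enter();
 *	... enter the guest via the hardware run loop ...
 *	kvm_guest_exit();
 */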
static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	return slot - kvm->memslots;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}
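/*
 * Example (arithmetic only, not from the original source): with 4K pages
 * (PAGE_SHIFT == 12), gfn_to_gpa(0x100) == 0x100000, i.e. guest frame 256
 * starts at the 1 MB guest-physical mark.
 */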
struct kvm_stats_debugfs_item {
	enum kvm_stat_kind kind;
	struct dentry *dentry;

extern struct kvm_stats_debugfs_item debugfs_entries[];

#if defined(CONFIG_X86)