/*
 * kvm_ia64.c: Basic KVM support on Itanium series processors
 *
 * Copyright (C) 2007, Intel Corporation.
 *   Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <linux/uaccess.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>

#include <asm/pgtable.h>
#include <asm/gcc_intrin.h>
#include <asm/cacheflush.h>
#include <asm/div64.h>
#include <asm/sn/addrs.h>
#include <asm/sn/clksupport.h>
#include <asm/sn/shub_mmr.h>
static unsigned long kvm_vmm_base;
static unsigned long kvm_vsa_base;
static unsigned long kvm_vm_buffer;
static unsigned long kvm_vm_buffer_size;
unsigned long kvm_vmm_gp;

static long vp_env_info;

static struct kvm_vmm_info *kvm_vmm_info;

static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);

struct kvm_stats_debugfs_item debugfs_entries[] = {
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
        if (vcpu->kvm->arch.is_sn2)
                return rtc_time();
#endif
        return ia64_getreg(_IA64_REG_AR_ITC);
static void kvm_flush_icache(unsigned long start, unsigned long len)
        for (l = 0; l < (len + 32); l += 32)
                ia64_fc((void *)(start + l));
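        /*
         * The loop above advances in 32-byte steps, the minimum i-cache
         * line size on Itanium, and rounds up rather than down so a
         * partial trailing line is still flushed. In the complete
         * function this is presumably followed by an instruction-stream
         * sync (ia64_sync_i()/ia64_srlz_i()), elided in this excerpt.
         */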
static void kvm_flush_tlb_all(void)
        unsigned long i, j, count0, count1, stride0, stride1, addr;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
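/*
 * The nested loop above purges the local TLB using the geometry PAL
 * advertises for ptc.e: count0 * count1 purges in total, with the
 * address advanced by stride1 per inner iteration and by stride0 per
 * outer one (the purge body itself is elided in this excerpt).
 */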
long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
        struct ia64_pal_retval iprv;

        PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
                        (u64)opt_handler);

static DEFINE_SPINLOCK(vp_lock);
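/*
 * vp_lock serializes kvm_arch_hardware_enable() across CPUs: only the
 * first caller (while kvm_vsa_base is still zero) initializes the PAL
 * VP environment; later CPUs just join the environment already set up.
 */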
int kvm_arch_hardware_enable(void *garbage)
        unsigned long saved_psr;

        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
        local_irq_save(saved_psr);
        slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        local_irq_restore(saved_psr);

        status = ia64_pal_vp_init_env(kvm_vsa_base ?
                                VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
                        __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
                spin_unlock(&vp_lock);
                printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");

        kvm_vsa_base = tmp_base;
        printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);

        spin_unlock(&vp_lock);
        ia64_ptr_entry(0x3, slot);
void kvm_arch_hardware_disable(void *garbage)
        unsigned long saved_psr;
        unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);

        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
                                PAGE_KERNEL));

        local_irq_save(saved_psr);
        slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        local_irq_restore(saved_psr);

        status = ia64_pal_vp_exit_env(host_iva);
                printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
                                status);
        ia64_ptr_entry(0x3, slot);

void kvm_arch_check_processor_compat(void *rtn)
int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_IRQCHIP:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_IRQ_INJECT_STATUS:

        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;

                r = iommu_present(&pci_bus_type);
static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = 1;

static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct kvm_mmio_req *p;
        struct kvm_io_device *mmio_dev;

        p = kvm_get_vcpu_ioreq(vcpu);

        if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)

        vcpu->mmio_needed = 1;
        vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
        vcpu->mmio_size = kvm_run->mmio.len = p->size;
        vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;

        if (vcpu->mmio_is_write)
                memcpy(vcpu->mmio_data, &p->data, p->size);
        memcpy(kvm_run->mmio.data, &p->data, p->size);
        kvm_run->exit_reason = KVM_EXIT_MMIO;

        r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
                        p->size, &p->data);
        r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
                        p->size, &p->data);
                printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
        p->state = STATE_IORESP_READY;
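/*
 * handle_mmio() above has two paths: accesses that land on the
 * in-kernel I/O bus (the IOAPIC page) are completed immediately via
 * kvm_io_bus_read/write and the ioreq is marked STATE_IORESP_READY;
 * everything else is punted to userspace as a KVM_EXIT_MMIO, to be
 * completed on the next KVM_RUN (see kvm_set_mmio_data() below).
 */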
static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);

        if (p->exit_reason == EXIT_REASON_PAL_CALL)
                return kvm_pal_emul(vcpu, kvm_run);

        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = 2;

static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct exit_ctl_data *p;

        p = kvm_get_exit_data(vcpu);

        if (p->exit_reason == EXIT_REASON_SAL_CALL) {

        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = 3;
static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (!test_and_set_bit(vector, &vpd->irr[0])) {
                vcpu->arch.irq_new_pending = 1;

/*
 * offset: address offset to IPI space.
 * value: deliver value.
 */
static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
                uint64_t vector)
                printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
        __apic_accept_irq(vcpu, vector);
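/*
 * The switch elided above decodes the SAPIC delivery mode (dm) taken
 * from the IPI data; for the modes handled here the result is simply a
 * vector handed to __apic_accept_irq(), which marks it pending in the
 * VPD irr and raises irq_new_pending.
 */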
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
                unsigned long eid)
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                lid.val = VCPU_LID(vcpu);
                if (lid.id == id && lid.eid == eid)
                        return vcpu;

static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
        struct kvm_vcpu *target_vcpu;
        struct kvm_pt_regs *regs;
        union ia64_ipi_a addr = p->u.ipi_data.addr;
        union ia64_ipi_d data = p->u.ipi_data.data;

        target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
                return handle_vm_error(vcpu, kvm_run);

        if (!target_vcpu->arch.launched) {
                regs = vcpu_regs(target_vcpu);

                regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
                regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;

                target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                if (waitqueue_active(&target_vcpu->wq))
                        wake_up_interruptible(&target_vcpu->wq);

        vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
        if (target_vcpu != vcpu)
                kvm_vcpu_kick(target_vcpu);
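/*
 * In handle_ipi() above, the first IPI to a not-yet-launched vcpu
 * doubles as its boot signal: the target's IP and GP are pointed at
 * the SAL boot rendezvous entry (rdv_sal_data) before it is marked
 * runnable, roughly the ia64 counterpart of an x86 INIT/SIPI sequence.
 */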
struct call_data {
        struct kvm_ptc_g ptc_g_data;
        struct kvm_vcpu *vcpu;
};

static void vcpu_global_purge(void *info)
        struct call_data *p = (struct call_data *)info;
        struct kvm_vcpu *vcpu = p->vcpu;

        if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))

        set_bit(KVM_REQ_PTC_G, &vcpu->requests);
        if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
                vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
                        p->ptc_g_data;
        } else {
                clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
                vcpu->arch.ptc_g_count = 0;
                set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
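/*
 * Remote half of a guest ptc.g: purges are queued per-vcpu in
 * ptc_g_data[], up to MAX_PTC_G_NUM entries. On overflow the queue is
 * reset and the vcpu falls back to a full TLB flush
 * (KVM_REQ_TLB_FLUSH), which subsumes the queued individual purges.
 */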
static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
        struct kvm *kvm = vcpu->kvm;
        struct call_data call_data;
        struct kvm_vcpu *vcpui;

        call_data.ptc_g_data = p->u.ptc_g_data;

        kvm_for_each_vcpu(i, vcpui, kvm) {
                if (vcpui->arch.mp_state == KVM_MP_STATE_UNINITIALIZED ||
                                vcpui == vcpu)
                        continue;

                if (waitqueue_active(&vcpui->wq))
                        wake_up_interruptible(&vcpui->wq);

                if (vcpui->cpu != -1) {
                        call_data.vcpu = vcpui;
                        smp_call_function_single(vcpui->cpu,
                                        vcpu_global_purge, &call_data, 1);
                } else
                        printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
        unsigned long pte, rtc_phys_addr, map_addr;

        map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
        rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
        pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
        slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
        vcpu->arch.sn_rtc_tr_slot = slot;
                printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
        unsigned long vcpu_now_itc;
        unsigned long expires;
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
        unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (irqchip_in_kernel(vcpu->kvm)) {

                vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;

                if (time_after(vcpu_now_itc, vpd->itm)) {
                        vcpu->arch.timer_check = 1;

                itc_diff = vpd->itm - vcpu_now_itc;
                if (itc_diff < 0)
                        itc_diff = -itc_diff;

                expires = div64_u64(itc_diff, cyc_per_usec);
                kt = ktime_set(0, 1000 * expires);
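                /*
                 * Units: itc_diff is in ITC cycles, so dividing by
                 * cyc_per_usec yields microseconds until the guest's
                 * next timer match (itm), and the factor of 1000
                 * converts that to the nanoseconds ktime_set() takes.
                 */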
                vcpu->arch.ht_active = 1;
                hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);

                vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                kvm_vcpu_block(vcpu);
                hrtimer_cancel(p_ht);
                vcpu->arch.ht_active = 0;

                if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
                                kvm_cpu_has_pending_timer(vcpu))
                        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
                                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

                if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)

        } else {
                printk(KERN_ERR"kvm: Unsupported userspace halt!");
static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run)
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;

static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run)

static int handle_vcpu_debug(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run)
        printk("VMM: %s", vcpu->arch.log_buf);

static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
                struct kvm_run *kvm_run) = {
        [EXIT_REASON_VM_PANIC]              = handle_vm_error,
        [EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
        [EXIT_REASON_PAL_CALL]              = handle_pal_call,
        [EXIT_REASON_SAL_CALL]              = handle_sal_call,
        [EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
        [EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
        [EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
        [EXIT_REASON_IPI]                   = handle_ipi,
        [EXIT_REASON_PTC_G]                 = handle_global_purge,
        [EXIT_REASON_DEBUG]                 = handle_vcpu_debug,
};

static const int kvm_vti_max_exit_handlers =
        ARRAY_SIZE(kvm_vti_exit_handlers);
static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
        struct exit_ctl_data *p_exit_data;

        p_exit_data = kvm_get_exit_data(vcpu);
        return p_exit_data->exit_reason;

/*
 * The guest has exited. See if we can fix it or if we need userspace
 * assistance.
 */
static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        u32 exit_reason = kvm_get_exit_reason(vcpu);
        vcpu->arch.last_exit = exit_reason;

        if (exit_reason < kvm_vti_max_exit_handlers
                        && kvm_vti_exit_handlers[exit_reason])
                return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);

        kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
        kvm_run->hw.hardware_exit_reason = exit_reason;

static inline void vti_set_rr6(unsigned long rr6)
        ia64_set_rr(RR6, rr6);
static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
        struct kvm *kvm = vcpu->kvm;

        /* Insert a pair of tr to map vmm */
        pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
        r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
        vcpu->arch.vmm_tr_slot = r;

        /* Insert a pair of tr to map data of vm */
        pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
        r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
                        pte, KVM_VM_DATA_SHIFT);
        vcpu->arch.vm_tr_slot = r;

#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
        if (kvm->arch.is_sn2) {
                r = kvm_sn2_setup_mappings(vcpu);

static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
        struct kvm *kvm = vcpu->kvm;

        ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
        ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
        if (kvm->arch.is_sn2)
                ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
#endif

static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
        int cpu = smp_processor_id();

        if (vcpu->arch.last_run_cpu != cpu ||
                        per_cpu(last_vcpu, cpu) != vcpu) {
                per_cpu(last_vcpu, cpu) = vcpu;
                vcpu->arch.last_run_cpu = cpu;

        vcpu->arch.host_rr6 = ia64_get_rr(RR6);
        vti_set_rr6(vcpu->arch.vmm_rr);
        r = kvm_insert_vmm_mapping(vcpu);
        local_irq_restore(psr);

static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
        kvm_purge_vmm_mapping(vcpu);
        vti_set_rr6(vcpu->arch.host_rr6);
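/*
 * World-switch bracket: before entering the VMM, the pre-transition
 * path pins the VMM image and the per-VM data area with translation
 * registers and points RR6 at the VMM's region (vmm_rr); on the way
 * back the mappings are purged and the host's RR6 value is restored.
 */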
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        union context *host_ctx, *guest_ctx;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        if (signal_pending(current)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;

        /* Get host and guest context with guest address space. */
        host_ctx = kvm_get_host_context(vcpu);
        guest_ctx = kvm_get_guest_context(vcpu);

        clear_bit(KVM_REQ_KICK, &vcpu->requests);

        r = kvm_vcpu_pre_transition(vcpu);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        vcpu->mode = IN_GUEST_MODE;

        /*
         * Transition to the guest
         */
        kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);

        kvm_vcpu_post_transition(vcpu);

        vcpu->arch.launched = 1;
        set_bit(KVM_REQ_KICK, &vcpu->requests);

        /*
         * We must have an instruction between local_irq_enable() and
         * kvm_guest_exit(), so the timer interrupt isn't delayed by
         * the interrupt shadow. The stat.exits increment will do nicely.
         * But we need to prevent reordering, hence this barrier():
         */
        barrier();
        vcpu->mode = OUTSIDE_GUEST_MODE;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        r = kvm_handle_exit(kvm_run, vcpu);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
        struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);

        if (!vcpu->mmio_is_write)
                memcpy(&p->data, vcpu->mmio_data, 8);
        p->state = STATE_IORESP_READY;
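/*
 * Completion half of the userspace MMIO path: once userspace has
 * filled kvm_run->mmio for a read, the data is copied back into the
 * vcpu's ioreq and the request is marked STATE_IORESP_READY for the
 * VMM to consume on re-entry.
 */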
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        if (vcpu->mmio_needed) {
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                kvm_set_mmio_data(vcpu);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;

        r = __vcpu_run(vcpu, kvm_run);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
struct kvm *kvm_arch_alloc_vm(void)
        BUG_ON(sizeof(struct kvm) > KVM_VM_STRUCT_SIZE);

        vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));

        memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
        kvm = (struct kvm *)(vm_base +
                        offsetof(struct kvm_vm_data, kvm_vm_struct));
        kvm->arch.vm_base = vm_base;
        printk(KERN_DEBUG"kvm: vm's data area:0x%lx\n", vm_base);
struct kvm_ia64_io_range {
        unsigned long start;
        unsigned long size;
        unsigned long type;
};

static const struct kvm_ia64_io_range io_ranges[] = {
        {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
        {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
        {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
        {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
        {PIB_START, PIB_SIZE, GPFN_PIB},
};

static void kvm_build_io_pmt(struct kvm *kvm)
        /* Mark I/O ranges */
        for (i = 0; i < ARRAY_SIZE(io_ranges); i++) {
                for (j = io_ranges[i].start;
                                j < io_ranges[i].start + io_ranges[i].size;
                                j += PAGE_SIZE)
                        kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
                                        io_ranges[i].type, 0);
/* Use unused rids to virtualize guest rid. */
#define GUEST_PHYSICAL_RR0      0x1739
#define GUEST_PHYSICAL_RR4      0x2739
#define VMM_INIT_RR             0x1660
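/*
 * These region IDs are arbitrary constants presumed not to collide
 * with RIDs the host hands out: GUEST_PHYSICAL_RR0/RR4 back the
 * guest's metaphysical (guest-physical) address spaces, while
 * VMM_INIT_RR is the rid the VMM itself runs under in RR6.
 */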
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm->arch.is_sn2 = ia64_platform_is("sn2");

        kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
        kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
        kvm->arch.vmm_init_rr = VMM_INIT_RR;

        /*
         * Fill P2M entries for MMIO/IO ranges
         */
        kvm_build_io_pmt(kvm);

        INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);

        /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
        set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
                struct kvm_irqchip *chip)
        switch (chip->chip_id) {
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_get_ioapic(kvm, &chip->chip.ioapic);

static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        switch (chip->chip_id) {
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
#define RESTORE_REGS(_x)        vcpu->arch._x = regs->_x
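/*
 * For example, RESTORE_REGS(mp_state) expands to
 *
 *      vcpu->arch.mp_state = regs->mp_state;
 *
 * keeping the long field-by-field restore below to one token per line.
 */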
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        for (i = 0; i < 16; i++) {
                vpd->vgr[i] = regs->vpd.vgr[i];
                vpd->vbgr[i] = regs->vpd.vbgr[i];
        for (i = 0; i < 128; i++)
                vpd->vcr[i] = regs->vpd.vcr[i];
        vpd->vhpi = regs->vpd.vhpi;
        vpd->vnat = regs->vpd.vnat;
        vpd->vbnat = regs->vpd.vbnat;
        vpd->vpsr = regs->vpd.vpsr;
        vpd->vpr = regs->vpd.vpr;

        memcpy(&vcpu->arch.guest, &regs->saved_guest, sizeof(union context));

        RESTORE_REGS(mp_state);
        RESTORE_REGS(vmm_rr);
        memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
        memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
        RESTORE_REGS(itr_regions);
        RESTORE_REGS(dtr_regions);
        RESTORE_REGS(tc_regions);
        RESTORE_REGS(irq_check);
        RESTORE_REGS(itc_check);
        RESTORE_REGS(timer_check);
        RESTORE_REGS(timer_pending);
        RESTORE_REGS(last_itc);
        for (i = 0; i < 8; i++) {
                vcpu->arch.vrr[i] = regs->vrr[i];
                vcpu->arch.ibr[i] = regs->ibr[i];
                vcpu->arch.dbr[i] = regs->dbr[i];
        for (i = 0; i < 4; i++)
                vcpu->arch.insvc[i] = regs->insvc[i];
        RESTORE_REGS(metaphysical_rr0);
        RESTORE_REGS(metaphysical_rr4);
        RESTORE_REGS(metaphysical_saved_rr0);
        RESTORE_REGS(metaphysical_saved_rr4);
        RESTORE_REGS(fp_psr);
        RESTORE_REGS(saved_gp);

        vcpu->arch.irq_new_pending = 1;
        vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
        set_bit(KVM_REQ_RESUME, &vcpu->requests);
long kvm_arch_vm_ioctl(struct file *filp,
                unsigned int ioctl, unsigned long arg)
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        case KVM_SET_MEMORY_REGION: {
                struct kvm_memory_region kvm_mem;
                struct kvm_userspace_memory_region kvm_userspace_mem;

                if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))

                kvm_userspace_mem.slot = kvm_mem.slot;
                kvm_userspace_mem.flags = kvm_mem.flags;
                kvm_userspace_mem.guest_phys_addr =
                                kvm_mem.guest_phys_addr;
                kvm_userspace_mem.memory_size = kvm_mem.memory_size;
                r = kvm_vm_ioctl_set_memory_region(kvm,
                                &kvm_userspace_mem, 0);

        case KVM_CREATE_IRQCHIP:
                r = kvm_ioapic_init(kvm);
                r = kvm_setup_default_irq_routing(kvm);
                        mutex_lock(&kvm->slots_lock);
                        kvm_ioapic_destroy(kvm);
                        mutex_unlock(&kvm->slots_lock);

        case KVM_IRQ_LINE_STATUS:
                struct kvm_irq_level irq_event;

                if (copy_from_user(&irq_event, argp, sizeof irq_event))

                if (irqchip_in_kernel(kvm)) {
                        status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event.irq, irq_event.level);
                        if (ioctl == KVM_IRQ_LINE_STATUS) {
                                irq_event.status = status;
                                if (copy_to_user(argp, &irq_event,
                                                sizeof irq_event))

        case KVM_GET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                if (copy_from_user(&chip, argp, sizeof chip))

                if (!irqchip_in_kernel(kvm))

                r = kvm_vm_ioctl_get_irqchip(kvm, &chip);

                if (copy_to_user(argp, &chip, sizeof chip))

        case KVM_SET_IRQCHIP: {
                /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
                struct kvm_irqchip chip;

                if (copy_from_user(&chip, argp, sizeof chip))

                if (!irqchip_in_kernel(kvm))

                r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                struct kvm_sregs *sregs)

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                struct kvm_sregs *sregs)

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                struct kvm_translation *tr)
static int kvm_alloc_vmm_area(void)
        if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
                kvm_vmm_base = __get_free_pages(GFP_KERNEL,
                                get_order(KVM_VMM_SIZE));

                memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
                kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;

                printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
                                kvm_vmm_base, kvm_vm_buffer);

static void kvm_free_vmm_area(void)
        /* Zero this area before freeing it, to avoid leaking stale bits. */
        memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
        free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
static int vti_init_vpd(struct kvm_vcpu *vcpu)
        union cpuid3_t cpuid3;
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (IS_ERR(vpd))
                return PTR_ERR(vpd);

        for (i = 0; i < 5; i++)
                vpd->vcpuid[i] = ia64_get_cpuid(i);

        /* Limit the CPUID number to 5 */
        cpuid3.value = vpd->vcpuid[3];
        cpuid3.number = 4;      /* 5 - 1 */
        vpd->vcpuid[3] = cpuid3.value;
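        /*
         * CPUID register 3 carries the count: its 'number' field is
         * defined as (number of CPUID registers - 1), so writing 4
         * advertises exactly the 5 registers copied above and hides
         * any further implementation-specific ones from the guest.
         */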
        /* Set vac and vdc fields */
        vpd->vac.a_from_int_cr = 1;
        vpd->vac.a_to_int_cr = 1;
        vpd->vac.a_from_psr = 1;
        vpd->vac.a_from_cpuid = 1;
        vpd->vac.a_cover = 1;
        vpd->vdc.d_vmsw = 1;

        /* Set virtual buffer */
        vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
static int vti_create_vp(struct kvm_vcpu *vcpu)
        struct vpd *vpd = vcpu->arch.vpd;
        unsigned long vmm_ivt;

        vmm_ivt = kvm_vmm_info->vmm_ivt;

        printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);

        ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);

                printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");

static void init_ptce_info(struct kvm_vcpu *vcpu)
        ia64_ptce_info_t ptce = {0};

        ia64_get_ptce(&ptce);
        vcpu->arch.ptce_base = ptce.base;
        vcpu->arch.ptce_count[0] = ptce.count[0];
        vcpu->arch.ptce_count[1] = ptce.count[1];
        vcpu->arch.ptce_stride[0] = ptce.stride[0];
        vcpu->arch.ptce_stride[1] = ptce.stride[1];
static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
        struct hrtimer *p_ht = &vcpu->arch.hlt_timer;

        if (hrtimer_cancel(p_ht))
                hrtimer_start_expires(p_ht, HRTIMER_MODE_ABS);

static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
        struct kvm_vcpu *vcpu;
        wait_queue_head_t *q;

        vcpu = container_of(data, struct kvm_vcpu, arch.hlt_timer);

        if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)

        if (waitqueue_active(q))
                wake_up_interruptible(q);

        vcpu->arch.timer_fired = 1;
        vcpu->arch.timer_check = 1;
        return HRTIMER_NORESTART;
#define PALE_RESET_ENTRY        0x80000000ffffffb0UL

bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
        return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        struct kvm *kvm = vcpu->kvm;
        struct kvm_pt_regs *regs = vcpu_regs(vcpu);
        union context *p_ctx = &vcpu->arch.guest;
        struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);

        /* Init vcpu context for first run. */
        if (IS_ERR(vmm_vcpu))
                return PTR_ERR(vmm_vcpu);

        if (kvm_vcpu_is_bsp(vcpu)) {
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;

                /* Set entry address for first run. */
                regs->cr_iip = PALE_RESET_ENTRY;

                /* Initialize itc offset for vcpus */
                itc_offset = 0UL - kvm_get_itc(vcpu);
                for (i = 0; i < KVM_MAX_VCPUS; i++) {
                        v = (struct kvm_vcpu *)((char *)vcpu +
                                        sizeof(struct kvm_vcpu_data) * i);
                        v->arch.itc_offset = itc_offset;
                        v->arch.last_itc = 0;
        } else
                vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;

        vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
        if (!vcpu->arch.apic)
                goto out;
        vcpu->arch.apic->vcpu = vcpu;

        p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + KVM_STK_OFFSET);
        p_ctx->gr[13] = (unsigned long)vmm_vcpu;
        p_ctx->psr = 0x1008522000UL;
        p_ctx->ar[40] = FPSR_DEFAULT;   /* fpsr */
        p_ctx->caller_unat = 0;
        p_ctx->ar[36] = 0x0;    /* unat */
        p_ctx->ar[19] = 0x0;    /* rnat */
        p_ctx->ar[18] = (unsigned long)vmm_vcpu +
                ((sizeof(struct kvm_vcpu) + 15) & ~15);
        p_ctx->ar[64] = 0x0;    /* pfs */
        p_ctx->cr[0] = 0x7e04UL;
        p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
        p_ctx->cr[8] = 0x3c;

        /* Initialize region registers */
        p_ctx->rr[0] = 0x30;
        p_ctx->rr[1] = 0x30;
        p_ctx->rr[2] = 0x30;
        p_ctx->rr[3] = 0x30;
        p_ctx->rr[4] = 0x30;
        p_ctx->rr[5] = 0x30;
        p_ctx->rr[7] = 0x30;
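        /*
         * rr[6] is deliberately not given the 0x30 value here: RR6 is
         * the region the VMM itself occupies, and it is loaded
         * separately with vmm_rr around every world switch (see
         * vti_set_rr6()).
         */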
        /* Initialize branch register 0 */
        p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;

        vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
        vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
        vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;

        hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        vcpu->arch.hlt_timer.function = hlt_timer_fn;

        vcpu->arch.last_run_cpu = -1;
        vcpu->arch.vpd = (struct vpd *)VPD_BASE(vcpu->vcpu_id);
        vcpu->arch.vsa_base = kvm_vsa_base;
        vcpu->arch.__gp = kvm_vmm_gp;
        vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
        vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_BASE(vcpu->vcpu_id);
        vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_BASE(vcpu->vcpu_id);
        init_ptce_info(vcpu);
static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
        local_irq_save(psr);
        r = kvm_insert_vmm_mapping(vcpu);
        local_irq_restore(psr);

        r = kvm_vcpu_init(vcpu, vcpu->kvm, id);

        r = vti_init_vpd(vcpu);
                printk(KERN_DEBUG"kvm: vpd init error!!\n");

        r = vti_create_vp(vcpu);

        kvm_purge_vmm_mapping(vcpu);

        kvm_vcpu_uninit(vcpu);

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                unsigned int id)
        struct kvm_vcpu *vcpu;
        unsigned long vm_base = kvm->arch.vm_base;

        BUG_ON(sizeof(struct kvm_vcpu) > VCPU_STRUCT_SIZE/2);

        if (id >= KVM_MAX_VCPUS) {
                printk(KERN_ERR"kvm: Can't configure vcpus > %ld",
                                KVM_MAX_VCPUS);

                printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);

        vcpu = (struct kvm_vcpu *)(vm_base + offsetof(struct kvm_vm_data,
                                        vcpu_data[id].vcpu_struct));

        r = vti_vcpu_setup(vcpu, id);

                printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                struct kvm_guest_debug *dbg)

void kvm_arch_free_vm(struct kvm *kvm)
        unsigned long vm_base = kvm->arch.vm_base;

        memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
        free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
static void kvm_release_vm_pages(struct kvm *kvm)
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        unsigned long base_gfn;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                base_gfn = memslot->base_gfn;
                for (j = 0; j < memslot->npages; j++) {
                        if (memslot->rmap[j])
                                put_page((struct page *)memslot->rmap[j]);

void kvm_arch_sync_events(struct kvm *kvm)

void kvm_arch_destroy_vm(struct kvm *kvm)
        kvm_iommu_unmap_guest(kvm);
#ifdef  KVM_CAP_DEVICE_ASSIGNMENT
        kvm_free_all_assigned_devices(kvm);
#endif
        kfree(kvm->arch.vioapic);
        kvm_release_vm_pages(kvm);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (cpu != vcpu->cpu) {
                if (vcpu->arch.ht_active)
                        kvm_migrate_hlt_timer(vcpu);

#define SAVE_REGS(_x)           regs->_x = vcpu->arch._x
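/*
 * Mirror image of RESTORE_REGS(): SAVE_REGS(mp_state), for example,
 * expands to regs->mp_state = vcpu->arch.mp_state;
 */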
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        for (i = 0; i < 16; i++) {
                regs->vpd.vgr[i] = vpd->vgr[i];
                regs->vpd.vbgr[i] = vpd->vbgr[i];
        for (i = 0; i < 128; i++)
                regs->vpd.vcr[i] = vpd->vcr[i];
        regs->vpd.vhpi = vpd->vhpi;
        regs->vpd.vnat = vpd->vnat;
        regs->vpd.vbnat = vpd->vbnat;
        regs->vpd.vpsr = vpd->vpsr;
        regs->vpd.vpr = vpd->vpr;

        memcpy(&regs->saved_guest, &vcpu->arch.guest, sizeof(union context));

        SAVE_REGS(mp_state);

        memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
        memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
        SAVE_REGS(itr_regions);
        SAVE_REGS(dtr_regions);
        SAVE_REGS(tc_regions);
        SAVE_REGS(irq_check);
        SAVE_REGS(itc_check);
        SAVE_REGS(timer_check);
        SAVE_REGS(timer_pending);
        SAVE_REGS(last_itc);
        for (i = 0; i < 8; i++) {
                regs->vrr[i] = vcpu->arch.vrr[i];
                regs->ibr[i] = vcpu->arch.ibr[i];
                regs->dbr[i] = vcpu->arch.dbr[i];
        for (i = 0; i < 4; i++)
                regs->insvc[i] = vcpu->arch.insvc[i];
        regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);

        SAVE_REGS(metaphysical_rr0);
        SAVE_REGS(metaphysical_rr4);
        SAVE_REGS(metaphysical_saved_rr0);
        SAVE_REGS(metaphysical_saved_rr4);

        SAVE_REGS(saved_gp);
int kvm_arch_vcpu_ioctl_get_stack(struct kvm_vcpu *vcpu,
                struct kvm_ia64_vcpu_stack *stack)
        memcpy(stack, vcpu, sizeof(struct kvm_ia64_vcpu_stack));

int kvm_arch_vcpu_ioctl_set_stack(struct kvm_vcpu *vcpu,
                struct kvm_ia64_vcpu_stack *stack)
        memcpy(vcpu + 1, &stack->stack[0] + sizeof(struct kvm_vcpu),
                        sizeof(struct kvm_ia64_vcpu_stack) -
                        sizeof(struct kvm_vcpu));

        vcpu->arch.exit_data = ((struct kvm_vcpu *)stack)->arch.exit_data;
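/*
 * Layout note: the kvm_vcpu structure sits at the head of the per-vcpu
 * stack area, so the restore above copies everything *after* it
 * (vcpu + 1 onward) and then picks exit_data out of the embedded
 * kvm_vcpu image separately, leaving the live vcpu struct intact.
 */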
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
        hrtimer_cancel(&vcpu->arch.hlt_timer);
        kfree(vcpu->arch.apic);

long kvm_arch_vcpu_ioctl(struct file *filp,
                unsigned int ioctl, unsigned long arg)
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_ia64_vcpu_stack *stack = NULL;

        case KVM_IA64_VCPU_GET_STACK: {
                struct kvm_ia64_vcpu_stack __user *user_stack;
                void __user *first_p = argp;

                if (copy_from_user(&user_stack, first_p, sizeof(void *)))

                if (!access_ok(VERIFY_WRITE, user_stack,
                                sizeof(struct kvm_ia64_vcpu_stack))) {
                        printk(KERN_INFO "KVM_IA64_VCPU_GET_STACK: "
                                        "Illegal user destination address for stack\n");

                stack = kzalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);

                r = kvm_arch_vcpu_ioctl_get_stack(vcpu, stack);

                if (copy_to_user(user_stack, stack,
                                sizeof(struct kvm_ia64_vcpu_stack))) {

        case KVM_IA64_VCPU_SET_STACK: {
                struct kvm_ia64_vcpu_stack __user *user_stack;
                void __user *first_p = argp;

                if (copy_from_user(&user_stack, first_p, sizeof(void *)))

                if (!access_ok(VERIFY_READ, user_stack,
                                sizeof(struct kvm_ia64_vcpu_stack))) {
                        printk(KERN_INFO "KVM_IA64_VCPU_SET_STACK: "
                                        "Illegal user address for stack\n");

                stack = kmalloc(sizeof(struct kvm_ia64_vcpu_stack), GFP_KERNEL);

                if (copy_from_user(stack, user_stack,
                                sizeof(struct kvm_ia64_vcpu_stack)))

                r = kvm_arch_vcpu_ioctl_set_stack(vcpu, stack);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                struct kvm_memory_slot *dont)

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                struct kvm_memory_slot old,
                struct kvm_userspace_memory_region *mem,
                int user_alloc)
        int npages = memslot->npages;
        unsigned long base_gfn = memslot->base_gfn;

        if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))

        for (i = 0; i < npages; i++) {
                pfn = gfn_to_pfn(kvm, base_gfn + i);
                if (!kvm_is_mmio_pfn(pfn)) {
                        kvm_set_pmt_entry(kvm, base_gfn + i,
                                        pfn << PAGE_SHIFT,
                                        _PAGE_AR_RWX | _PAGE_MA_WB);
                        memslot->rmap[i] = (unsigned long)pfn_to_page(pfn);
                } else {
                        kvm_set_pmt_entry(kvm, base_gfn + i,
                                        GPFN_PHYS_MMIO | (pfn << PAGE_SHIFT),
                                        _PAGE_MA_UC);
                        memslot->rmap[i] = 0;
void kvm_arch_commit_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem,
                struct kvm_memory_slot old,
                int user_alloc)

void kvm_arch_flush_shadow(struct kvm *kvm)
        kvm_flush_remote_tlbs(kvm);

long kvm_arch_dev_ioctl(struct file *filp,
                unsigned int ioctl, unsigned long arg)

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kvm_vcpu_uninit(vcpu);
static int vti_cpu_has_kvm_support(void)
        long avail = 1, status = 1, control = 1;

        ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);

        if (!(avail & PAL_PROC_VM_BIT))

        printk(KERN_DEBUG"kvm: Hardware Supports VT\n");

        ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);

        printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);

        if (!(vp_env_info & VP_OPCODE)) {
                printk(KERN_WARNING"kvm: No opcode ability on hardware, "
                                "vm_env_info:0x%lx\n", vp_env_info);
/*
 * On SN2, the ITC isn't stable, so copy in fast path code to use the
 * SN2 RTC, replacing the ITC based default version.
 */
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
                struct module *module)
        unsigned long new_ar, new_ar_sn2;
        unsigned long module_base;

        if (!ia64_platform_is("sn2"))

        module_base = (unsigned long)module->module_core;

        new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
        new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;

        printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
                        "as time source\n");

        /*
         * Copy the SN2 version of mov_ar into place. They are both
         * the same size, so 6 bundles is sufficient (6 * 0x10).
         */
        memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
                struct module *module)
        unsigned long module_base;
        unsigned long vmm_size;
        unsigned long vmm_offset, func_offset, fdesc_offset;
        struct fdesc *p_fdesc;

        if (!kvm_vmm_base) {
                printk("kvm: kvm area hasn't been initialized yet!!\n");

        /* Calculate new position of relocated vmm module. */
        module_base = (unsigned long)module->module_core;
        vmm_size = module->core_size;
        if (unlikely(vmm_size > KVM_VMM_SIZE))

        memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
        kvm_patch_vmm(vmm_info, module);
        kvm_flush_icache(kvm_vmm_base, vmm_size);

        /* Recalculate kvm_vmm_info based on new VMM */
        vmm_offset = vmm_info->vmm_ivt - module_base;
        kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
        printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
                        kvm_vmm_info->vmm_ivt);
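        /*
         * On ia64 a C function pointer is really a function descriptor,
         * an { ip, gp } pair (struct fdesc). Relocating the VMM thus
         * means rebasing not just the descriptor's own address but also
         * the ip and gp words inside it, as done for vmm_entry and
         * tramp_entry below.
         */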
        fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
        kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
                        fdesc_offset);
        func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
        p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
        p_fdesc->ip = KVM_VMM_BASE + func_offset;
        p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

        printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
                        KVM_VMM_BASE + func_offset);

        fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
        kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
                        fdesc_offset);
        func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
        p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
        p_fdesc->ip = KVM_VMM_BASE + func_offset;
        p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);

        kvm_vmm_gp = p_fdesc->gp;

        printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
                        kvm_vmm_info->vmm_entry);
        printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
                        KVM_VMM_BASE + func_offset);
int kvm_arch_init(void *opaque)
        struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;

        if (!vti_cpu_has_kvm_support()) {
                printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");

                printk(KERN_ERR "kvm: Already loaded VMM module!\n");

        kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);

        if (kvm_alloc_vmm_area())

        r = kvm_relocate_vmm(vmm_info, vmm_info->module);

        kvm_free_vmm_area();

        kfree(kvm_vmm_info);

void kvm_arch_exit(void)
        kvm_free_vmm_area();
        kfree(kvm_vmm_info);
        kvm_vmm_info = NULL;
static void kvm_ia64_sync_dirty_log(struct kvm *kvm,
                struct kvm_memory_slot *memslot)
        unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                        offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

        n = kvm_dirty_bitmap_bytes(memslot);
        base = memslot->base_gfn / BITS_PER_LONG;

        spin_lock(&kvm->arch.dirty_log_lock);
        for (i = 0; i < n/sizeof(long); ++i) {
                memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
                dirty_bitmap[base + i] = 0;
        }
        spin_unlock(&kvm->arch.dirty_log_lock);
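/*
 * The VMM writes dirty bits into a log embedded in kvm_vm_data
 * (kvm_mem_dirty_log), which both host and VMM can address. Syncing
 * copies each word into the memslot's dirty_bitmap and clears the
 * shared word under dirty_log_lock, so no set bit is lost in between.
 */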
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
        struct kvm_memory_slot *memslot;

        mutex_lock(&kvm->slots_lock);

        if (log->slot >= KVM_MEMORY_SLOTS)

        memslot = id_to_memslot(kvm->memslots, log->slot);

        if (!memslot->dirty_bitmap)

        kvm_ia64_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);

        mutex_unlock(&kvm->slots_lock);
int kvm_arch_hardware_setup(void)

void kvm_arch_hardware_unsetup(void)

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
        return __apic_accept_irq(vcpu, irq->vector);

int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
        return apic->vcpu->vcpu_id == dest;

int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
        return vcpu1->arch.xtp - vcpu2->arch.xtp;

int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                int short_hand, int dest, int dest_mode)
        struct kvm_lapic *target = vcpu->arch.apic;
        return (dest_mode == 0) ?
                        kvm_apic_match_physical_addr(target, dest) :
                        kvm_apic_match_logical_addr(target, dest);
static int find_highest_bits(int *dat)
        /* loop for all 256 bits */
        for (i = 7; i >= 0; i--) {
                        return i * 32 + bitnum - 1;
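/*
 * find_highest_bits() treats the 256-bit irr as eight 32-bit words and
 * scans from the most significant word down; the first set bit found
 * (the elided body appears to use an fls()-style scan producing
 * 'bitnum') is the highest pending vector, or -1 if none is set.
 */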
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
        struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);

        if (vpd->irr[0] & (1UL << NMI_VECTOR))
                return NMI_VECTOR;
        if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
                return ExtINT_VECTOR;

        return find_highest_bits((int *)&vpd->irr[0]);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
        return vcpu->arch.timer_fired;

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE) ||
                        (kvm_highest_pending_irq(vcpu) != -1);

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
        return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                struct kvm_mp_state *mp_state)
        mp_state->mp_state = vcpu->arch.mp_state;
static int vcpu_reset(struct kvm_vcpu *vcpu)
        local_irq_save(psr);
        r = kvm_insert_vmm_mapping(vcpu);
        local_irq_restore(psr);

        vcpu->arch.launched = 0;
        kvm_arch_vcpu_uninit(vcpu);
        r = kvm_arch_vcpu_init(vcpu);

        kvm_purge_vmm_mapping(vcpu);

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                struct kvm_mp_state *mp_state)
        vcpu->arch.mp_state = mp_state->mp_state;
        if (vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)
                r = vcpu_reset(vcpu);