/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#define VECTORSPACING 0x100	/* for EI/VI mode */
const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
	KVM_GENERIC_VM_STATS()
};
const struct kvm_stats_header kvm_vm_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vm_stats_desc),
};
const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, wait_exits),
	STATS_DESC_COUNTER(VCPU, cache_exits),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, int_exits),
	STATS_DESC_COUNTER(VCPU, cop_unusable_exits),
	STATS_DESC_COUNTER(VCPU, tlbmod_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_ld_exits),
	STATS_DESC_COUNTER(VCPU, tlbmiss_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_st_exits),
	STATS_DESC_COUNTER(VCPU, addrerr_ld_exits),
	STATS_DESC_COUNTER(VCPU, syscall_exits),
	STATS_DESC_COUNTER(VCPU, resvd_inst_exits),
	STATS_DESC_COUNTER(VCPU, break_inst_exits),
	STATS_DESC_COUNTER(VCPU, trap_inst_exits),
	STATS_DESC_COUNTER(VCPU, msa_fpe_exits),
	STATS_DESC_COUNTER(VCPU, fpe_exits),
	STATS_DESC_COUNTER(VCPU, msa_disabled_exits),
	STATS_DESC_COUNTER(VCPU, flush_dcache_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpsi_exits),
	STATS_DESC_COUNTER(VCPU, vz_gsfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_hc_exits),
	STATS_DESC_COUNTER(VCPU, vz_grr_exits),
	STATS_DESC_COUNTER(VCPU, vz_gva_exits),
	STATS_DESC_COUNTER(VCPU, vz_ghfc_exits),
	STATS_DESC_COUNTER(VCPU, vz_gpa_exits),
	STATS_DESC_COUNTER(VCPU, vz_resvd_exits),
#ifdef CONFIG_CPU_LOONGSON64
	STATS_DESC_COUNTER(VCPU, vz_cpucfg_exits),
#endif
};
const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};
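/*
 * Layout note (editor's sketch, derived from the offsets above): the binary
 * stats file exposed through KVM_GET_STATS_FD is laid out as
 *
 *	struct kvm_stats_header		(at offset 0)
 *	id string			(at id_offset, KVM_STATS_NAME_SIZE bytes)
 *	descriptor array		(at desc_offset)
 *	stats data			(at data_offset)
 *
 * so desc_offset and data_offset simply accumulate the sizes of the
 * preceding blocks.
 */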
bool kvm_trace_guest_mode_change;
int kvm_guest_mode_change_trace_reg(void)
{
	kvm_trace_guest_mode_change = true;
	return 0;
}
void kvm_guest_mode_change_trace_unreg(void)
{
	kvm_trace_guest_mode_change = false;
}
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}
void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}
int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}
int kvm_arch_check_processor_compat(void *opaque)
{
	return 0;
}
extern void kvm_init_loongson_ipi(struct kvm *kvm);
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_AUTO:
		break;
	case KVM_VM_MIPS_VZ:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

#ifdef CONFIG_CPU_LOONGSON64
	kvm_init_loongson_ipi(kvm);
#endif

	return 0;
}
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_destroy_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);
	kvm_flush_remote_tlbs(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	kvm_arch_flush_remote_tlbs_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	/*
	 * If dirty page logging is enabled, write protect all pages in the
	 * slot ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		if (needs_flush)
			kvm_arch_flush_remote_tlbs_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}
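/*
 * Example (editor's note): a KVM_MR_FLAGS_ONLY change as tested above is what
 * userspace produces when it re-registers an existing slot through
 * KVM_SET_USER_MEMORY_REGION changing only the flags, e.g. switching on
 * KVM_MEM_LOG_DIRTY_PAGES for an already-mapped slot when live migration
 * starts.
 */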
static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}
/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	rcuwait_wake_up(&vcpu->wait);

	return kvm_mips_count_timeout(vcpu);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	kvm_debug("kvm @ %p: create cpu %d at %p\n",
		  vcpu->kvm, vcpu->vcpu_id, vcpu);

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_vcpu;
	}
322 kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
323 ALIGN(size, PAGE_SIZE), gebase);
326 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
327 * limits us to the low 512MB of physical address space. If the memory
328 * we allocate is out of range, just give up now.
330 if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
331 kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
334 goto out_free_gebase;
	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}
	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/* Init */
	vcpu->arch.last_sched_cpu = -1;
	vcpu->arch.last_exec_cpu = -1;

	/* Initial guest state */
	err = kvm_mips_callbacks->vcpu_setup(vcpu);
	if (err)
		goto out_free_gebase;

	return 0;

out_free_gebase:
	kfree(gebase);
out_uninit_vcpu:
	kvm_mips_callbacks->vcpu_uninit(vcpu);
	return err;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);

	kvm_mips_callbacks->vcpu_uninit(vcpu);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int r = -EINTR;

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu);
		vcpu->mmio_needed = 0;
	}

	if (vcpu->run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);
	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] ||
	    intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) ||
	    intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2]))
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);

	if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	rcuwait_wake_up(&dvcpu->wait);

	return 0;
}
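/*
 * Usage sketch (editor's illustration, not part of the kernel build): with
 * the default priority table later in this file, Cause.IP2..IP4 map to irq
 * numbers 2..4, so userspace raises and lowers guest interrupt 3 on an open
 * vcpu fd ("vcpu_fd" is assumed) with:
 *
 *	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 3 };
 *
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// queue IRQ 3
 *	irq.irq = -3;
 *	ioctl(vcpu_fd, KVM_INTERRUPT, &irq);	// dequeue IRQ 3
 */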
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};
static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};
static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}
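/*
 * Worked example (editor's note) of the counting above: the "+ 48" is
 * 32 x FPR_32 plus 16 x even-indexed FPR_64; the conditional "+ 16" adds the
 * odd-indexed doubles when the FPU implements F64; and the "+ 32" is the
 * 32 x 128-bit MSA vector registers. The two-element control register arrays
 * contribute via ARRAY_SIZE(), and the implementation callback adds whatever
 * extra registers it exposes.
 */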
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;
	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;
	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;
	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}
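/*
 * Usage sketch (editor's illustration, not part of the kernel build):
 * reading the guest PC through KVM_GET_ONE_REG on an open vcpu fd
 * ("vcpu_fd" is assumed):
 *
 *	__u64 pc;
 *	struct kvm_one_reg one_reg = {
 *		.id = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg) == 0)
 *		printf("guest pc = 0x%llx\n", (unsigned long long)pc);
 */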
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;
	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
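/*
 * Usage sketch (editor's illustration): userspace opts in to the guest FPU
 * with KVM_ENABLE_CAP on the vcpu fd; flags and args must be zero, as
 * checked above:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_MIPS_FPU };
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */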
long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl,
			       unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
			  irq.irq);

		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}

	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_mips_set_reg(vcpu, &reg);
		else
			r = kvm_mips_get_reg(vcpu, &reg);
		break;
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		r = -EFAULT;
		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			break;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			break;
		r = -E2BIG;
		if (n < reg_list.n)
			break;
		r = kvm_mips_copy_reg_indices(vcpu, user_list->reg);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}

	vcpu_put(vcpu);
	return r;
}
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}
int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	kvm_mips_callbacks->prepare_flush_shadow(kvm);
	return 1;
}
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot)
{
	kvm_flush_remote_tlbs(kvm);
}
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}
void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_MAX_VCPU_IDS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu) ||
		kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       vcpu->arch.gprs[i],
		       vcpu->arch.gprs[i + 1],
		       vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	vcpu_put(vcpu);
	return 0;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}
static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
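/*
 * Worked example (editor's note): the signal-pending path below builds
 * ret = (-EINTR << 2) | RESUME_HOST, i.e. the error code sits above bit 2
 * and RESUME_FLAG_HOST tells the caller to exit to userspace with -EINTR
 * rather than re-enter the guest.
 */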
int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
			cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	switch (exccode) {
1202 kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
1204 ++vcpu->stat.int_exits;
1213 kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
1215 ++vcpu->stat.cop_unusable_exits;
1216 ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
1217 /* XXXKYMA: Might need to return to user space */
1218 if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
1223 ++vcpu->stat.tlbmod_exits;
1224 ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
1228 kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
1229 cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
1232 ++vcpu->stat.tlbmiss_st_exits;
1233 ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
1237 kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
1238 cause, opc, badvaddr);
1240 ++vcpu->stat.tlbmiss_ld_exits;
1241 ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
1245 ++vcpu->stat.addrerr_st_exits;
1246 ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
1250 ++vcpu->stat.addrerr_ld_exits;
1251 ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;
	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}
	local_irq_disable();

	if (ret == RESUME_GUEST)
		kvm_vz_acquire_htimer(vcpu);

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}
	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}
	return ret;
}
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}
#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif
/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}
/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}
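/*
 * State summary (editor's note): vcpu->arch.aux_inuse tracks which guest
 * context is live in hardware. kvm_own_fpu()/kvm_own_msa() set
 * KVM_MIPS_AUX_FPU/KVM_MIPS_AUX_MSA after restoring state, kvm_drop_fpu()
 * clears the flags without saving (for when the guest register state is
 * being discarded), and kvm_lose_fpu() saves into vcpu->arch.fpu before
 * clearing, which is the path used when the host needs the FPU/MSA back.
 */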
/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}
static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};
static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};
static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO_1]  = C_IRQ0,
	[MIPS_EXC_INT_IO_2]  = C_IRQ1,
	[MIPS_EXC_INT_IPI_1] = C_IRQ4,
};
u32 *kvm_priority_to_irq = kvm_default_priority_to_irq;
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == (1 << (irq + 8)))
			return i;
	}

	return MIPS_EXC_MAX;
}
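/*
 * Worked example (editor's note): the Cause register's IP bits occupy bits
 * 8..15, so C_IRQ0 == 1 << 10 == 1 << (2 + 8). With the default table above,
 * kvm_irq_to_priority(2) therefore returns MIPS_EXC_INT_IO_1, the inverse of
 * kvm_priority_to_irq[MIPS_EXC_INT_IO_1] == C_IRQ0.
 */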
static int __init kvm_mips_init(void)
{
	int ret;

	if (cpu_has_mmid) {
		pr_warn("KVM does not yet support MMIDs. KVM Disabled\n");
		return -EOPNOTSUPP;
	}

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	if (boot_cpu_type() == CPU_LOONGSON64)
		kvm_priority_to_irq = kvm_loongson3_priority_to_irq;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}
static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);