 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * KVM/MIPS: MIPS specific KVM APIs
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/bootmem.h>

#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#define CREATE_TRACE_POINTS

#define VECTORSPACING 0x100	/* for EI/VI mode */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;

 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
	return !!(vcpu->arch.pending_exceptions);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)

int kvm_arch_hardware_enable(void)

int kvm_arch_hardware_setup(void)

void kvm_arch_check_processor_compat(void *rtn)

static void kvm_mips_init_tlbs(struct kvm *kvm)
	 * Add a wired entry to the TLB; it is used to map the commpage to
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);

static void kvm_mips_init_vm_percpu(void *arg)
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);

bool kvm_arch_has_vcpu_debugfs(void)

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)

void kvm_mips_free_vcpus(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);

static void kvm_mips_uninit_tlbs(void *arg)
	/* Restore wired count */

	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();

void kvm_arch_destroy_vm(struct kvm *kvm)
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
	unsigned long npages = 0;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		npages = mem->memory_size >> PAGE_SHIFT;
		kvm->arch.guest_pmap_npages = npages;
		kvm->arch.guest_pmap =
			kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

		if (!kvm->arch.guest_pmap) {
			kvm_err("Failed to allocate guest PMAP\n");

		kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
			  npages, kvm->arch.guest_pmap);

		/* Now setup the page table */
		for (i = 0; i < npages; i++)
			kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;

static inline void dump_handler(const char *symbol, void *start, void *end)
	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
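
	/*
	 * Illustrative only: with dynamic debug enabled, the output above is
	 * meant to read like assembler source, along these lines (the address
	 * and opcode word below are made up):
	 *
	 *	LEAF(kvm_gen_exc)
	 *		.set push
	 *		.set noreorder
	 *		.word	0x40026000		# ffffffff81000180
	 *		.set	pop
	 *	END(kvm_gen_exc)
	 */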
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	void *gebase, *p, *handler;
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	err = kvm_vcpu_init(vcpu, kvm, id);

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	 * Allocate space for host mode exception handlers that handle
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
		goto out_free_gebase;

	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB Refill, EXL = 0 */
	kvm_mips_build_exception(gebase, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,

	/* General exit handler */
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");

	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	 * Allocate comm page for guest kernel; a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		goto out_free_gebase;

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	kvm_vcpu_uninit(vcpu);
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_free(vcpu);

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)

/* Must be called with preemption disabled, just before entering guest */
static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int cpu = smp_processor_id();

	 * Lazy host ASID regeneration for guest user mode.
	 * If the guest ASID has changed since the last guest usermode
	 * execution, regenerate the host ASID so as to invalidate stale TLB
	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
			vcpu->arch.guest_user_asid[cpu] =
				vcpu->arch.guest_user_mm.context.asid[cpu];
			vcpu->arch.last_user_gasid = gasid;

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	guest_enter_irqoff();

	/* Disable hardware page table walking while in guest */

	trace_kvm_enter(vcpu);

	kvm_mips_check_asids(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,

		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);
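
/*
 * A minimal userspace sketch of driving the handler above (illustrative, not
 * part of this file; vcpu_fd is assumed to come from KVM_CREATE_VCPU). A
 * positive irq of 2..4 queues an I/O interrupt, a negative irq of -2..-4
 * dequeues it, and irq.cpu selects the destination vCPU:
 *
 *	struct kvm_mips_interrupt irq = {
 *		.cpu = 0,	// index of the destination vCPU
 *		.irq = 2,	// queue I/O interrupt 2; -2 would dequeue it
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
 *		perror("KVM_INTERRUPT");
 */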
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

static u64 kvm_mips_get_one_regs[] = {

#ifndef CONFIG_CPU_MIPSR6

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_CSR,

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_CSR,

static u64 kvm_mips_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
	ret += kvm_mips_callbacks->num_regs(vcpu);

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))

	for (i = 0; i < 6; ++i) {
		if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))

		if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
				 sizeof(kvm_mips_get_one_regs_kscratch[i])))

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
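
/*
 * How userspace is expected to consume the two helpers above (a hedged
 * sketch; vcpu_fd assumed to come from KVM_CREATE_VCPU): query the register
 * count first, then fetch the full index list.
 *
 *	struct kvm_reg_list probe = { .n = 0 }, *list;
 *
 *	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);	// -E2BIG, but fills probe.n
 *	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
 *	list->n = probe.n;
 *	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
 *		;	// list->reg[] now holds probe.n KVM_REG_MIPS_* indices
 */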
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
		v = get_fpr64(&fpu->fpr[idx], 0);
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		v = boot_cpu_data.fpu_id;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		v = boot_cpu_data.msa_id;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
	case KVM_REG_MIPS_CP0_PRID:
		v = (long)kvm_read_c0_guest_prid(cop0);
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
	case KVM_REG_MIPS_CP0_CONFIG4:
		v = (long)kvm_read_c0_guest_config4(cop0);
	case KVM_REG_MIPS_CP0_CONFIG5:
		v = (long)kvm_read_c0_guest_config5(cop0);
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			v = (long)kvm_read_c0_guest_kscratch1(cop0);
			v = (long)kvm_read_c0_guest_kscratch2(cop0);
			v = (long)kvm_read_c0_guest_kscratch3(cop0);
			v = (long)kvm_read_c0_guest_kscratch4(cop0);
			v = (long)kvm_read_c0_guest_kscratch5(cop0);
			v = (long)kvm_read_c0_guest_kscratch6(cop0);

	/* registers to be handled specially */
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
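
/*
 * A hedged userspace sketch of the size dispatch above (vcpu_fd assumed to
 * be a vCPU fd): the access size is encoded in the register id, so a U64
 * register such as KVM_REG_MIPS_PC needs a 64-bit buffer behind reg.addr.
 *
 *	__u64 pc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_MIPS_PC,
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		printf("guest PC = 0x%llx\n", (unsigned long long)pc);
 */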
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;

		if (get_user(v32, uaddr32) != 0)
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;

	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
	case KVM_REG_MIPS_LO:
	case KVM_REG_MIPS_PC:

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
		set_fpr64(&fpu->fpr[idx], 0, v);
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))

	/* Co-processor 0 registers */
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
			kvm_write_c0_guest_kscratch1(cop0, v);
			kvm_write_c0_guest_kscratch2(cop0, v);
			kvm_write_c0_guest_kscratch3(cop0, v);
			kvm_write_c0_guest_kscratch4(cop0, v);
			kvm_write_c0_guest_kscratch5(cop0, v);
			kvm_write_c0_guest_kscratch6(cop0, v);

	/* registers to be handled specially */
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))

	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
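
/*
 * A minimal sketch of how userspace would flip these flags (illustrative;
 * vcpu_fd assumed to be a vCPU fd, and the capability should be probed with
 * KVM_CHECK_EXTENSION first):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_MIPS_FPU,	// or KVM_CAP_MIPS_MSA
 *	};
 *
 *	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		perror("KVM_ENABLE_CAP");
 */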
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		return kvm_mips_get_reg(vcpu, &reg);
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);

		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		struct kvm_mips_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
		kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);

/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);

	/* If nothing is dirty, don't bother messing with page tables. */
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);

	mutex_unlock(&kvm->slots_lock);
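
/*
 * A hedged sketch of the matching userspace side (vm_fd assumed to come from
 * KVM_CREATE_VM, slot/npages from the KVM_SET_USER_MEMORY_REGION call that
 * created the slot): the bitmap carries one bit per page and is cleared by
 * the ioctl above.
 *
 *	__u64 *bitmap = calloc((npages + 63) / 64, sizeof(__u64));
 *	struct kvm_dirty_log log = {
 *		.slot = slot,
 *		.dirty_bitmap = bitmap,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		perror("KVM_GET_DIRTY_LOG");
 */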
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)

int kvm_arch_init(void *opaque)
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");

	return kvm_mips_emulation_init(&kvm_mips_callbacks);

void kvm_arch_exit(void)
	kvm_mips_callbacks = NULL;

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	return -ENOIOCTLCMD;

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	return -ENOIOCTLCMD;

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
	return -ENOIOCTLCMD;

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
	return VM_FAULT_SIGBUS;

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
	case KVM_CAP_MIPS_MSA:
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
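
/*
 * Probing these capabilities from userspace is a single ioctl (sketch;
 * kvm_fd assumed to be an open /dev/kvm): a positive return value means the
 * extension is available.
 *
 *	if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_MSA) > 0)
 *		;	// safe to enable KVM_CAP_MIPS_MSA on each vCPU
 */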
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
	return kvm_mips_pending_timer(vcpu);

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

static void kvm_mips_comparecount_func(unsigned long data)
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
	return kvm_mips_callbacks->vcpu_setup(vcpu);

static void kvm_mips_set_c0_status(void)
	u32 status = read_c0_status();

	write_c0_status(status);

 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	kvm_mips_set_c0_status();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	 * Do a privilege check; if in UM, most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)

		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);

		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);

		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);

		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);

		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);

		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);

		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);

		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);

		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);

		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);

		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		kvm_mips_check_asids(vcpu);

		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);

	/* Disable HTW before returning to guest or host */

/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	sr = kvm_read_c0_guest_status(cop0);

	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu()
	 * should get called when guest CU1 is set; however, we can't trust
	 * the guest not to clobber the status register directly via the
	 * commpage.
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)

	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	change_c0_status(ST0_CU1 | ST0_FR, sr);

	cfg5 = kvm_read_c0_guest_config5(cop0);
	change_c0_config5(MIPS_CONF5_FRE, cfg5);

	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
					     KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		 * Guest FPU state already loaded, only restore upper MSA state
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);

	trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);

/* Drop FPU & MSA without saving them */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;

	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
	 * FPU & MSA get disabled in root context (hardware) when they are
	 * disabled in guest context (software), but the register state in the
	 * hardware may still be in use. This is why we explicitly re-enable
	 * the hardware
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		set_c0_config5(MIPS_CONF5_MSAEN);
		enable_fpu_hazard();

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		set_c0_status(ST0_CU1);
		enable_fpu_hazard();

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR, which
 * are used to restore guest FCSR/MSACSR state and may trigger a "harmless"
 * FP/MSAFP exception if cause bits are set in the value being written.
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);

		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)

		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,

static int __init kvm_mips_init(void)
	ret = kvm_mips_entry_setup();

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	register_die_notifier(&kvm_mips_csr_die_notifier);

static void __exit kvm_mips_exit(void)
	unregister_die_notifier(&kvm_mips_csr_die_notifier);

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);