/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "loongson_regs.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISA_OE
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * timer expiry is asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr));
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * interrupts are asynchronous to vcpu execution therefore defer guest
	 * cp0 accesses
	 */
	kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr));
}

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
		 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
		 * supported or if not using GuestCtl2 Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO_1:
	case MIPS_EXC_INT_IO_2:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}
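
/*
 * Worked example of the (irq << 14) tests above (an illustration, assuming
 * the usual VZ GuestCtl2 layout): kvm_priority_to_irq[] holds Cause.IP
 * masks, e.g. CAUSEF_IP2 == (1 << 10). GuestCtl2.VIP occupies the same bit
 * positions (10..15), so the mask is applied to GuestCtl2 directly, while
 * the matching Hardware Clear bits sit 14 bits higher: (1 << 10) << 14 ==
 * (1 << 24). Only when the HC bit is clear must software clear VIP by hand.
 */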

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
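
/*
 * Illustration of the GTOffset write above: on VZ the guest observes
 * Guest.Count = Root.Count + GTOffset (mod 2^32). E.g. with Root.Count =
 * 0x9000 and compare = 0x1000, GTOffset becomes 0xffff8000, so Guest.Count
 * reads back as 0x1000 == Guest.Compare, leaving the count just past
 * Compare and the next timer interrupt a full count period away.
 */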

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
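
/*
 * Example of the wrap-safe check above: with start_count = 0xfffffff0,
 * compare = 0x00000002 and after_count = 0x00000005 (all u32), the
 * subtractions wrap to after_count - start_count = 0x15 and
 * compare - start_count - 1 = 0x11. Since 0x15 > 0x11, Compare was passed
 * between the two reads and the timer interrupt is queued instead of being
 * silently lost. _kvm_vz_save_htimer() uses the same test.
 */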

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since hard timer won't remain active over preemption, preemption should be
 * disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to soft guest timer if the hard timer
 * was in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *      AM      UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}
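
/*
 * Worked example of the am_lookup magic: 0x70080000 has bits 30..28 set
 * (the "always mapped" column for AM = MK/MSK/MUSK = 1/2/3) and bit 19 set
 * (the "mapped unless kernel mode" column for AM = MUSUK = 4). The shift by
 * AM moves the relevant column into the sign bit: e.g. AM = 3 (MUSK) gives
 * 0x70080000 << 3 = 0x80400000, negative as an s32, so the segment is
 * always TLB mapped; AM = 4 only goes negative after the additional << 8.
 */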

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out that
				 * segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}

		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
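
/*
 * Example translations (illustrative only): a legacy KSeg0 GVA such as
 * 0x80001000 takes the (s32)gva32 < (s32)0xc0000000 branch and yields
 * GPA = gva32 & 0x1fffffff = 0x00001000. The segmentation cases follow the
 * same pattern, except the 512MB/1GB segment base comes from the SegCtl PA
 * field ((segctl << 20) & mask) and only the offset bits (gva32 & ~mask)
 * are taken from the GVA.
 */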

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off unused bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* clear VH */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}
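
/*
 * Illustration of the VH handling above, assuming the usual MAAR layout
 * (VL at bit 0, VH at bit 63): a 32-bit MTC0 cannot express the upper half
 * of the register, so VH is simply cleared, while a 64-bit DMTC0 gets a
 * simplified interface in which VH is forced to mirror VL. E.g. a DMTC0 of
 * 0x0000000090000001 (VL set) is stored as 0x8000000090000001 when the
 * guest has MVH.
 */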

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}
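
/*
 * Example of the MAARI semantics implemented above (with a hypothetical
 * 16-entry MAAR array): writing 5 selects MAAR[5], writing the all-ones
 * MIPS_MAARI_INDEX value selects the highest implemented pair (index 15),
 * and an out-of-range write such as 40 leaves the software MAARI unchanged.
 */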

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
						ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
						ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	case Hit_Invalidate_I:
	case Hit_Invalidate_D:
	case Hit_Writeback_Inv_D:
		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
			/* We can just flush entire icache */
			local_flush_icache_range(0, 0);
			return EMULATE_DONE;
		}

		/* So far, other platforms support guest hit cache ops */
		break;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

#ifdef CONFIG_CPU_LOONGSON64
static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	unsigned int rs, rd;
	unsigned int hostcfg;
	unsigned long curr_pc;
	enum emulation_result er = EMULATE_DONE;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rs = inst.loongson3_lscsr_format.rs;
	rd = inst.loongson3_lscsr_format.rd;
	switch (inst.loongson3_lscsr_format.fr) {
	case 0x8:  /* Read CPUCFG */
		++vcpu->stat.vz_cpucfg_exits;
		hostcfg = read_cpucfg(vcpu->arch.gprs[rs]);

		switch (vcpu->arch.gprs[rs]) {
		case LOONGSON_CFG0:
			vcpu->arch.gprs[rd] = 0x14c000;
			break;
		case LOONGSON_CFG1:
			hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI |
				    LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 |
				    LOONGSON_CFG1_SFBP);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG2:
			hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 |
				    LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW);
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		case LOONGSON_CFG3:
			vcpu->arch.gprs[rd] = hostcfg;
			break;
		default:
			/* Don't export any other advanced features to guest */
			vcpu->arch.gprs[rd] = 0;
			break;
		}
		break;

	default:
		kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n",
			inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs],
			curr_pc);
		er = EMULATE_FAIL;
		break;
	}

	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n",
			curr_pc, __func__, inst.word,
			inst.loongson3_lscsr_format.fr);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}
#endif

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
#ifdef CONFIG_CPU_LOONGSON64
	case lwc2_op:
		er = kvm_vz_gpsi_lwc2(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:
	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}
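
/*
 * Worked example of the Cause.WP clamp above (illustrative only): WP may be
 * cleared but never set by the guest. If old_cause has WP=1 and the guest
 * writes WP=0, then change has WP set and (~CAUSEF_WP | old_cause) also has
 * WP set, so the change survives and the final XOR clears WP. If old_cause
 * has WP=0 and the guest writes WP=1, (~CAUSEF_WP | old_cause) has WP clear,
 * the WP bit is masked out of change, and the attempted set is ignored.
 */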

static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so let's trace
	 * some relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu)
{
	u32 inst = 0;

	/*
	 *  Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
	KVM_REG_MIPS_CP0_CONTEXTCONFIG,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
#endif
};

static u64 kvm_vz_get_one_regs_segments[] = {
	KVM_REG_MIPS_CP0_SEGCTL0,
	KVM_REG_MIPS_CP0_SEGCTL1,
	KVM_REG_MIPS_CP0_SEGCTL2,
};

static u64 kvm_vz_get_one_regs_htw[] = {
	KVM_REG_MIPS_CP0_PWBASE,
	KVM_REG_MIPS_CP0_PWFIELD,
	KVM_REG_MIPS_CP0_PWSIZE,
	KVM_REG_MIPS_CP0_PWCTL,
};

static u64 kvm_vz_get_one_regs_kscratch[] = {
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,
};

static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_vz_get_one_regs);
	if (cpu_guest_has_userlocal)
		++ret;
	if (cpu_guest_has_badinstr)
		++ret;
	if (cpu_guest_has_badinstrp)
		++ret;
	if (cpu_guest_has_contextconfig)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	if (cpu_guest_has_segments)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	if (cpu_guest_has_htw || cpu_guest_has_ldpte)
		ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
		ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
	ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);

	return ret;
}

static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_vz_get_one_regs,
			 sizeof(kvm_vz_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_vz_get_one_regs);

	if (cpu_guest_has_userlocal) {
		index = KVM_REG_MIPS_CP0_USERLOCAL;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstr) {
		index = KVM_REG_MIPS_CP0_BADINSTR;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_badinstrp) {
		index = KVM_REG_MIPS_CP0_BADINSTRP;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	if (cpu_guest_has_contextconfig) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
				 sizeof(kvm_vz_get_one_regs_contextconfig)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
	}
	if (cpu_guest_has_segments) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
				 sizeof(kvm_vz_get_one_regs_segments)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
	}
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
				 sizeof(kvm_vz_get_one_regs_htw)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
	}
	if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
			index = KVM_REG_MIPS_CP0_MAAR(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}

		index = KVM_REG_MIPS_CP0_MAARI;
		if (copy_to_user(indices, &index, sizeof(index)))
			return -EFAULT;
		++indices;
	}
	for (i = 0; i < 6; ++i) {
		if (!cpu_guest_has_kscr(i + 2))
			continue;

		if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
				 sizeof(kvm_vz_get_one_regs_kscratch[i])))
			return -EFAULT;
		++indices;
	}

	return 0;
}

static inline s64 entrylo_kvm_to_user(unsigned long v)
{
	s64 mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits up into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= ((s64)v & mask) << 32;
	}
	return ret;
}

static inline unsigned long entrylo_user_to_kvm(s64 v)
{
	unsigned long mask, ret = v;

	if (BITS_PER_LONG == 32) {
		/*
		 * KVM API exposes 64-bit version of the register, so move the
		 * RI/XI bits down into place.
		 */
		mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
		ret &= ~mask;
		ret |= (v >> 32) & mask;
	}
	return ret;
}
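
/*
 * Example of the repositioning above (32-bit hosts only): in a 32-bit
 * kernel MIPS_ENTRYLO_RI/MIPS_ENTRYLO_XI sit at bits 31/30 of the native
 * EntryLo, while the KVM register API always uses the 64-bit layout with
 * RI/XI at bits 63/62. So a native EntryLo of 0x40000007 (XI set) is
 * presented to userland as 0x4000000000000007, and entrylo_user_to_kvm()
 * applies the inverse shift.
 */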

static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)read_gc0_index();
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = entrylo_kvm_to_user(read_gc0_entrylo0());
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = entrylo_kvm_to_user(read_gc0_entrylo1());
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)read_gc0_context();
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_contextconfig();
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		*v = read_gc0_userlocal();
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		*v = read_gc0_xcontextconfig();
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)read_gc0_pagemask();
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		*v = (long)read_gc0_pagegrain();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl0();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl1();
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		*v = read_gc0_segctl2();
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwbase();
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwfield();
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwsize();
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)read_gc0_wired();
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		*v = read_gc0_pwctl();
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)read_gc0_hwrena();
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)read_gc0_badvaddr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		*v = read_gc0_badinstr();
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		*v = read_gc0_badinstrp();
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)read_gc0_entryhi();
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)read_gc0_compare();
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)read_gc0_status();
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = read_gc0_intctl();
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)read_gc0_cause();
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)read_gc0_epc();
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a read-only guest.PRid */
			*v = read_gc0_prid();
			break;
		default:
			*v = (long)kvm_read_c0_guest_prid(cop0);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = kvm_vz_read_gc0_ebase();
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = read_gc0_config();
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			return -EINVAL;
		*v = read_gc0_config1();
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			return -EINVAL;
		*v = read_gc0_config2();
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			return -EINVAL;
		*v = read_gc0_config3();
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			return -EINVAL;
		*v = read_gc0_config4();
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			return -EINVAL;
		*v = read_gc0_config5();
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		*v = vcpu->arch.maar[idx];
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		*v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		*v = read_gc0_xcontext();
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)read_gc0_errorepc();
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			*v = (long)read_gc0_kscratch1();
			break;
		case 3:
			*v = (long)read_gc0_kscratch2();
			break;
		case 4:
			*v = (long)read_gc0_kscratch3();
			break;
		case 5:
			*v = (long)read_gc0_kscratch4();
			break;
		case 6:
			*v = (long)read_gc0_kscratch5();
			break;
		case 7:
			*v = (long)read_gc0_kscratch6();
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int idx;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		write_gc0_index(v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		write_gc0_entrylo0(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		write_gc0_entrylo1(entrylo_user_to_kvm(v));
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		write_gc0_context(v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_contextconfig(v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		if (!cpu_guest_has_userlocal)
			return -EINVAL;
		write_gc0_userlocal(v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
		if (!cpu_guest_has_contextconfig)
			return -EINVAL;
		write_gc0_xcontextconfig(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_PAGEMASK:
		write_gc0_pagemask(v);
		break;
	case KVM_REG_MIPS_CP0_PAGEGRAIN:
		write_gc0_pagegrain(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL0:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl0(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL1:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl1(v);
		break;
	case KVM_REG_MIPS_CP0_SEGCTL2:
		if (!cpu_guest_has_segments)
			return -EINVAL;
		write_gc0_segctl2(v);
		break;
	case KVM_REG_MIPS_CP0_PWBASE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwbase(v);
		break;
	case KVM_REG_MIPS_CP0_PWFIELD:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwfield(v);
		break;
	case KVM_REG_MIPS_CP0_PWSIZE:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwsize(v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		change_gc0_wired(MIPSR6_WIRED_WIRED, v);
		break;
	case KVM_REG_MIPS_CP0_PWCTL:
		if (!cpu_guest_has_htw && !cpu_guest_has_ldpte)
			return -EINVAL;
		write_gc0_pwctl(v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		write_gc0_hwrena(v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		write_gc0_badvaddr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTR:
		if (!cpu_guest_has_badinstr)
			return -EINVAL;
		write_gc0_badinstr(v);
		break;
	case KVM_REG_MIPS_CP0_BADINSTRP:
		if (!cpu_guest_has_badinstrp)
			return -EINVAL;
		write_gc0_badinstrp(v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		write_gc0_entryhi(v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		write_gc0_status(v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		write_gc0_intctl(v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the timer interrupt pending bit (TI).
		 * A timer interrupt should not happen in between.
		 */
		if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				change_gc0_cause((u32)~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				change_gc0_cause((u32)~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			write_gc0_cause(v);
		}
		break;
	case KVM_REG_MIPS_CP0_EPC:
		write_gc0_epc(v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		switch (boot_cpu_type()) {
		case CPU_CAVIUM_OCTEON3:
			/* Octeon III has a guest.PRid, but it's read-only */
			break;
		default:
			kvm_write_c0_guest_prid(cop0, v);
			break;
		}
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		kvm_vz_write_gc0_ebase(v);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		cur = read_gc0_config();
		change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		if (!cpu_guest_has_conf1)
			break;
		cur = read_gc0_config1();
		change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config1(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		if (!cpu_guest_has_conf2)
			break;
		cur = read_gc0_config2();
		change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config2(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		if (!cpu_guest_has_conf3)
			break;
		cur = read_gc0_config3();
		change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config3(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		if (!cpu_guest_has_conf4)
			break;
		cur = read_gc0_config4();
		change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config4(v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		if (!cpu_guest_has_conf5)
			break;
		cur = read_gc0_config5();
		change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			write_gc0_config5(v);
		}
		break;
	case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
		if (idx >= ARRAY_SIZE(vcpu->arch.maar))
			return -EINVAL;
		vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
		break;
	case KVM_REG_MIPS_CP0_MAARI:
		if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
			return -EINVAL;
		kvm_write_maari(vcpu, v);
		break;
#ifdef CONFIG_64BIT
	case KVM_REG_MIPS_CP0_XCONTEXT:
		write_gc0_xcontext(v);
		break;
#endif
	case KVM_REG_MIPS_CP0_ERROREPC:
		write_gc0_errorepc(v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
		idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
		if (!cpu_guest_has_kscr(idx))
			return -EINVAL;
		switch (idx) {
		case 2:
			write_gc0_kscratch1(v);
			break;
		case 3:
			write_gc0_kscratch2(v);
			break;
		case 4:
			write_gc0_kscratch3(v);
			break;
		case 5:
			write_gc0_kscratch4(v);
			break;
		case 6:
			write_gc0_kscratch5(v);
			break;
		case 7:
			write_gc0_kscratch6(v);
			break;
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

#define guestid_cache(cpu)	(cpu_data[cpu].guestid_cache)
static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
{
	unsigned long guestid = guestid_cache(cpu);

	if (!(++guestid & GUESTID_MASK)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		if (!guestid)		/* fix version if needed */
			guestid = GUESTID_FIRST_VERSION;

		++guestid;		/* guestid 0 reserved for root */

		/* start new guestid cycle */
		kvm_vz_local_flush_roottlb_all_guests();
		kvm_vz_local_flush_guesttlb_all();
	}

	guestid_cache(cpu) = guestid;
}
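
/*
 * Illustration of the GuestID versioning above: guestid_cache is a counter
 * whose low GUESTID_MASK bits are the hardware GuestID and whose upper bits
 * act as a generation number. E.g. with an 8-bit GuestID field, allocations
 * run 0x101..0x1ff; when the low byte wraps, both TLBs are flushed and the
 * next allocation is 0x201 (ID 0 stays reserved for root), so every stale
 * vzguestid[] value from the old generation fails the version check in
 * kvm_vz_vcpu_load_tlb() and gets reallocated.
 */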

/* Returns 1 if the guest TLB may be clobbered */
static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
{
	int ret = 0;
	int i;

	if (!kvm_request_pending(vcpu))
		return 0;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		if (cpu_has_guestid) {
			/* Drop all GuestIDs for this VCPU */
			for_each_possible_cpu(i)
				vcpu->arch.vzguestid[i] = 0;
			/* This will clobber guest TLB contents too */
			ret = 1;
		}
		/*
		 * For Root ASID Dealias (RAD) we don't do anything here, but we
		 * still need the request to ensure we recheck asid_flush_mask.
		 * We can still return 0 as only the root TLB will be affected
		 * by a root ASID flush.
		 */
	}

	return ret;
}

static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
{
	unsigned int wired = read_gc0_wired();
	struct kvm_mips_tlb *tlbs;
	int i;

	/* Expand the wired TLB array if necessary */
	wired &= MIPSR6_WIRED_WIRED;
	if (wired > vcpu->arch.wired_tlb_limit) {
		tlbs = krealloc(vcpu->arch.wired_tlb, wired *
				sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
		if (WARN_ON(!tlbs)) {
			/* Save whatever we can */
			wired = vcpu->arch.wired_tlb_limit;
		} else {
			vcpu->arch.wired_tlb = tlbs;
			vcpu->arch.wired_tlb_limit = wired;
		}
	}

	if (wired)
		/* Save wired entries from the guest TLB */
		kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
	/* Invalidate any dropped entries since last time */
	for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
		vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
		vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
		vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
		vcpu->arch.wired_tlb[i].tlb_mask = 0;
	}
	vcpu->arch.wired_tlb_used = wired;
}
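
/*
 * Note on the invalidation pattern above (standard MIPS TLB practice):
 * dropped entries are rewritten with UNIQUE_GUEST_ENTRYHI(i), which yields
 * a distinct unmapped address for each index, so no two entries can match
 * the same address and trigger a TLB-shutdown machine check when the array
 * is later loaded back by kvm_vz_vcpu_load_wired().
 */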

static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
{
	/* Load wired entries into the guest TLB */
	if (vcpu->arch.wired_tlb)
		kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
				     vcpu->arch.wired_tlb_used);
}
static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
	bool migrated;

	/*
	 * Are we entering guest context on a different CPU to last time?
	 * If so, the VCPU's guest TLB state on this CPU may be stale.
	 */
	migrated = (vcpu->arch.last_exec_cpu != cpu);
	vcpu->arch.last_exec_cpu = cpu;

	/*
	 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
	 * remains set until another vcpu is loaded in. As a rule GuestRID
	 * remains zeroed when in root context unless the kernel is busy
	 * manipulating guest tlb entries.
	 */
	if (cpu_has_guestid) {
		/*
		 * Check if our GuestID is of an older version and thus invalid.
		 *
		 * We also discard the stored GuestID if we've executed on
		 * another CPU, as the guest mappings may have changed without
		 * hypervisor knowledge.
		 */
		if (migrated ||
		    (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
					GUESTID_VERSION_MASK) {
			kvm_vz_get_new_guestid(cpu, vcpu);
			vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
			trace_kvm_guestid_change(vcpu,
						 vcpu->arch.vzguestid[cpu]);
		}

		/* Restore GuestID */
		change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
	} else {
		/*
		 * The Guest TLB only stores a single guest's TLB state, so
		 * flush it if another VCPU has executed on this CPU.
		 *
		 * We also flush if we've executed on another CPU, as the guest
		 * mappings may have changed without hypervisor knowledge.
		 */
		if (migrated || last_exec_vcpu[cpu] != vcpu)
			kvm_vz_local_flush_guesttlb_all();
		last_exec_vcpu[cpu] = vcpu;

		/*
		 * Root ASID dealiases guest GPA mappings in the root TLB.
		 * Allocate new root ASID if needed.
		 */
		if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask))
			get_new_mmu_context(gpa_mm);
		else
			check_mmu_context(gpa_mm);
	}
}
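/*
 * Summary of the two strategies above: with GuestID support, mappings for
 * several VCPUs can coexist in the TLB tagged by GuestCtl1.ID, so switching
 * VCPUs on one CPU needs no flush. Without it, the single guest TLB context
 * is flushed whenever a different VCPU (or a migrated one) is about to run,
 * and root ASIDs dealias the GPA mappings in the root TLB instead.
 */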
static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	bool migrated, all;

	/*
	 * Have we migrated to a different CPU?
	 * If so, any old guest TLB state may be stale.
	 */
	migrated = (vcpu->arch.last_sched_cpu != cpu);

	/*
	 * Was this the last VCPU to run on this CPU?
	 * If not, any old guest state from this VCPU will have been clobbered.
	 */
	all = migrated || (last_vcpu[cpu] != vcpu);
	last_vcpu[cpu] = vcpu;

	/*
	 * Restore CP0_Wired unconditionally as we clear it after use, and
	 * restore wired guest TLB entries (while in guest context).
	 */
	kvm_restore_gc0_wired(cop0);
	if (current->flags & PF_VCPU) {
		tlbw_use_hazard();
		kvm_vz_vcpu_load_tlb(vcpu, cpu);
		kvm_vz_vcpu_load_wired(vcpu);
	}

	/*
	 * Restore timer state regardless, as e.g. Cause.TI can change over time
	 * if left unmaintained.
	 */
	kvm_vz_restore_timer(vcpu);

	/* Set MC bit if we want to trace guest mode changes */
	if (kvm_trace_guest_mode_change)
		set_c0_guestctl0(MIPS_GCTL0_MC);
	else
		clear_c0_guestctl0(MIPS_GCTL0_MC);

	/* Don't bother restoring registers multiple times unless necessary */
	if (!all)
		return 0;

	/*
	 * Restore config registers first, as some implementations restrict
	 * writes to other registers when the corresponding feature bits aren't
	 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
	 */
	kvm_restore_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_restore_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_restore_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_restore_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_restore_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_restore_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_restore_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_restore_gc0_config7(cop0);

	kvm_restore_gc0_index(cop0);
	kvm_restore_gc0_entrylo0(cop0);
	kvm_restore_gc0_entrylo1(cop0);
	kvm_restore_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_restore_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_restore_gc0_xcontextconfig(cop0);
#endif
	kvm_restore_gc0_pagemask(cop0);
	kvm_restore_gc0_pagegrain(cop0);
	kvm_restore_gc0_hwrena(cop0);
	kvm_restore_gc0_badvaddr(cop0);
	kvm_restore_gc0_entryhi(cop0);
	kvm_restore_gc0_status(cop0);
	kvm_restore_gc0_intctl(cop0);
	kvm_restore_gc0_epc(cop0);
	kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
	if (cpu_guest_has_userlocal)
		kvm_restore_gc0_userlocal(cop0);

	kvm_restore_gc0_errorepc(cop0);

	/* restore KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_restore_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_restore_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_restore_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_restore_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_restore_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_restore_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_restore_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_restore_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_restore_gc0_segctl0(cop0);
		kvm_restore_gc0_segctl1(cop0);
		kvm_restore_gc0_segctl2(cop0);
	}

	/* restore HTW registers */
	if (cpu_guest_has_htw || cpu_guest_has_ldpte) {
		kvm_restore_gc0_pwbase(cop0);
		kvm_restore_gc0_pwfield(cop0);
		kvm_restore_gc0_pwsize(cop0);
		kvm_restore_gc0_pwctl(cop0);
	}

	/* restore Root.GuestCtl2 from unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		write_c0_guestctl2(
			cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);

	/*
	 * We should clear linked load bit to break interrupted atomics. This
	 * prevents a SC on the next VCPU from succeeding by matching a LL on
	 * the previous VCPU.
	 */
	if (cpu_guest_has_rw_llb)
		write_gc0_lladdr(0);

	return 0;
}
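/*
 * Concrete LL/SC scenario for the LLAddr clear at the end of
 * kvm_vz_vcpu_load(): VCPU A executes LL on a guest physical address and is
 * preempted; VCPU B is loaded on the same CPU and issues SC to the same
 * address. If LLbit were left set, B's SC could succeed against A's link and
 * break the atomicity of both emulated LL/SC pairs.
 */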
static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	if (current->flags & PF_VCPU)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_lose_fpu(vcpu);

	kvm_save_gc0_index(cop0);
	kvm_save_gc0_entrylo0(cop0);
	kvm_save_gc0_entrylo1(cop0);
	kvm_save_gc0_context(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_contextconfig(cop0);
#ifdef CONFIG_64BIT
	kvm_save_gc0_xcontext(cop0);
	if (cpu_guest_has_contextconfig)
		kvm_save_gc0_xcontextconfig(cop0);
#endif
	kvm_save_gc0_pagemask(cop0);
	kvm_save_gc0_pagegrain(cop0);
	kvm_save_gc0_wired(cop0);
	/* allow wired TLB entries to be overwritten */
	clear_gc0_wired(MIPSR6_WIRED_WIRED);
	kvm_save_gc0_hwrena(cop0);
	kvm_save_gc0_badvaddr(cop0);
	kvm_save_gc0_entryhi(cop0);
	kvm_save_gc0_status(cop0);
	kvm_save_gc0_intctl(cop0);
	kvm_save_gc0_epc(cop0);
	kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
	if (cpu_guest_has_userlocal)
		kvm_save_gc0_userlocal(cop0);

	/* only save implemented config registers */
	kvm_save_gc0_config(cop0);
	if (cpu_guest_has_conf1)
		kvm_save_gc0_config1(cop0);
	if (cpu_guest_has_conf2)
		kvm_save_gc0_config2(cop0);
	if (cpu_guest_has_conf3)
		kvm_save_gc0_config3(cop0);
	if (cpu_guest_has_conf4)
		kvm_save_gc0_config4(cop0);
	if (cpu_guest_has_conf5)
		kvm_save_gc0_config5(cop0);
	if (cpu_guest_has_conf6)
		kvm_save_gc0_config6(cop0);
	if (cpu_guest_has_conf7)
		kvm_save_gc0_config7(cop0);

	kvm_save_gc0_errorepc(cop0);

	/* save KScratch registers if enabled in guest */
	if (cpu_guest_has_conf4) {
		if (cpu_guest_has_kscr(2))
			kvm_save_gc0_kscratch1(cop0);
		if (cpu_guest_has_kscr(3))
			kvm_save_gc0_kscratch2(cop0);
		if (cpu_guest_has_kscr(4))
			kvm_save_gc0_kscratch3(cop0);
		if (cpu_guest_has_kscr(5))
			kvm_save_gc0_kscratch4(cop0);
		if (cpu_guest_has_kscr(6))
			kvm_save_gc0_kscratch5(cop0);
		if (cpu_guest_has_kscr(7))
			kvm_save_gc0_kscratch6(cop0);
	}

	if (cpu_guest_has_badinstr)
		kvm_save_gc0_badinstr(cop0);
	if (cpu_guest_has_badinstrp)
		kvm_save_gc0_badinstrp(cop0);

	if (cpu_guest_has_segments) {
		kvm_save_gc0_segctl0(cop0);
		kvm_save_gc0_segctl1(cop0);
		kvm_save_gc0_segctl2(cop0);
	}

	/* save HTW registers if enabled in guest */
	if (cpu_guest_has_ldpte || (cpu_guest_has_htw &&
	    kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) {
		kvm_save_gc0_pwbase(cop0);
		kvm_save_gc0_pwfield(cop0);
		kvm_save_gc0_pwsize(cop0);
		kvm_save_gc0_pwctl(cop0);
	}

	kvm_vz_save_timer(vcpu);

	/* save Root.GuestCtl2 in unused Guest guestctl2 register */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
			read_c0_guestctl2();

	return 0;
}
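/*
 * Root.GuestCtl2 holds virtual interrupts that have been injected but not
 * yet taken by the guest; stashing it in the otherwise unused guest
 * GuestCtl2 slot above means a pending injection survives the VCPU being
 * scheduled out and back in, possibly on another CPU.
 */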
/**
 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
 * @size:	Number of guest VTLB entries (0 < @size <= root VTLB entries).
 *
 * Attempt to resize the guest VTLB by writing guest Config registers. This is
 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
 * entries in the root VTLB.
 *
 * Returns:	The resulting guest VTLB size.
 */
static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
{
	unsigned int config4 = 0, ret = 0, limit;

	/* Write MMUSize - 1 into guest Config registers */
	if (cpu_guest_has_conf1)
		change_gc0_config1(MIPS_CONF1_TLBS,
				   (size - 1) << MIPS_CONF1_TLBS_SHIFT);
	if (cpu_guest_has_conf4) {
		config4 = read_gc0_config4();
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
			config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_VTLBSIZEEXT_SHIFT;
		} else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			   MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
			config4 &= ~MIPS_CONF4_MMUSIZEEXT;
			config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
				MIPS_CONF4_MMUSIZEEXT_SHIFT;
		}
		write_gc0_config4(config4);
	}

	/*
	 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
	 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
	 * not dropped).
	 */
	if (cpu_has_mips_r6) {
		limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
						MIPSR6_WIRED_LIMIT_SHIFT;
		if (size - 1 <= limit)
			limit = 0;
		write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
	}

	/* Read back MMUSize - 1 */
	back_to_back_c0_hazard();
	if (cpu_guest_has_conf1)
		ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
						MIPS_CONF1_TLBS_SHIFT;
	if (config4) {
		if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
		    MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
				MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
		else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
			 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
			ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
				MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
				MIPS_CONF1_TLBS_SIZE;
	}

	return ret + 1;
}
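/*
 * Worked example (illustrative numbers): MIPS_CONF1_TLBS_SIZE is 6, so
 * Config1.MMUSize-1 alone caps out at 64 entries. Requesting size = 128
 * writes (128 - 1) = 0x7f: the low six bits (0x3f) land in Config1 and bit 6
 * goes to Config4.VTLBSizeExt; reading both back reassembles 127, so the
 * function returns 127 + 1 = 128.
 */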
static int kvm_vz_hardware_enable(void)
{
	unsigned int mmu_size, guest_mmu_size, ftlb_size;
	u64 guest_cvmctl, cvmvmconfig;

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/* Set up guest timer/perfcount IRQ lines */
		guest_cvmctl = read_gc0_cvmctl();
		guest_cvmctl &= ~CVMCTL_IPTI;
		guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT;
		guest_cvmctl &= ~CVMCTL_IPPCI;
		guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT;
		write_gc0_cvmctl(guest_cvmctl);

		cvmvmconfig = read_c0_cvmvmconfig();
		/* No I/O hole translation. */
		cvmvmconfig |= CVMVMCONF_DGHT;
		/* Halve the root MMU size */
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		guest_mmu_size = mmu_size / 2;
		mmu_size -= guest_mmu_size;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);
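		/*
		 * E.g. with a hypothetical 256-entry MMU: guest_mmu_size
		 * becomes 128, the root size field is rewritten to 128 - 1,
		 * and the remaining half of the entries is left to back the
		 * guest TLB.
		 */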

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = guest_mmu_size;

		/* Flush moved entries in new (guest) context */
		kvm_vz_local_flush_guesttlb_all();
		break;
	default:
		/*
		 * ImgTec cores tend to use a shared root/guest TLB. To avoid
		 * overlap of root wired and guest entries, the guest TLB may
		 * need resizing.
		 */
		mmu_size = current_cpu_data.tlbsizevtlb;
		ftlb_size = current_cpu_data.tlbsize - mmu_size;

		/* Try switching to maximum guest VTLB size for flush */
		guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
		kvm_vz_local_flush_guesttlb_all();

		/*
		 * Reduce to make space for root wired entries and at least 2
		 * root non-wired entries. This does assume that long-term wired
		 * entries won't be added later.
		 */
		guest_mmu_size = mmu_size - num_wired_entries() - 2;
		guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
		current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;

		/*
		 * Write the VTLB size, but if another CPU has already written,
		 * check it matches or we won't provide a consistent view to the
		 * guest. If this ever happens it suggests an asymmetric number
		 * of wired entries.
		 */
		if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
		    WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
			 "Available guest VTLB size mismatch"))
			return -EINVAL;
		break;
	}

	/*
	 * Enable virtualization features granting guest direct control of
	 * privileged registers:
	 * CP0=1:	Guest coprocessor 0 context.
	 * AT=Guest:	Guest MMU.
	 * CG=1:	Hit (virtual address) CACHE operations (optional).
	 * CF=1:	Guest Config registers.
	 * CGI=1:	Indexed flush CACHE operations (optional).
	 */
	write_c0_guestctl0(MIPS_GCTL0_CP0 |
			   (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
			   MIPS_GCTL0_CG | MIPS_GCTL0_CF);
	if (cpu_has_guestctl0ext) {
		if (current_cpu_type() != CPU_LOONGSON64)
			set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
		else
			clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();

		GUESTID_MASK = current_cpu_data.guestid_mask;
		GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
		GUESTID_VERSION_MASK = ~GUESTID_MASK;

		current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
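		/*
		 * E.g. an 8-bit GuestID field gives GUESTID_MASK = 0xff,
		 * GUESTID_FIRST_VERSION = 0x100 and GUESTID_VERSION_MASK =
		 * ~0xff, so generations advance in steps of 0x100.
		 */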
	}

	/* clear any pending injected virtual guest interrupts */
	if (cpu_has_guestctl2)
		clear_c0_guestctl2(0x3f << 10);

#ifdef CONFIG_CPU_LOONGSON64
	/* Control guest CCA attribute */
	if (cpu_has_csr())
		csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec);
#endif

	return 0;
}
static void kvm_vz_hardware_disable(void)
{
	u64 cvmvmconfig;
	unsigned int mmu_size;

	/* Flush any remaining guest TLB entries */
	kvm_vz_local_flush_guesttlb_all();

	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON3:
		/*
		 * Allocate whole TLB for root. Existing guest TLB entries will
		 * change ownership to the root TLB. We should be safe though as
		 * they've already been flushed above while in guest TLB.
		 */
		cvmvmconfig = read_c0_cvmvmconfig();
		mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1)
			    >> CVMVMCONF_MMUSIZEM1_S) + 1;
		cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1;
		cvmvmconfig |= mmu_size - 1;
		write_c0_cvmvmconfig(cvmvmconfig);

		/* Update our records */
		current_cpu_data.tlbsize = mmu_size;
		current_cpu_data.tlbsizevtlb = mmu_size;
		current_cpu_data.guest.tlbsize = 0;

		/* Flush moved entries in new (root) context */
		local_flush_tlb_all();
		break;
	}

	if (cpu_has_guestid) {
		write_c0_guestctl1(0);
		kvm_vz_local_flush_roottlb_all_guests();
	}
}
static int kvm_vz_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_MIPS_VZ:
		/* we wouldn't be here unless cpu_has_vz */
		r = 1;
		break;
#ifdef CONFIG_64BIT
	case KVM_CAP_MIPS_64BIT:
		/* We support 64-bit registers/operations and addresses */
		r = 2;
		break;
#endif
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i)
		vcpu->arch.vzguestid[i] = 0;

	return 0;
}
static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	int cpu;

	/*
	 * If the VCPU is freed and reused as another VCPU, we don't want the
	 * matching pointer wrongly hanging around in last_vcpu[] or
	 * last_exec_vcpu[].
	 */
	for_each_possible_cpu(cpu) {
		if (last_vcpu[cpu] == vcpu)
			last_vcpu[cpu] = NULL;
		if (last_exec_vcpu[cpu] == vcpu)
			last_exec_vcpu[cpu] = NULL;
	}
}
static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

	/*
	 * Start off the timer at the same frequency as the host timer, but the
	 * soft timer doesn't handle frequencies greater than 1GHz yet.
	 */
	if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
		count_hz = mips_hpt_frequency;
	kvm_mips_init_count(vcpu, count_hz);
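	/*
	 * The soft timer derives guest CP0_Count from host ktime in
	 * nanoseconds, so a Count frequency above NSEC_PER_SEC would mean
	 * sub-nanosecond ticks that the conversion cannot express; faster
	 * hosts therefore fall back to the 100 MHz default above.
	 */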

	/*
	 * Initialize guest register state to valid architectural reset state.
	 */

	/* PageGrain */
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
	/* Wired */
	if (cpu_has_mips_r6)
		kvm_write_sw_gc0_wired(cop0,
				       read_gc0_wired() & MIPSR6_WIRED_LIMIT);
	/* Status */
	kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
	if (cpu_has_mips_r5 || cpu_has_mips_r6)
		kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
	/* IntCtl */
	kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
				(INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
	/* PRId */
	kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
	/* EBase */
	kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
	/* Config */
	kvm_save_gc0_config(cop0);
	/* architecturally writable (e.g. from guest) */
	kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
				 _page_cachable_default >> _CACHE_SHIFT);
	/* architecturally read only, but maybe writable from root */
	kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
	if (cpu_guest_has_conf1) {
		kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
		/* Config1 */
		kvm_save_gc0_config1(cop0);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2	|
					       MIPS_CONF1_MD	|
					       MIPS_CONF1_PC	|
					       MIPS_CONF1_WR	|
					       MIPS_CONF1_CA	|
					       MIPS_CONF1_FP);
	}
	if (cpu_guest_has_conf2) {
		kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
		/* Config2 */
		kvm_save_gc0_config2(cop0);
	}
	if (cpu_guest_has_conf3) {
		kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
		/* Config3 */
		kvm_save_gc0_config3(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA	|
					       MIPS_CONF3_BPG	|
					       MIPS_CONF3_ULRI	|
					       MIPS_CONF3_DSP	|
					       MIPS_CONF3_CTXTC	|
					       MIPS_CONF3_ITL	|
					       MIPS_CONF3_LPA	|
					       MIPS_CONF3_VEIC	|
					       MIPS_CONF3_VINT	|
					       MIPS_CONF3_SP	|
					       MIPS_CONF3_CDMM	|
					       MIPS_CONF3_MT	|
					       MIPS_CONF3_SM	|
					       MIPS_CONF3_TL);
	}
	if (cpu_guest_has_conf4) {
		kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
		/* Config4 */
		kvm_save_gc0_config4(cop0);
	}
	if (cpu_guest_has_conf5) {
		kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
		/* Config5 */
		kvm_save_gc0_config5(cop0);
		/* architecturally writable (e.g. from guest) */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K	|
					       MIPS_CONF5_CV	|
					       MIPS_CONF5_MSAEN	|
					       MIPS_CONF5_UFE	|
					       MIPS_CONF5_FRE	|
					       MIPS_CONF5_SBRI	|
					       MIPS_CONF5_UFR);
		/* architecturally read only, but maybe writable from root */
		kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
	}

	if (cpu_guest_has_contextconfig) {
		/* ContextConfig */
		kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
#ifdef CONFIG_64BIT
		/* XContextConfig */
		/* bits SEGBITS-13+3:4 set */
		kvm_write_sw_gc0_xcontextconfig(cop0,
					((1ull << (cpu_vmbits - 13)) - 1) << 4);
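		/*
		 * e.g. with cpu_vmbits == 48 the expression above is
		 * ((1ull << 35) - 1) << 4, i.e. bits 38:4 set, matching the
		 * architectural SEGBITS-13+3:4 rule noted above.
		 */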
#endif
	}

	/* Implementation dependent, use the legacy layout */
	if (cpu_guest_has_segments) {
		/* SegCtl0, SegCtl1, SegCtl2 */
		kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
		kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
				(_page_cachable_default >> _CACHE_SHIFT) <<
						(16 + MIPS_SEGCFG_C_SHIFT));
		kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
	}

	/* reset HTW registers */
	if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) {
		/* PWField */
		kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
		/* PWSize */
		kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
	}

	/* start with no pending virtual guest interrupts */
	if (cpu_has_guestctl2)
		cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;

	/* Put PC at reset vector */
	vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);

	return 0;
}
static void kvm_vz_flush_shadow_all(struct kvm *kvm)
{
	if (cpu_has_guestid) {
		/* Flush GuestID for each VCPU individually */
		kvm_flush_remote_tlbs(kvm);
	} else {
		/*
		 * For each CPU there is a single GPA ASID used by all VCPUs in
		 * the VM, so it doesn't make sense for the VCPUs to handle
		 * invalidation of these ASIDs individually.
		 *
		 * Instead mark all CPUs as needing ASID invalidation in
		 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
		 * kick any running VCPUs so they check asid_flush_mask.
		 */
		cpumask_setall(&kvm->arch.asid_flush_mask);
		kvm_flush_remote_tlbs(kvm);
	}
}
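/*
 * No per-memslot invalidation is attempted: both schemes above drop
 * translations wholesale, so flushing a single memslot's shadow mappings
 * simply falls back to the full flush.
 */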
static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *slot)
{
	kvm_vz_flush_shadow_all(kvm);
}
static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int preserve_guest_tlb;

	preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_save_wired(vcpu);

	kvm_vz_vcpu_load_tlb(vcpu, cpu);

	if (preserve_guest_tlb)
		kvm_vz_vcpu_load_wired(vcpu);
}
static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	kvm_vz_acquire_htimer(vcpu);
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());

	kvm_vz_check_requests(vcpu, cpu);
	kvm_vz_vcpu_load_tlb(vcpu, cpu);
	kvm_vz_vcpu_load_wired(vcpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	kvm_vz_vcpu_save_wired(vcpu);

	return r;
}
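/*
 * Ordering note for kvm_vz_vcpu_run(): the hard timer is claimed and pending
 * interrupts delivered before the guest TLB context is set up, and wired
 * entries are saved as soon as the low-level run loop returns, mirroring
 * what kvm_vz_vcpu_load()/kvm_vz_vcpu_put() do on the preemption (PF_VCPU)
 * path.
 */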
static struct kvm_mips_callbacks kvm_vz_callbacks = {
	.handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
	.handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
	.handle_addr_err_st = kvm_trap_vz_no_handler,
	.handle_addr_err_ld = kvm_trap_vz_no_handler,
	.handle_syscall = kvm_trap_vz_no_handler,
	.handle_res_inst = kvm_trap_vz_no_handler,
	.handle_break = kvm_trap_vz_no_handler,
	.handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
	.handle_guest_exit = kvm_trap_vz_handle_guest_exit,

	.hardware_enable = kvm_vz_hardware_enable,
	.hardware_disable = kvm_vz_hardware_disable,
	.check_extension = kvm_vz_check_extension,
	.vcpu_init = kvm_vz_vcpu_init,
	.vcpu_uninit = kvm_vz_vcpu_uninit,
	.vcpu_setup = kvm_vz_vcpu_setup,
	.flush_shadow_all = kvm_vz_flush_shadow_all,
	.flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
	.gva_to_gpa = kvm_vz_gva_to_gpa_cb,
	.queue_timer_int = kvm_vz_queue_timer_int_cb,
	.dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
	.queue_io_int = kvm_vz_queue_io_int_cb,
	.dequeue_io_int = kvm_vz_dequeue_io_int_cb,
	.irq_deliver = kvm_vz_irq_deliver_cb,
	.irq_clear = kvm_vz_irq_clear_cb,
	.num_regs = kvm_vz_num_regs,
	.copy_reg_indices = kvm_vz_copy_reg_indices,
	.get_one_reg = kvm_vz_get_one_reg,
	.set_one_reg = kvm_vz_set_one_reg,
	.vcpu_load = kvm_vz_vcpu_load,
	.vcpu_put = kvm_vz_vcpu_put,
	.vcpu_run = kvm_vz_vcpu_run,
	.vcpu_reenter = kvm_vz_vcpu_reenter,
};
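/*
 * Note on the table above: traps such as syscall, break, reserved
 * instruction and address errors are taken directly by the guest kernel
 * under VZ, so an exit to root with one of those causes is unexpected and
 * is wired to kvm_trap_vz_no_handler.
 */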
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	if (!cpu_has_vz)
		return -ENODEV;

	/*
	 * VZ requires at least 2 KScratch registers, so it should have been
	 * possible to allocate pgd_reg.
	 */
	if (WARN(pgd_reg == -1,
		 "pgd_reg not allocated even though cpu_has_vz\n"))
		return -ENODEV;

	pr_info("Starting KVM with MIPS VZ extensions\n");

	*install_callbacks = &kvm_vz_callbacks;
	return 0;
}