/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>
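
/*
 * Guest kernel mode and guest user mode each get their own host ASID per
 * CPU. These helpers return the current hardware ASID for the given mode,
 * masked to the ASID bits actually implemented by this CPU.
 */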
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

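/*
 * Lazily fault a guest physical page into the gfn -> host pfn map
 * (guest_pmap). The SRCU read lock protects the memslot lookup performed
 * by gfn_to_pfn(). Returns 0 on success, or -EFAULT if no host page backs
 * the gfn.
 */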
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	/* Already mapped? */
	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}

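/*
 * Handle a TLB miss on a guest KSEG0 address: map the even/odd pair of
 * guest physical pages containing badvaddr and write a host TLB entry
 * covering both halves, tagged with the guest kernel ASID.
 *
 * XXXKYMA: Must be called with interrupts disabled.
 */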
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Both the even and odd page of the pair must be in range */
	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}

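/*
 * Mirror a guest TLB entry into the host TLB: fault in the host pages
 * backing both halves of the guest entry, then combine the host pfns with
 * the dirty/valid attributes taken from the guest TLB entry itself.
 */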
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	int ret;

	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
					   >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
					   >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[
			mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[
			mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
	}

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[0] & ENTRYLO_D) |
		(tlb->tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb->tlb_lo[1] & ENTRYLO_D) |
		(tlb->tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}

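/*
 * KVM's counterpart of get_new_mmu_context(): step the per-CPU ASID
 * allocator and, when the ASID space wraps, flush the TLB and start a new
 * ASID generation for @mm.
 */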
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting
 * it if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
	unsigned long flags;
	int newasid = 0;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	/* Allocate new kernel and user ASIDs if the current ones are stale */
	if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
						asid_version_mask(cpu)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
			vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
			vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
			  cpu_context(cpu, current->mm));
		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
			  cpu, vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	if (!newasid) {
		/*
		 * If we preempted while the guest was executing, then reload
		 * the pre-empted ASID.
		 */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.preempt_entryhi &
					 asid_mask);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/*
		 * Were we in guest context? If so, then the pre-empted ASID is
		 * no longer valid; set it to what it should be based on the
		 * mode of the Guest (Kernel/User).
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
						 asid_mask);
			else
				write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
						 asid_mask);
			ehb();
		}
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_set_regs(vcpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_get_regs(vcpu);

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu))) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}

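/*
 * Fetch the guest instruction at @opc. For TLB-mapped guest addresses the
 * translation is faulted into the host TLB first, with interrupts disabled
 * so the entry cannot be evicted before the read. Guest KSEG0 addresses are
 * instead translated to a host physical address and read through a
 * temporary kernel mapping.
 */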
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			inst = *(opc);
		} else {
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
						KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index]);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}