/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
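
/*
 * Note: much of this file runs in hypervisor real mode (MMU off), so
 * vmalloc'd structures such as the revmap array cannot be dereferenced
 * directly; they are first converted to linear-map addresses via
 * real_vmalloc_addr() below.
 */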

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;

	p = find_linux_pte_or_hugepte(swapper_pg_dir, addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	/* assume we don't have huge pages in vmalloc space... */
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}

/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm, unsigned long flags)
{
	int global;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpumask_clear_cpu(local_paca->kvm_hstate.kvm_vcore->pcpu,
				  &kvm->arch.need_tlb_flush);
	}

	return global;
}

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;
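
	/*
	 * The rmap word holds the HPT index of one entry in a circular,
	 * doubly-linked list of revmap entries (linked through their
	 * forw/back indices); insert the new entry before the head.
	 */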
	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.revmap[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.revmap[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long gfn, ptel, head;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long rcbits;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (!memslot)
		return;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
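	/* Save the R/C bits in the rmap word for a future HPTE for this page */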
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	unlock_rmap(rmap);
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
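	/* Release barrier: make prior HPTE updates visible before dropping HVLOCK */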
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);
	ptep = find_linux_pte_or_hugepte(pgdir, hva, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * The guest page size should always be <= the host page
		 * size, if the host is using hugepages.
		 */
		if (host_pte_size < psize)
			return H_PARAMETER;

		pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_io = hpte_cache_bits(pte_val(pte));
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
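
	/* Clear the old RPN field and insert the real page address */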
	ptel &= ~(HPTE_R_PP0 - psize);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else
		pteh |= HPTE_V_ABSENT;

	/* Check WIMG */
	if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
		if (is_io)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}

	/* Find and lock the HPTEG slot to use */
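	/*
	 * HPTEs are 16 bytes (two doublewords) and are grouped in HPTEGs
	 * of 8 entries; unless H_EXACT is set, any free slot in the group
	 * containing pte_index may be used.
	 */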
 do_insert:
	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it.  Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;

				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(*hpte);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(*hpte);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				*hpte &= ~cpu_to_be64(HPTE_V_HVLOCK);
				return H_PTEG_FULL;
			}
		}
	}

	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.revmap[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	hpte[1] = cpu_to_be64(ptel);
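
	/*
	 * The eieio below orders the write of the second doubleword before
	 * the write of the first; hardware may use the HPTE as soon as
	 * dword 0 shows it valid and unlocked.
	 */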
	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	hpte[0] = cpu_to_be64(pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true, &vcpu->arch.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif

static inline int try_lock_tlbie(unsigned int *lock)
{
	unsigned int tmp, old;
	unsigned int token = LOCK_TOKEN;
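
	/*
	 * The lock word is 0 when free; take it by atomically storing
	 * this CPU's lock token.  Returns 1 on success, 0 if the lock
	 * was already held.
	 */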
	asm volatile("1:lwarx	%1,0,%2\n"
		     "	cmpwi	cr0,%1,0\n"
		     "	bne	2f\n"
		     "	stwcx.	%3,0,%2\n"
		     "	bne-	1b\n"
		     "	isync\n"
		     "2:"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (lock), "r" (token)
		     : "cc", "memory");
	return old == 0;
}

static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;
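
	/*
	 * tlbie broadcasts the invalidation and is serialized partition-
	 * wide via tlbie_lock; tlbiel acts only on the local core, so it
	 * needs no lock.
	 */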
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile(PPC_TLBIE(%1,%0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
		kvm->arch.tlbie_lock = 0;
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i)
			asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
		asm volatile("ptesync" : : : "memory");
	}
}

long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		u64 pte1;

		pte1 = be64_to_cpu(hpte[1]);
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte1, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/* Read PTE low word after tlbie to get final R/C values */
		remove_revmap_chain(kvm, pte_index, rev, v, pte1);
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.gpr[4]);
}

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0;

	global = global_invalidates(kvm, 0);
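
	/*
	 * Gather up to 4 remove requests (8 GPRs) per pass and collect
	 * their tlbie operands in tlbrb[] so the whole batch can be
	 * flushed with one tlbie sequence.
	 */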
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvm->arch.hpt_npte) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:	/* absolute */
					found = 1;
					break;
				case 1:	/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:	/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
				be64_to_cpu(hp[1]), pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			hp[0] = 0;
		}
	}

	return ret;
}

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = be64_to_cpu(hpte[0]);
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn)) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
		return H_NOT_FOUND;
	}
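
	/* Scatter the pp0/n/key bits of flags into their HPTE_R positions */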
	v = pte;
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		pte = be64_to_cpu(hpte[1]);
		r = (pte & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
				  true);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
}

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (pte_index >= kvm->arch.hpt_npte)
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
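		/*
		 * An absent HPTE is one the host has removed from the
		 * hardware HPT (e.g. for a paged-out page) but which the
		 * guest still regards as valid, so report it as valid.
		 */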
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.gpr[4 + i * 2] = v;
		vcpu->arch.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}

void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;

	rb = compute_tlbie_rb(be64_to_cpu(hptep[0]), be64_to_cpu(hptep[1]),
			      pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);

static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};

/*
 * When called from virtmode, this function must be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK across a
 * preemption can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvm->arch.hpt_mask;
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;
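
	/*
	 * Search the primary hash bucket, then the secondary (hash
	 * complemented); each bucket holds 8 HPTEs, i.e. 16 doublewords.
	 */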
	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt_virt + (hash << 7));

		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    hpte_base_page_size(v, r) == (1ul << pshift))
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			/* Unlock and move on */
			hpte[i] = cpu_to_be64(v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvm->arch.hpt_mask;
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be passed on to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE)
		valid |= HPTE_V_ABSENT;

	index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
	if (index < 0) {
		if (status & DSISR_NOHPTE)
			return status;	/* there really was no HPTE */
		return 0;		/* for prot fault, HPTE disappeared */
	}
	hpte = (__be64 *)(kvm->arch.hpt_virt + (index << 4));
	v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
	r = be64_to_cpu(hpte[1]);
	rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
	gr = rev->guest_rpte;

	unlock_hpte(hpte, v);

	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;

	/* Check the storage key to see if it is possibly emulated MMIO */
	if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
	    (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
			(HPTE_R_KEY_HI | HPTE_R_KEY_LO))
		return -2;	/* MMIO emulation - load instr word */

	return -1;		/* send fault up to host kernel mode */
}