/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/debugfs.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
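/*
 * Worked example (illustrative): reading the array from the root down,
 * a 4-level walk decodes 13 + 9 + 9 + 5 = 36 index bits above a 64k
 * (16-bit) page offset, or 13 + 9 + 9 + 9 = 40 index bits above a 4k
 * (12-bit) page offset; either way the total is the 52-bit effective
 * address space checked for in kvmppc_mmu_walk_radix_tree() below.
 */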
int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
			       struct kvmppc_pte *gpte, u64 root,
			       u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret, level, ps;
	unsigned long rts, bits, offset, index;
	u64 pte, base, gpa;
	__be64 rpte;

	rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
		((root & RTS2_MASK) >> RTS2_SHIFT);
	bits = root & RPDS_MASK;
	base = root & RPDB_MASK;
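	/*
	 * Note (illustrative): RTS is a split field; RTS1 contributes the
	 * upper two bits and RTS2 the low three bits of a 5-bit value
	 * encoding an address-space size of 2^(RTS + 31) bytes. For the
	 * 52-bit space required below, RTS = 21 (0b10101).
	 */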
	offset = rts + 31;

	/* Current implementations only support 52-bit space */
	if (offset != 52)
		return -EINVAL;
	/* Walk each level of the radix tree */
	for (level = 3; level >= 0; --level) {
		u64 addr;
		/* Check a valid size */
		if (level && bits != p9_supported_radix_bits[level])
			return -EINVAL;
		if (level == 0 && !(bits == 5 || bits == 9))
			return -EINVAL;
		offset -= bits;
		index = (eaddr >> offset) & ((1UL << bits) - 1);
		/* Check that low bits of page table base are zero */
		if (base & ((1UL << (bits + 3)) - 1))
			return -EINVAL;
		/* Read the entry from guest memory */
		addr = base + (index * sizeof(rpte));
		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
		if (ret) {
			if (pte_ret_p)
				*pte_ret_p = addr;
			return ret;
		}
		pte = __be64_to_cpu(rpte);
		if (!(pte & _PAGE_PRESENT))
			return -ENOENT;
		/* Check if a leaf entry */
		if (pte & _PAGE_PTE)
			break;
		/* Get ready to walk the next level */
		base = pte & RPDB_MASK;
		bits = pte & RPDS_MASK;
	}
	/* Need a leaf at lowest level; 512GB pages not supported */
	if (level < 0 || level == 3)
		return -EINVAL;
	/* We found a valid leaf PTE */
	/* Offset is now log base 2 of the page size */
	gpa = pte & 0x01fffffffffff000ul;
	if (gpa & ((1ul << offset) - 1))
		return -EINVAL;
	gpa |= eaddr & ((1ul << offset) - 1);
	for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
		if (offset == mmu_psize_defs[ps].shift)
			break;
	gpte->page_shift = offset;
	gpte->eaddr = eaddr;
	gpte->raddr = gpa;
	/* Work out permissions */
	gpte->may_read = !!(pte & _PAGE_READ);
	gpte->may_write = !!(pte & _PAGE_WRITE);
	gpte->may_execute = !!(pte & _PAGE_EXEC);

	gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);

	if (pte_ret_p)
		*pte_ret_p = pte;

	return 0;
}
/*
 * Used to walk a partition or process table radix tree in guest memory
 * Note: We exploit the fact that a partition table and a process
 * table have the same layout, a partition-scoped page table and a
 * process-scoped page table have the same layout, and the 2nd
 * doubleword of a partition table entry has the same layout as
 * the PTCR register.
 */
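/*
 * Concretely (illustrative note): each entry walked below is a pair of
 * big-endian doublewords (struct prtb_entry), which is why one routine
 * can read both kinds of table; only the interpretation of each
 * doubleword differs between the partition and process cases.
 */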
int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
				     struct kvmppc_pte *gpte, u64 table,
				     int table_index, u64 *pte_ret_p)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;
	unsigned long size, ptbl, root;
	struct prtb_entry entry;

	if ((table & PRTS_MASK) > 24)
		return -EINVAL;
	size = 1ul << ((table & PRTS_MASK) + 12);
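	/*
	 * For example: PRTS = 0 gives the minimum 4KB table (256 16-byte
	 * entries), and the architectural maximum PRTS = 24 checked above
	 * gives a 2^36-byte (64GB) table.
	 */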
	/* Is the table big enough to contain this entry? */
	if ((table_index * sizeof(entry)) >= size)
		return -EINVAL;

	/* Read the table to find the root of the radix tree */
	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
	if (ret)
		return ret;

	/* Root is stored in the first double word */
	root = be64_to_cpu(entry.prtb0);

	return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
}
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			   struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	u32 pid;
	u64 pte;
	int ret;

	/* Work out effective PID */
	switch (eaddr >> 62) {
	case 0:
		pid = vcpu->arch.pid;
		break;
	case 3:
		pid = 0;
		break;
	default:
		return -EINVAL;
	}

	ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
				vcpu->kvm->arch.process_table, pid, &pte);
	if (ret)
		return ret;
	/* Check privilege (applies only to process scoped translations) */
	if (kvmppc_get_msr(vcpu) & MSR_PR) {
		if (pte & _PAGE_PRIVILEGED) {
			gpte->may_read = 0;
			gpte->may_write = 0;
			gpte->may_execute = 0;
		}
	} else {
		if (!(pte & _PAGE_PRIVILEGED)) {
			/* Check AMR/IAMR to see if strict mode is in force */
			if (vcpu->arch.amr & (1ul << 62))
				gpte->may_read = 0;
			if (vcpu->arch.amr & (1ul << 63))
				gpte->may_write = 0;
			if (vcpu->arch.iamr & (1ul << 62))
				gpte->may_execute = 0;
		}
	}

	return 0;
}
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
				    unsigned int pshift, unsigned int lpid)
{
	unsigned long psize = PAGE_SIZE;
	int psi;
	long rc;
	unsigned long rb;

	if (pshift)
		psize = 1UL << pshift;
	else
		pshift = PAGE_SHIFT;

	addr &= ~(psize - 1);

	if (!kvmhv_on_pseries()) {
		radix__flush_tlb_lpid_page(lpid, addr, psize);
		return;
	}

	psi = shift_to_mmu_psize(pshift);
	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
				lpid, rb);
	if (rc)
		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_pwc_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
				unsigned long clr, unsigned long set,
				unsigned long addr, unsigned int shift)
{
	return __radix_pte_update(ptep, clr, set);
}
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
			     pte_t *ptep, pte_t pte)
{
	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}
static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
	return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
	kmem_cache_free(kvm_pte_cache, ptep);
}

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
	kmem_cache_free(kvm_pmd_cache, pmdp);
}
/* Called with kvm->mmu_lock held */
void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
		      unsigned int shift, struct kvm_memory_slot *memslot,
		      unsigned int lpid)
{
	unsigned long old;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	unsigned long page_size = PAGE_SIZE;
	unsigned long hpa;

	old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
	kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);

	/* The following only applies to L1 entries */
	if (lpid != kvm->arch.lpid)
		return;

	if (!memslot) {
		memslot = gfn_to_memslot(kvm, gfn);
		if (!memslot)
			return;
	}
	if (shift)
		page_size = 1ul << shift;

	gpa &= ~(page_size - 1);
	hpa = old & PTE_RPN_MASK;
	kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);

	if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
 * kvmppc_free_p?d are used to free existing page tables, and recursively
 * descend and clear and free children.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of page fault path
 * (full == false), ptes are not expected. There is code to unmap them
 * and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
				  unsigned int lpid)
{
	if (full) {
		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
	} else {
		pte_t *p = pte;
		unsigned long it;

		for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
			if (pte_val(*p) == 0)
				continue;
			WARN_ON_ONCE(1);
			kvmppc_unmap_pte(kvm, p,
					 pte_pfn(*p) << PAGE_SHIFT,
					 PAGE_SHIFT, NULL, lpid);
		}
	}

	kvmppc_pte_free(pte);
}
static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
				  unsigned int lpid)
{
	unsigned long im;
	pmd_t *p = pmd;

	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
		if (!pmd_present(*p))
			continue;
		if (pmd_is_leaf(*p)) {
			if (full) {
				pmd_clear(p);
			} else {
				WARN_ON_ONCE(1);
				kvmppc_unmap_pte(kvm, (pte_t *)p,
					 pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
					 PMD_SHIFT, NULL, lpid);
			}
		} else {
			pte_t *pte;

			pte = pte_offset_map(p, 0);
			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
			pmd_clear(p);
		}
	}
	kvmppc_pmd_free(pmd);
}
static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
				  unsigned int lpid)
{
	unsigned long iu;
	pud_t *p = pud;

	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
		if (!pud_present(*p))
			continue;
		if (pud_huge(*p)) {
			pud_clear(p);
		} else {
			pmd_t *pmd;

			pmd = pmd_offset(p, 0);
			kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
			pud_clear(p);
		}
	}
	pud_free(kvm->mm, pud);
}
void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
	unsigned long ig;

	for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
		pud_t *pud;

		if (!pgd_present(*pgd))
			continue;
		pud = pud_offset(pgd, 0);
		kvmppc_unmap_free_pud(kvm, pud, lpid);
		pgd_clear(pgd);
	}
}
void kvmppc_free_radix(struct kvm *kvm)
{
	if (kvm->arch.pgtable) {
		kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
					  kvm->arch.lpid);
		pgd_free(kvm->mm, kvm->arch.pgtable);
		kvm->arch.pgtable = NULL;
	}
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
					unsigned long gpa, unsigned int lpid)
{
	pte_t *pte = pte_offset_kernel(pmd, 0);

	/*
	 * Clearing the pmd entry then flushing the PWC ensures that the pte
	 * page can no longer be cached by the MMU, so it can be freed without
	 * flushing the PWC again.
	 */
	pmd_clear(pmd);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
					unsigned long gpa, unsigned int lpid)
{
	pmd_t *pmd = pmd_offset(pud, 0);

	/*
	 * Clearing the pud entry then flushing the PWC ensures that the pmd
	 * page and any children pte pages will no longer be cached by the MMU,
	 * so they can be freed without flushing the PWC again.
	 */
	pud_clear(pud);
	kvmppc_radix_flush_pwc(kvm, lpid);

	kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
 * There are a number of bits which may differ between different faults to
 * the same partition scope entry. The RC bits change in the course of
 * cleaning and aging, and the write bit can change, either because the
 * access was upgraded or because a read fault raced with a write fault
 * that set those bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
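/*
 * Example (illustrative): two CPUs can fault on the same 2MB leaf at
 * once, one reading and one writing; the write fault may install the
 * PTE with _PAGE_WRITE and _PAGE_DIRTY set before the read fault
 * re-checks it. Masking out those bits and _PAGE_ACCESSED lets the
 * WARN_ON_ONCE checks below tolerate exactly that race while still
 * catching real mismatches.
 */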
int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
		      unsigned long gpa, unsigned int level,
		      unsigned long mmu_seq, unsigned int lpid,
		      unsigned long *rmapp, struct rmap_nested **n_rmap)
{
	pgd_t *pgd;
	pud_t *pud, *new_pud = NULL;
	pmd_t *pmd, *new_pmd = NULL;
	pte_t *ptep, *new_ptep = NULL;
	int ret;

	/* Traverse the guest's 2nd-level tree, allocate new levels needed */
	pgd = pgtable + pgd_index(gpa);
	pud = NULL;
	if (pgd_present(*pgd))
		pud = pud_offset(pgd, gpa);
	else
		new_pud = pud_alloc_one(kvm->mm, gpa);

	pmd = NULL;
	if (pud && pud_present(*pud) && !pud_huge(*pud))
		pmd = pmd_offset(pud, gpa);
	else if (level <= 1)
		new_pmd = kvmppc_pmd_alloc();

	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
		new_ptep = kvmppc_pte_alloc();
	/* Check if we might have been invalidated; let the guest retry if so */
	spin_lock(&kvm->mmu_lock);
	ret = -EAGAIN;
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;

	/* Now traverse again under the lock and change the tree */
	ret = -ENOMEM;
	if (pgd_none(*pgd)) {
		if (!new_pud)
			goto out_unlock;
		pgd_populate(kvm->mm, pgd, new_pud);
		new_pud = NULL;
	}
	pud = pud_offset(pgd, gpa);
	if (pud_huge(*pud)) {
		unsigned long hgpa = gpa & PUD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 2) {
			if (pud_raw(*pud) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 1GB page here already, add our extra bits */
			WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, (pte_t *)pud,
					      0, pte_val(pte), hgpa, PUD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 1GB pte in after we saw a pmd page, try again.
		 */
		if (!new_pmd) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 1GB page here already, remove it */
		kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
				 lpid);
	}
	if (level == 2) {
		if (!pud_none(*pud)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pud_none(*pud)) {
		if (!new_pmd)
			goto out_unlock;
		pud_populate(kvm->mm, pud, new_pmd);
		new_pmd = NULL;
	}
	pmd = pmd_offset(pud, gpa);
	if (pmd_is_leaf(*pmd)) {
		unsigned long lgpa = gpa & PMD_MASK;

		/* Check if we raced and someone else has set the same thing */
		if (level == 1) {
			if (pmd_raw(*pmd) == pte_raw(pte)) {
				ret = 0;
				goto out_unlock;
			}
			/* Valid 2MB page here already, add our extra bits */
			WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
			kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
					0, pte_val(pte), lgpa, PMD_SHIFT);
			ret = 0;
			goto out_unlock;
		}
		/*
		 * If we raced with another CPU which has just put
		 * a 2MB pte in after we saw a pte page, try again.
		 */
		if (!new_ptep) {
			ret = -EAGAIN;
			goto out_unlock;
		}
		/* Valid 2MB page here already, remove it */
		kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
				 lpid);
	}
	if (level == 1) {
		if (!pmd_none(*pmd)) {
			/*
			 * There's a page table page here, but we wanted to
			 * install a large page, so remove and free the page
			 * table page.
			 */
			kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
		}
		kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
		if (rmapp && n_rmap)
			kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
		ret = 0;
		goto out_unlock;
	}
	if (pmd_none(*pmd)) {
		if (!new_ptep)
			goto out_unlock;
		pmd_populate(kvm->mm, pmd, new_ptep);
		new_ptep = NULL;
	}
	ptep = pte_offset_kernel(pmd, gpa);
	if (pte_present(*ptep)) {
		/* Check if someone else set the same thing */
		if (pte_raw(*ptep) == pte_raw(pte)) {
			ret = 0;
			goto out_unlock;
		}
		/* Valid page here already, add our extra bits */
		WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
							PTE_BITS_MUST_MATCH);
		kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
		ret = 0;
		goto out_unlock;
	}
	kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
	if (rmapp && n_rmap)
		kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
	ret = 0;

 out_unlock:
	spin_unlock(&kvm->mmu_lock);
	if (new_pud)
		pud_free(kvm->mm, new_pud);
	if (new_pmd)
		kvmppc_pmd_free(new_pmd);
	if (new_ptep)
		kvmppc_pte_free(new_ptep);
	return ret;
}
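/*
 * Note on the level argument above (illustrative): level 0 installs a
 * base-page pte at the lowest level, level 1 a 2MB leaf at the pmd
 * level, and level 2 a 1GB leaf at the pud level, matching the
 * PMD_SHIFT and PUD_SHIFT cases handled in kvmppc_create_pte().
 */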
bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
			     unsigned long gpa, unsigned int lpid)
{
	unsigned long pgflags;
	unsigned int shift;
	pte_t *ptep;

	/*
	 * Need to set an R or C bit in the 2nd-level tables;
	 * since we are just helping out the hardware here,
	 * it is sufficient to do what the hardware does.
	 */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	/*
	 * We are walking the secondary (partition-scoped) page table here.
	 * We can do this without disabling irq because the Linux MM
	 * subsystem doesn't do THP splits and collapses on this tree.
	 */
	ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
		kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
		return true;
	}
	return false;
}
int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
				   unsigned long gpa,
				   struct kvm_memory_slot *memslot,
				   bool writing, bool kvm_ro,
				   pte_t *inserted_pte, unsigned int *levelp)
{
	struct kvm *kvm = vcpu->kvm;
	struct page *page = NULL;
	unsigned long mmu_seq;
	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
	bool upgrade_write = false;
	bool *upgrade_p = &upgrade_write;
	pte_t pte, *ptep;
	unsigned int shift, level;
	int ret;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
	/*
	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
	 * do it with !atomic && !async, which is how we call it.
	 * We always ask for write permission since the common case
	 * is that the page is writable.
	 */
	hva = gfn_to_hva_memslot(memslot, gfn);
	if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
		upgrade_write = true;
	} else {
		unsigned long pfn;

		/* Call KVM generic code to do the slow-path check */
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
					   writing, upgrade_p);
		if (is_error_noslot_pfn(pfn))
			return -EFAULT;
		page = NULL;
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageReserved(page))
				page = NULL;
		}
	}
	/*
	 * Read the PTE from the process' radix tree and use that
	 * so we get the shift and attribute bits.
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

	/* Get pte level from shift/size */
	if (shift == PUD_SHIFT &&
	    (gpa & (PUD_SIZE - PAGE_SIZE)) ==
	    (hva & (PUD_SIZE - PAGE_SIZE))) {
		level = 2;
	} else if (shift == PMD_SHIFT &&
		   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
		   (hva & (PMD_SIZE - PAGE_SIZE))) {
		level = 1;
	} else {
		level = 0;
		if (shift > PAGE_SHIFT) {
			/*
			 * If the pte maps more than one page, bring over
			 * bits from the virtual address to get the real
			 * address of the specific single page we want.
			 */
			unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;

			pte = __pte(pte_val(pte) | (hva & rpnmask));
		}
	}
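	/*
	 * Worked example for the rpnmask adjustment above (illustrative):
	 * for a 2MB pte (shift = 21) with 4k base pages, rpnmask is
	 * 0x1ff000, so bits 12-20 of the host virtual address select the
	 * single 4k page within the 2MB mapping.
	 */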
	pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
	if (writing || upgrade_write) {
		if (pte_val(pte) & _PAGE_WRITE)
			pte = __pte(pte_val(pte) | _PAGE_DIRTY);
	} else {
		pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
	}

	/* Allocate space in the tree and write the PTE */
	ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
				mmu_seq, kvm->arch.lpid, NULL, NULL);
	if (inserted_pte)
		*inserted_pte = pte;
	if (levelp)
		*levelp = level;

	if (page) {
		if (!ret && (pte_val(pte) & _PAGE_WRITE))
			set_page_dirty_lock(page);
		put_page(page);
	}

	return ret;
}
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;

	/* Check for unusual errors */
	if (dsisr & DSISR_UNSUPP_MMU) {
		pr_err("KVM: Got unsupported MMU fault\n");
		return -EFAULT;
	}
	if (dsisr & DSISR_BADACCESS) {
		/* Reflect to the guest as DSI */
		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
		return RESUME_GUEST;
	}
	/* Translate the logical address */
	gpa = vcpu->arch.fault_gpa & ~0xfffUL;
	gpa &= ~0xF000000000000000ul;
	gfn = gpa >> PAGE_SHIFT;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		gpa |= ea & 0xfff;
	/* Get the corresponding memslot */
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
			     DSISR_SET_RC)) {
			/*
			 * Bad address in guest page table tree, or other
			 * unusual error - reflect it to the guest as DSI.
			 */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
						       DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}
	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		spin_lock(&kvm->mmu_lock);
		if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
					    writing, gpa, kvm->arch.lpid))
			dsisr &= ~DSISR_SET_RC;
		spin_unlock(&kvm->mmu_lock);

		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT | DSISR_SET_RC)))
			return RESUME_GUEST;
	}

	/* Try to insert a pte */
	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
					     kvm_ro, NULL, NULL);

	if (ret == 0 || ret == -EAGAIN)
		ret = RESUME_GUEST;
	return ret;
}
/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		    unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
				 kvm->arch.lpid);
	return 0;
}
/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		  unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
					gpa, shift);
		/* XXX need to flush tlb here? */
		ref = 1;
	}
	return ref;
}
/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
		       unsigned long gfn)
{
	pte_t *ptep;
	unsigned long gpa = gfn << PAGE_SHIFT;
	unsigned int shift;
	int ref = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_young(*ptep))
		ref = 1;
	return ref;
}
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
				struct kvm_memory_slot *memslot, int pagenum)
{
	unsigned long gfn = memslot->base_gfn + pagenum;
	unsigned long gpa = gfn << PAGE_SHIFT;
	pte_t *ptep;
	unsigned int shift;
	int ret = 0;

	ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
	if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
		ret = 1;
		if (shift)
			ret = 1 << (shift - PAGE_SHIFT);
		kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
					gpa, shift);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
	}
	return ret;
}
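/*
 * Worked example for the function above (illustrative): a dirty 2MB
 * leaf with 4k base pages has shift = 21, so it reports
 * 1 << (21 - 12) = 512 dirty PAGE_SIZE pages starting at the 2MB
 * boundary.
 */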
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map)
{
	unsigned long i, j;
	int npages;

	for (i = 0; i < memslot->npages; i = j) {
		npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since huge pages are only used to back the guest at guest
		 * real addresses that are a multiple of their size.
		 * Since we have at most one PTE covering any given guest
		 * real address, if npages > 1 we can skip to i + npages.
		 */
		j = i + 1;
		if (npages) {
			set_dirty_bits(map, i, npages);
			j = i + npages;
		}
	}
	return 0;
}
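/*
 * Worked example for the skip logic above (illustrative): if the page
 * at index i = 512 is backed by a dirty 2MB leaf (npages = 512), bits
 * 512-1023 of the map are set in one call and the scan resumes at
 * j = 1024.
 */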
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
				 int psize, int *indexp)
{
	if (!mmu_psize_defs[psize].shift)
		return;
	info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
		(mmu_psize_defs[psize].ap << 29);
	(*indexp)++;
}
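/*
 * Worked example (illustrative, assuming the usual POWER9 radix AP
 * values): a 2MB page has shift = 21 and ap = 1, so its encoding is
 * (1 << 29) | 21 = 0x20000015.
 */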
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
	int i;

	if (!radix_enabled())
		return -EINVAL;
	memset(info, 0, sizeof(*info));

	/* 4k page size */
	info->geometries[0].page_shift = 12;
	info->geometries[0].level_bits[0] = 9;
	for (i = 1; i < 4; ++i)
		info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
	/* 64k page size */
	info->geometries[1].page_shift = 16;
	for (i = 0; i < 4; ++i)
		info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

	i = 0;
	add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
	add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
	return 0;
}
int kvmppc_init_vm_radix(struct kvm *kvm)
{
	kvm->arch.pgtable = pgd_alloc(kvm->mm);
	if (!kvm->arch.pgtable)
		return -ENOMEM;
	return 0;
}
static void pte_ctor(void *addr)
{
	memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
	memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
struct debugfs_radix_state {
	struct kvm	*kvm;
	struct mutex	mutex;
	unsigned long	gpa;
	int		lpid;
	int		chars_left;
	int		buf_index;
	char		buf[128];
	u8		hdr;
};
static int debugfs_radix_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	struct debugfs_radix_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(kvm);
	p->kvm = kvm;
	mutex_init(&p->mutex);
	file->private_data = p;

	return nonseekable_open(inode, file);
}
static int debugfs_radix_release(struct inode *inode, struct file *file)
{
	struct debugfs_radix_state *p = file->private_data;

	kvm_put_kvm(p->kvm);
	kfree(p);
	return 0;
}
static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct debugfs_radix_state *p = file->private_data;
	ssize_t ret, r;
	unsigned long n;
	unsigned long gpa;
	pgd_t *pgt;
	struct kvm *kvm;
	struct kvm_nested_guest *nested;
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ptep;
	int shift;
	unsigned long pte;

	kvm = p->kvm;
	if (!kvm_is_radix(kvm))
		return 0;

	ret = mutex_lock_interruptible(&p->mutex);
	if (ret)
		return ret;

	if (p->chars_left) {
		n = p->chars_left;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf + p->buf_index, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index += n;
		buf += n;
		len -= n;
		ret = n;
		if (r) {
			if (!n)
				ret = -EFAULT;
			goto out;
		}
	}
	gpa = p->gpa;
	nested = NULL;
	pgt = NULL;
	while (len != 0 && p->lpid >= 0) {
		if (gpa >= RADIX_PGTABLE_RANGE) {
			gpa = 0;
			pgt = NULL;
			if (nested) {
				kvmhv_put_nested(nested);
				nested = NULL;
			}
			p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
			p->hdr = 0;
			if (p->lpid < 0)
				break;
		}
		if (!pgt) {
			if (p->lpid == 0) {
				pgt = kvm->arch.pgtable;
			} else {
				nested = kvmhv_get_nested(kvm, p->lpid, false);
				if (!nested) {
					gpa = RADIX_PGTABLE_RANGE;
					continue;
				}
				pgt = nested->shadow_pgtable;
			}
		}
		n = 0;
		if (!p->hdr) {
			if (p->lpid > 0)
				n = scnprintf(p->buf, sizeof(p->buf),
					      "\nNested LPID %d: ", p->lpid);
			n += scnprintf(p->buf + n, sizeof(p->buf) - n,
				      "pgdir: %lx\n", (unsigned long)pgt);
			p->hdr = 1;
			goto copy;
		}
		pgdp = pgt + pgd_index(gpa);
		pgd = READ_ONCE(*pgdp);
		if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
			gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
			continue;
		}

		pudp = pud_offset(&pgd, gpa);
		pud = READ_ONCE(*pudp);
		if (!(pud_val(pud) & _PAGE_PRESENT)) {
			gpa = (gpa & PUD_MASK) + PUD_SIZE;
			continue;
		}
		if (pud_val(pud) & _PAGE_PTE) {
			pte = pud_val(pud);
			shift = PUD_SHIFT;
			goto leaf;
		}

		pmdp = pmd_offset(&pud, gpa);
		pmd = READ_ONCE(*pmdp);
		if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
			gpa = (gpa & PMD_MASK) + PMD_SIZE;
			continue;
		}
		if (pmd_val(pmd) & _PAGE_PTE) {
			pte = pmd_val(pmd);
			shift = PMD_SHIFT;
			goto leaf;
		}

		ptep = pte_offset_kernel(&pmd, gpa);
		pte = pte_val(READ_ONCE(*ptep));
		if (!(pte & _PAGE_PRESENT)) {
			gpa += PAGE_SIZE;
			continue;
		}
		shift = PAGE_SHIFT;
	leaf:
		n = scnprintf(p->buf, sizeof(p->buf),
			      " %lx: %lx %d\n", gpa, pte, shift);
		gpa += 1ul << shift;
	copy:
		p->chars_left = n;
		if (n > len)
			n = len;
		r = copy_to_user(buf, p->buf, n);
		n -= r;
		p->chars_left -= n;
		p->buf_index = n;
		buf += n;
		len -= n;
		ret += n;
		if (r) {
			if (!ret)
				ret = -EFAULT;
			break;
		}
	}
	p->gpa = gpa;
	if (nested)
		kvmhv_put_nested(nested);

 out:
	mutex_unlock(&p->mutex);
	return ret;
}
static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
				   size_t len, loff_t *ppos)
{
	return -EACCES;
}
static const struct file_operations debugfs_radix_fops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_radix_open,
	.release = debugfs_radix_release,
	.read	 = debugfs_radix_read,
	.write	 = debugfs_radix_write,
	.llseek	 = generic_file_llseek,
};
void kvmhv_radix_debugfs_init(struct kvm *kvm)
{
	kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
						     kvm->arch.debugfs_dir, kvm,
						     &debugfs_radix_fops);
}
int kvmppc_radix_init(void)
{
	unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
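	/*
	 * Sizing note (illustrative): with a 64k base page size
	 * RADIX_PTE_INDEX_SIZE is 5, so each partition-scope pte page is
	 * 8 << 5 = 256 bytes; with 4k pages it is 9, giving 4096 bytes.
	 * Passing size as the alignment below keeps each table naturally
	 * aligned, which the hardware walker requires.
	 */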
	kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
	if (!kvm_pte_cache)
		return -ENOMEM;

	size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

	kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
	if (!kvm_pmd_cache) {
		kmem_cache_destroy(kvm_pte_cache);
		return -ENOMEM;
	}

	return 0;
}
void kvmppc_radix_exit(void)
{
	kmem_cache_destroy(kvm_pte_cache);
	kmem_cache_destroy(kvm_pmd_cache);
}