/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           struct kvmppc_pte *gpte, bool data, bool iswrite)
        struct kvm *kvm = vcpu->kvm;
        unsigned long root, pte, index;
        unsigned long rts, bits, offset;
        unsigned long proc_tbl_size;

        /* Work out effective PID */
        switch (eaddr >> 62) {
        proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
        if (pid * 16 >= proc_tbl_size)

        /* Read partition table to find root of tree for effective PID */
        ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
        ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));

        root = be64_to_cpu(prte);
        rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
                ((root & RTS2_MASK) >> RTS2_SHIFT);
        bits = root & RPDS_MASK;
        root = root & RPDB_MASK;
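        /*
         * Note on the fields decoded above: rts is the radix tree size field,
         * split across the RTS1/RTS2 portions of the table entry; bits is the
         * number of index bits used at the root level (RPDS); and root is the
         * guest-real base address of the root page directory (RPDB).  Per
         * ISA 3.0 the tree maps 2^(RTS + 31) bytes, and as the comment below
         * notes, only a 52-bit space is supported here.
         */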
        /* P9 DD1 interprets RTS (radix tree size) differently */
        if (cpu_has_feature(CPU_FTR_POWER9_DD1))

        /* current implementations only support 52-bit space */
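        /*
         * Walk illustration (4k geometry): the 52-bit effective address is
         * consumed from the top down, one directory level per iteration of
         * the loop below:
         *
         *   eaddr[51:39] -> index into the 2^13-entry root directory
         *   eaddr[38:30] -> index at level 2
         *   eaddr[29:21] -> index at level 1
         *   eaddr[20:12] -> index at level 0 (leaf PTE)
         *   eaddr[11:0]  -> byte offset within the 4k page
         *
         * Each directory holds 2^bits eight-byte entries and must be
         * naturally aligned, hence the check below that the low (bits + 3)
         * bits of its base address are zero.
         */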
        for (level = 3; level >= 0; --level) {
                if (level && bits != p9_supported_radix_bits[level])
                if (level == 0 && !(bits == 5 || bits == 9))
                index = (eaddr >> offset) & ((1UL << bits) - 1);
                /* check that low bits of page table base are zero */
                if (root & ((1UL << (bits + 3)) - 1))
                ret = kvm_read_guest(kvm, root + index * 8,
                                     &rpte, sizeof(rpte));
                pte = __be64_to_cpu(rpte);
                if (!(pte & _PAGE_PRESENT))
                root = pte & 0x0fffffffffffff00ul;
        /* need a leaf at lowest level; 512GB pages not supported */
        if (level < 0 || level == 3)

        /* offset is now log base 2 of the page size */
        gpa = pte & 0x01fffffffffff000ul;
        if (gpa & ((1ul << offset) - 1))
        gpa += eaddr & ((1ul << offset) - 1);
        for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
                if (offset == mmu_psize_defs[ps].shift)
        gpte->page_size = ps;
        /* Work out permissions */
        gpte->may_read = !!(pte & _PAGE_READ);
        gpte->may_write = !!(pte & _PAGE_WRITE);
        gpte->may_execute = !!(pte & _PAGE_EXEC);
        if (kvmppc_get_msr(vcpu) & MSR_PR) {
                if (pte & _PAGE_PRIVILEGED) {
                        gpte->may_execute = 0;
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
                        if (vcpu->arch.amr & (1ul << 62))
                        if (vcpu->arch.amr & (1ul << 63))
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
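        /*
         * Summary of the permission logic above: the base R/W/X permissions
         * come from the leaf PTE, problem-state (MSR_PR) accesses are denied
         * on _PAGE_PRIVILEGED pages, and otherwise the top AMR/IAMR bits
         * tested above (which appear to correspond to protection key 0) can
         * further strip read, write or execute permission from *gpte.
         */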
#ifdef CONFIG_PPC_64K_PAGES
#define MMU_BASE_PSIZE	MMU_PAGE_64K
#else
#define MMU_BASE_PSIZE	MMU_PAGE_4K
#endif

static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift)
        int psize = MMU_BASE_PSIZE;

        if (pshift >= PMD_SHIFT)
        addr |= mmu_psize_defs[psize].ap << 5;
        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %1, 0, 0, 1)
                     : : "r" (addr), "r" (kvm->arch.lpid) : "memory");
        asm volatile("ptesync": : :"memory");
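/*
 * Note on the tlbie above: assuming the usual PPC_TLBIE_5(rb, rs, ric, prs, r)
 * operand order, this issues a partition-scoped (prs = 0) radix (r = 1)
 * invalidation of the TLB entry (ric = 0) for this LPID, with the actual page
 * size encoded in the AP field of the address (bits 5-7, taken from
 * mmu_psize_defs above).  The surrounding ptesync instructions order the
 * preceding PTE update against the invalidation and wait for it to complete.
 */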
unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                      unsigned long clr, unsigned long set,
                                      unsigned long addr, unsigned int shift)
        unsigned long old = 0;

        if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
            pte_present(*ptep)) {
                /* have to invalidate it first */
                old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
                kvmppc_radix_tlbie_page(kvm, addr, shift);
                set |= _PAGE_PRESENT;
                old &= _PAGE_PRESENT;
        return __radix_pte_update(ptep, clr, set) | old;
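/*
 * Rationale for the DD1 workaround above: because it clears _PAGE_PRESENT
 * before doing the real update, the value returned by the second
 * __radix_pte_update() would show the page as already not present.  OR-ing
 * back the saved _PAGE_PRESENT bit means callers still see the original
 * state of the PTE in the returned "old" value.
 */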
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
        radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);

static struct kmem_cache *kvm_pte_cache;

static pte_t *kvmppc_pte_alloc(void)
        return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);

static void kvmppc_pte_free(pte_t *ptep)
        kmem_cache_free(kvm_pte_cache, ptep);
/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
        return !!(pmd_val(pmd) & _PAGE_PTE);
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                             unsigned int level, unsigned long mmu_seq)
        pud_t *pud, *new_pud = NULL;
        pmd_t *pmd, *new_pmd = NULL;
        pte_t *ptep, *new_ptep = NULL;

        /* Traverse the guest's 2nd-level tree, allocate new levels needed */
        pgd = kvm->arch.pgtable + pgd_index(gpa);
        if (pgd_present(*pgd))
                pud = pud_offset(pgd, gpa);
        else
                new_pud = pud_alloc_one(kvm->mm, gpa);
        if (pud && pud_present(*pud))
                pmd = pmd_offset(pud, gpa);
        else
                new_pmd = pmd_alloc_one(kvm->mm, gpa);
        if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
                new_ptep = kvmppc_pte_alloc();
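        /*
         * Note on the allocations above: any levels that might be needed are
         * allocated before taking kvm->mmu_lock, since these allocations may
         * sleep and so cannot be done under the spinlock.  If the walk under
         * the lock finds they are not needed after all, they are freed again
         * at the end of this function.
         */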
        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        if (mmu_notifier_retry(kvm, mmu_seq))

        /* Now traverse again under the lock and change the tree */
        if (pgd_none(*pgd)) {
                pgd_populate(kvm->mm, pgd, new_pud);
        pud = pud_offset(pgd, gpa);
        if (pud_none(*pud)) {
                pud_populate(kvm->mm, pud, new_pmd);
        pmd = pmd_offset(pud, gpa);
        if (pmd_is_leaf(*pmd)) {
                unsigned long lgpa = gpa & PMD_MASK;

                /*
                 * If we raced with another CPU which has just put
                 * a 2MB pte in after we saw a pte page, try again.
                 */
                if (level == 0 && !new_ptep) {
                /* Valid 2MB page here already, remove it */
                old = kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
                                              ~0UL, 0, lgpa, PMD_SHIFT);
                kvmppc_radix_tlbie_page(kvm, lgpa, PMD_SHIFT);
                if (old & _PAGE_DIRTY) {
                        unsigned long gfn = lgpa >> PAGE_SHIFT;
                        struct kvm_memory_slot *memslot;

                        memslot = gfn_to_memslot(kvm, gfn);
                        if (memslot && memslot->dirty_bitmap)
                                kvmppc_update_dirty_map(memslot,
                                                        gfn, PMD_SIZE);
        } else if (level == 1 && !pmd_none(*pmd)) {
                /*
                 * There's a page table page here, but we wanted
                 * to install a large page.  Tell the caller and let
                 * it try installing a normal page if it wants.
                 */
        if (pmd_none(*pmd)) {
                pmd_populate(kvm->mm, pmd, new_ptep);
        ptep = pte_offset_kernel(pmd, gpa);
        if (pte_present(*ptep)) {
                /* PTE was previously valid, so invalidate it */
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT,
                                              0, gpa, 0);
                kvmppc_radix_tlbie_page(kvm, gpa, 0);
                if (old & _PAGE_DIRTY)
                        mark_page_dirty(kvm, gpa >> PAGE_SHIFT);
        kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
        kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);

        spin_unlock(&kvm->mmu_lock);
        pud_free(kvm->mm, new_pud);
        pmd_free(kvm->mm, new_pmd);
        kvmppc_pte_free(new_ptep);
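/*
 * Taken together, kvmppc_create_pte() pre-allocates any tree levels it might
 * need, re-walks the tree under kvm->mmu_lock, invalidates any existing 2MB
 * leaf or small PTE at the target slot (transferring its dirty state to the
 * dirty map), and then writes the new PTE at either the PMD level (2MB) or
 * the PTE level.  As the comments above note, the caller is asked to retry
 * after an invalidation race, and to fall back to a small page when a
 * page-table page already occupies the PMD slot.
 */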
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq, pte_size;
        unsigned long gpa, gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        struct page *page = NULL, *pages[1];
        long ret, npages, ok;
        unsigned int writing;
        struct vm_area_struct *vma;
        unsigned long pgflags;
        unsigned int shift, level;
        /* Check for unusual errors */
        if (dsisr & DSISR_UNSUPP_MMU) {
                pr_err("KVM: Got unsupported MMU fault\n");
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);

        /* Translate the logical address and get the page */
        gpa = vcpu->arch.fault_gpa & ~0xfffUL;
        gpa &= ~0xF000000000000000ul;
        gfn = gpa >> PAGE_SHIFT;
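        /*
         * Note on the masking above: fault_gpa as supplied by the hardware
         * carries the page offset in its low 12 bits and what appear to be
         * quadrant/control bits in its top nibble; both are cleared here so
         * that gpa/gfn refer to the guest-physical page itself.
         */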
        if (!(dsisr & DSISR_PRTABLE_FAULT))
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
                        /*
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;

        writing = (dsisr & DSISR_ISSTORE) != 0;
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (dsisr & DSISR_SET_RC) {
                /*
                 * Need to set an R or C bit in the 2nd-level tables;
                 * if the relevant bits aren't already set in the linux
                 * page tables, fall through to do the gup_fast to
                 * set them in the linux page tables too.
                 */
                pgflags = _PAGE_ACCESSED;
                pgflags |= _PAGE_DIRTY;
                local_irq_save(flags);
                ptep = find_current_mm_pte(current->mm->pgd, hva, NULL, NULL);
                pte = READ_ONCE(*ptep);
                if (pte_present(pte) &&
                    (pte_val(pte) & pgflags) == pgflags)
                local_irq_restore(flags);
                spin_lock(&kvm->mmu_lock);
                if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
                        spin_unlock(&kvm->mmu_lock);
                /*
                 * We are walking the secondary page table here. We can do this
                 * without disabling irq.
                 */
                ptep = __find_linux_pte(kvm->arch.pgtable,
                                        gpa, NULL, &shift);
                if (ptep && pte_present(*ptep)) {
                        kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
                                                gpa, shift);
                        spin_unlock(&kvm->mmu_lock);
                spin_unlock(&kvm->mmu_lock);
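        /*
         * The DSISR_SET_RC case above is the "referenced/changed update only"
         * fast path: if the Linux PTE for the backing page already has the
         * required R/C bits, the partition-scoped PTE is updated in place
         * under mmu_lock and the fault is handled there; otherwise execution
         * falls through to the full get_user_pages path below, which also
         * sets the bits in the Linux page tables.
         */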
        pte_size = PAGE_SIZE;
        pgflags = _PAGE_READ | _PAGE_EXEC;
        npages = get_user_pages_fast(hva, 1, writing, pages);

        /* Check if it's an I/O mapping */
        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, hva);
        if (vma && vma->vm_start <= hva && hva < vma->vm_end &&
            (vma->vm_flags & VM_PFNMAP)) {
                pfn = vma->vm_pgoff +
                        ((hva - vma->vm_start) >> PAGE_SHIFT);
                pgflags = pgprot_val(vma->vm_page_prot);
        up_read(&current->mm->mmap_sem);
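        /*
         * For a VM_PFNMAP VMA (device or other I/O memory with no struct
         * page), the pfn is computed directly from the VMA's vm_pgoff plus
         * the offset of hva within the VMA, and the PTE protection bits are
         * taken from the VMA rather than from the defaults set above.
         */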
        pfn = page_to_pfn(page);
        if (PageCompound(page)) {
                pte_size <<= compound_order(compound_head(page));
                /* See if we can insert a 2MB large-page PTE here */
                if (pte_size >= PMD_SIZE &&
                    (gpa & (PMD_SIZE - PAGE_SIZE)) ==
                    (hva & (PMD_SIZE - PAGE_SIZE))) {
                        pfn &= ~((PMD_SIZE >> PAGE_SHIFT) - 1);

        /* See if we can provide write access */
        pgflags |= _PAGE_WRITE;
        local_irq_save(flags);
        ptep = find_current_mm_pte(current->mm->pgd,
                                   hva, NULL, NULL);
        if (ptep && pte_write(*ptep))
                pgflags |= _PAGE_WRITE;
        local_irq_restore(flags);
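        /*
         * About the 2MB test above: the fault can only be backed by a 2MB PTE
         * if the compound page is at least PMD_SIZE and the guest physical
         * address and host virtual address are congruent modulo 2MB (same
         * offset within a 2MB region), so that one large PTE maps the whole
         * naturally aligned 2MB block; pfn is then rounded down to the start
         * of that block.
         */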
        /*
         * Compute the PTE value that we need to insert.
         */
        pgflags |= _PAGE_PRESENT | _PAGE_PTE | _PAGE_ACCESSED;
        if (pgflags & _PAGE_WRITE)
                pgflags |= _PAGE_DIRTY;
        pte = pfn_pte(pfn, __pgprot(pgflags));

        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
        /*
         * There's already a PMD where we wanted to install a large page;
         * for now, fall back to installing a small page.
         */
        pfn |= gfn & ((PMD_SIZE >> PAGE_SHIFT) - 1);
        pte = pfn_pte(pfn, __pgprot(pgflags));
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);

        if (!ret && (pgflags & _PAGE_WRITE))
                set_page_dirty_lock(page);

        if (ret == 0 || ret == -EAGAIN)
/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                    unsigned long gfn)
        unsigned long gpa = gfn << PAGE_SHIFT;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_PRESENT, 0,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
                        unsigned long npages = 1;

                        npages = 1ul << (shift - PAGE_SHIFT);
                        kvmppc_update_dirty_map(memslot, gfn, npages);
/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  unsigned long gfn)
        unsigned long gpa = gfn << PAGE_SHIFT;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                        gpa, shift);
                /* XXX need to flush tlb here? */

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                       unsigned long gfn)
        unsigned long gpa = gfn << PAGE_SHIFT;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot, int pagenum)
        unsigned long gfn = memslot->base_gfn + pagenum;
        unsigned long gpa = gfn << PAGE_SHIFT;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1 << (shift - PAGE_SHIFT);
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                        gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot, unsigned long *map)
        for (i = 0; i < memslot->npages; i = j) {
                npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

                /*
                 * Note that if npages > 0 then i must be a multiple of npages,
                 * since huge pages are only used to back the guest at guest
                 * real addresses that are a multiple of their size.
                 * Since we have at most one PTE covering any given guest
                 * real address, if npages > 1 we can skip to i + npages.
                 */
                set_dirty_bits(map, i, npages);
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
                                 int psize, int *indexp)
        if (!mmu_psize_defs[psize].shift)
        info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
                (mmu_psize_defs[psize].ap << 29);

int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
        if (!radix_enabled())
        memset(info, 0, sizeof(*info));

        info->geometries[0].page_shift = 12;
        info->geometries[0].level_bits[0] = 9;
        for (i = 1; i < 4; ++i)
                info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
        info->geometries[1].page_shift = 16;
        for (i = 0; i < 4; ++i)
                info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

        add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);
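        /*
         * Each ap_encodings entry packs the page shift into the low bits and
         * the corresponding AP (actual page size) field value into bits
         * 31:29, which appears to be the layout userspace expects when it
         * queries the supported radix geometries and page sizes through the
         * KVM_PPC_GET_RMMU_INFO interface that returns this structure.
         */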
int kvmppc_init_vm_radix(struct kvm *kvm)
        kvm->arch.pgtable = pgd_alloc(kvm->mm);
        if (!kvm->arch.pgtable)

void kvmppc_free_radix(struct kvm *kvm)
        unsigned long ig, iu, im;

        if (!kvm->arch.pgtable)
        pgd = kvm->arch.pgtable;
        for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
                if (!pgd_present(*pgd))
                pud = pud_offset(pgd, 0);
                for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++pud) {
                        if (!pud_present(*pud))
                        pmd = pmd_offset(pud, 0);
                        for (im = 0; im < PTRS_PER_PMD; ++im, ++pmd) {
                                if (pmd_is_leaf(*pmd)) {
                                if (!pmd_present(*pmd))
                                pte = pte_offset_map(pmd, 0);
                                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
                                kvmppc_pte_free(pte);
                        pmd_free(kvm->mm, pmd_offset(pud, 0));
                pud_free(kvm->mm, pud_offset(pgd, 0));
        pgd_free(kvm->mm, kvm->arch.pgtable);
        kvm->arch.pgtable = NULL;
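/*
 * Teardown order in kvmppc_free_radix(): leaf PMD entries are simply dropped
 * (the memory they map belongs to the backing pages, not to the tree), PTE
 * pages are cleared and returned to kvm_pte_cache, and then the PMD, PUD and
 * PGD levels are freed from the bottom up.
 */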
static void pte_ctor(void *addr)
        memset(addr, 0, PTE_TABLE_SIZE);

int kvmppc_radix_init(void)
        unsigned long size = sizeof(void *) << PTE_INDEX_SIZE;

        kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);

void kvmppc_radix_exit(void)
        kmem_cache_destroy(kvm_pte_cache);
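/*
 * The kvm-pte cache uses the same value for object size and alignment
 * (sizeof(void *) << PTE_INDEX_SIZE bytes), so each allocation is a naturally
 * aligned partition-scoped PTE page.  Presumably a dedicated cache is used
 * because such a PTE page can be smaller than a full kernel page with this
 * geometry, so allocating whole pages for them would waste memory.
 */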