/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>
bool hugetlb_disabled = false;
#define hugepd_none(hpd)	(hpd_val(hpd) == 0)
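/*
 * PGT_CACHE() index used for hugepd tables whose entries are PTE-sized:
 * log2(sizeof(pte_basic_t) / sizeof(void *)).
 */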
#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_basic_t)) - \
			 __builtin_ffs(sizeof(void *)))
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	if (!cachep) {
		WARN_ONCE(1, "No page table cache created for hugetlb tables");
		return -ENOMEM;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	p4d_t *p4;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz - 1);
	pg = pgd_offset(mm, addr);
	p4 = p4d_offset(pg, addr);

#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *)p4;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			if (pshift == PMD_SHIFT)
				/* 16MB huge page */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)p4;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, p4, addr);
		if (!pu)
			return NULL;
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (!pm)
				return NULL;
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	if (IS_ENABLED(CONFIG_PPC_8xx) && sz == SZ_512K)
		return pte_alloc_map(mm, (pmd_t *)hpdp, addr);

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;
/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		addr += page_size;
		number_of_pages--;
	}
}
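/*
 * Hand one recorded gigantic page back to the generic hugetlb boot allocator:
 * pop its address off gpage_freearray[] and queue it on huge_boot_pages for
 * the given hstate.
 */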
int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}
#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);
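/*
 * Hugepd tables are freed in per-CPU batches and handed to RCU, so that
 * concurrent lockless page table walkers never see a table that has already
 * been returned to the cache.
 */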
static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif
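/*
 * Clear the hugepd entries covering this range and free the hugepte table
 * they point to, unless the floor/ceiling limits show that part of the range
 * is still in use.
 */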
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}
static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}
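/*
 * Walk the PMDs under one PUD, freeing hugepd tables (or, on 8xx, the normal
 * PTE tables that back 512K pages), then free the PMD table itself once the
 * whole range it covers is being torn down.
 */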
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			if (pmd_none_or_clear_bad(pmd))
				continue;

			/*
			 * If it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!IS_ENABLED(CONFIG_PPC_8xx));

			hugetlb_free_pte_range(tlb, pmd, addr);

			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
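/*
 * Walk the PUDs under one P4D entry, freeing hugepd tables or recursing into
 * the PMD level, then free the PUD table itself once its whole range is gone.
 */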
static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(p4d, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		p4d = p4d_offset(pgd, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (p4d_none_or_clear_bad(p4d))
				continue;
			hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)p4d, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
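/*
 * follow_page() helper for hugepd-mapped ranges: translate @address through
 * the hugepd entry and return the matching subpage, retrying after waiting
 * if a migration entry is found instead.
 */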
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * hugepage directory entries are protected by mm->page_table_lock
	 * Use this instead of huge_pte_lockptr
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}
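/*
 * Pick an unmapped area for a hugetlb mapping: defer to the radix
 * implementation when the radix MMU is enabled, otherwise honour the
 * address-slice constraints of the hash MMU.
 */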
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
#endif
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
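/*
 * Report the MMU page size backing this VMA: derived from the slice psize on
 * hash MMUs, otherwise from the VMA itself via vma_kernel_pagesize().
 */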
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive it from the vma. */
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/*
	 * Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits.
	 */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return false;
	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return false;
	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);
	return true;
}
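/*
 * Validate a requested hugepage size against the hardware and, if usable,
 * register the matching hstate with the generic hugetlb code.
 */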
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);

	if (!arch_hugetlb_valid_size((unsigned long)size))
		return -EINVAL;
	hugetlb_add_hstate(shift - PAGE_SHIFT);
	return 0;
}
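/*
 * Boot-time setup: register an hstate for every hugepage size the MMU
 * supports and create the page table caches that back hugepd tables.
 */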
static int __init hugetlbpage_init(void)
{
	bool configured = false;
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use a
		 * pgt cache for the hugepd.
		 */
		if (pdshift > shift) {
			if (!IS_ENABLED(CONFIG_PPC_8xx))
				pgtable_cache_add(pdshift - shift);
		} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
			   IS_ENABLED(CONFIG_PPC_8xx)) {
			pgtable_cache_add(PTE_T_ORDER);
		}

		configured = true;
	}

	if (configured) {
		if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
			hugetlbpage_init_default();
	} else
		pr_info("Failed to initialize. Disabling HugeTLB\n");

	return 0;
}
arch_initcall(hugetlbpage_init);
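/*
 * Flush the data cache and invalidate the instruction cache for every
 * subpage of a compound huge page, temporarily mapping highmem subpages
 * as needed.
 */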
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < compound_nr(page); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page + i));
		} else {
			start = kmap_atomic(page + i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}