// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm64/mm/hugetlbpage.c
 *
 * Copyright (C) 2013 Linaro Ltd.
 *
 * Based on arch/x86/mm/hugetlbpage.c.
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
/*
 * HugeTLB Support Matrix
 *
 * ---------------------------------------------------
 * | Page Size | CONT PTE |  PMD  | CONT PMD |  PUD  |
 * ---------------------------------------------------
 * |     4K    |   64K    |   2M  |    32M   |   1G  |
 * |    16K    |    2M    |  32M  |     1G   |       |
 * |    64K    |    2M    | 512M  |    16G   |       |
 * ---------------------------------------------------
 */
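
/*
 * Worked example (added for illustration; the authoritative definitions
 * live in asm/pgtable-hwdef.h): each CONT size above is simply the number
 * of contiguous entries times the size of one entry, e.g. with a 4K
 * granule:
 *
 *	CONT_PTE_SIZE = CONT_PTES * PAGE_SIZE = 16 * 4K = 64K
 *	CONT_PMD_SIZE = CONT_PMDS * PMD_SIZE  = 16 * 2M = 32M
 */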
/*
 * Reserve CMA areas for the largest supported gigantic
 * huge page when requested. Any other smaller gigantic
 * huge pages could still be served from those areas.
 */
#ifdef CONFIG_CMA
void __init arm64_hugetlb_cma_reserve(void)
{
	int order;

	if (pud_sect_supported())
		order = PUD_SHIFT - PAGE_SHIFT;
	else
		order = CONT_PMD_SHIFT - PAGE_SHIFT;

	/*
	 * HugeTLB CMA reservation is required for gigantic
	 * huge pages which could not be allocated via the
	 * page allocator. Just warn if there is any change
	 * breaking this assumption.
	 */
	WARN_ON(order <= MAX_ORDER);
	hugetlb_cma_reserve(order);
}
#endif /* CONFIG_CMA */
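
/*
 * Usage note (added for clarity, not from the original file):
 * arm64_hugetlb_cma_reserve() is expected to be called once from the arm64
 * early boot code, and hugetlb_cma_reserve() itself only reserves memory
 * when a CMA size has been requested on the command line (the generic
 * "hugetlb_cma=" parameter); otherwise it is a no-op.
 */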
static bool __hugetlb_valid_size(unsigned long size)
{
	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return pud_sect_supported();
#endif
	case CONT_PMD_SIZE:
	case PMD_SIZE:
	case CONT_PTE_SIZE:
		return true;
	}

	return false;
}
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
bool arch_hugetlb_migration_supported(struct hstate *h)
{
	size_t pagesize = huge_page_size(h);

	if (!__hugetlb_valid_size(pagesize)) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
		return false;
	}
	return true;
}
#endif
int pmd_huge(pmd_t pmd)
{
	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
}
int pud_huge(pud_t pud)
{
#ifndef __PAGETABLE_PMD_FOLDED
	return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT);
#else
	return 0;
#endif
}
/* Number of contiguous entries (and their size) backing a huge pte at addr. */
static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
{
	pgd_t *pgdp = pgd_offset(mm, addr);
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;

	*pgsize = PAGE_SIZE;
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_offset(p4dp, addr);
	pmdp = pmd_offset(pudp, addr);
	if ((pte_t *)pmdp == ptep) {
		*pgsize = PMD_SIZE;
		return CONT_PMDS;
	}
	return CONT_PTES;
}
static inline int num_contig_ptes(unsigned long size, size_t *pgsize)
{
	int contig_ptes = 0;

	*pgsize = size;

	switch (size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		if (pud_sect_supported())
			contig_ptes = 1;
		break;
#endif
	case PMD_SIZE:
		contig_ptes = 1;
		break;
	case CONT_PMD_SIZE:
		*pgsize = PMD_SIZE;
		contig_ptes = CONT_PMDS;
		break;
	case CONT_PTE_SIZE:
		*pgsize = PAGE_SIZE;
		contig_ptes = CONT_PTES;
		break;
	}

	return contig_ptes;
}
pte_t huge_ptep_get(pte_t *ptep)
{
	int ncontig, i;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_present(orig_pte) || !pte_cont(orig_pte))
		return orig_pte;

	ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);
	for (i = 0; i < ncontig; i++, ptep++) {
		pte_t pte = ptep_get(ptep);

		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step.
 */
static pte_t get_clear_contig(struct mm_struct *mm,
			      unsigned long addr,
			      pte_t *ptep,
			      unsigned long pgsize,
			      unsigned long ncontig)
{
	pte_t orig_pte = ptep_get(ptep);
	unsigned long i;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
		pte_t pte = ptep_get_and_clear(mm, addr, ptep);

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}
	return orig_pte;
}
static pte_t get_clear_contig_flush(struct mm_struct *mm,
				    unsigned long addr,
				    pte_t *ptep,
				    unsigned long pgsize,
				    unsigned long ncontig)
{
	pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);

	flush_tlb_range(&vma, addr, addr + (pgsize * ncontig));
	return orig_pte;
}
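
/*
 * Illustrative sketch (added for clarity; the names below refer to helpers
 * and callers already in this file): a full Break-Before-Make update of a
 * contiguous set built from these helpers looks roughly like the sequence
 * used by huge_ptep_set_access_flags():
 *
 *	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
 *	// fold the hardware-set dirty/young bits of orig_pte into pte
 *	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 *		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
 *
 * i.e. every entry is cleared and the TLB flushed before any entry of the
 * new mapping is written.
 */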
/*
 * Changing some bits of contiguous entries requires us to follow a
 * Break-Before-Make approach, breaking the whole contiguous set
 * before we can change any entries. See ARM DDI 0487A.k_iss10775,
 * "Misprogramming of the Contiguous bit", page D4-1762.
 *
 * This helper performs the break step for use cases where the
 * original pte is not needed.
 */
static void clear_flush(struct mm_struct *mm,
			unsigned long addr,
			pte_t *ptep,
			unsigned long pgsize,
			unsigned long ncontig)
{
	struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
	unsigned long i, saddr = addr;

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		ptep_clear(mm, addr, ptep);

	flush_tlb_range(&vma, saddr, addr);
}
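
/*
 * Note (added for clarity): set_huge_pte_at() below is the typical user of
 * clear_flush(); it breaks and flushes the old contiguous set first and
 * only then writes the new entries, so new and stale translations for the
 * range never coexist in the TLB.
 */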
static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry)
{
	VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry));

	return page_folio(pfn_to_page(swp_offset_pfn(entry)));
}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	size_t pgsize;
	int i;
	int ncontig;
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;

	if (!pte_present(pte)) {
		struct folio *folio;

		folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte));
		ncontig = num_contig_ptes(folio_size(folio), &pgsize);

		for (i = 0; i < ncontig; i++, ptep++)
			set_pte_at(mm, addr, ptep, pte);
		return;
	}

	if (!pte_cont(pte)) {
		set_pte_at(mm, addr, ptep, pte);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	pfn = pte_pfn(pte);
	dpfn = pgsize >> PAGE_SHIFT;
	hugeprot = pte_pgprot(pte);

	clear_flush(mm, addr, ptep, pgsize, ncontig);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_offset(pgdp, addr);
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		return NULL;

	if (sz == PUD_SIZE) {
		ptep = (pte_t *)pudp;
	} else if (sz == (CONT_PTE_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		if (!pmdp)
			return NULL;

		WARN_ON(addr & (sz - 1));
		ptep = pte_alloc_huge(mm, pmdp, addr);
	} else if (sz == PMD_SIZE) {
		if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp)))
			ptep = huge_pmd_share(mm, vma, addr, pudp);
		else
			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
	} else if (sz == (CONT_PMD_SIZE)) {
		pmdp = pmd_alloc(mm, pudp, addr);
		WARN_ON(addr & (sz - 1));
		return (pte_t *)pmdp;
	}

	return ptep;
}
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;

	pgdp = pgd_offset(mm, addr);
	if (!pgd_present(READ_ONCE(*pgdp)))
		return NULL;

	p4dp = p4d_offset(pgdp, addr);
	if (!p4d_present(READ_ONCE(*p4dp)))
		return NULL;

	pudp = pud_offset(p4dp, addr);
	pud = READ_ONCE(*pudp);
	if (sz != PUD_SIZE && pud_none(pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(pud) || !pud_present(pud))
		return (pte_t *)pudp;
	/* table; check the next level */

	if (sz == CONT_PMD_SIZE)
		addr &= CONT_PMD_MASK;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
	    pmd_none(pmd))
		return NULL;
	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;

	if (sz == CONT_PTE_SIZE)
		return pte_offset_huge(pmdp, (addr & CONT_PTE_MASK));

	return NULL;
}
unsigned long hugetlb_mask_last_page(struct hstate *h)
{
	unsigned long hp_size = huge_page_size(h);

	switch (hp_size) {
#ifndef __PAGETABLE_PMD_FOLDED
	case PUD_SIZE:
		return PGDIR_SIZE - PUD_SIZE;
#endif
	case CONT_PMD_SIZE:
		return PUD_SIZE - CONT_PMD_SIZE;
	case PMD_SIZE:
		return PUD_SIZE - PMD_SIZE;
	case CONT_PTE_SIZE:
		return PMD_SIZE - CONT_PTE_SIZE;
	default:
		break;
	}

	return 0UL;
}
pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	size_t pagesize = 1UL << shift;

	entry = pte_mkhuge(entry);
	if (pagesize == CONT_PTE_SIZE) {
		entry = pte_mkcont(entry);
	} else if (pagesize == CONT_PMD_SIZE) {
		entry = pmd_pte(pmd_mkcont(pte_pmd(entry)));
	} else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {
		pr_warn("%s: unrecognized huge page size 0x%lx\n",
			__func__, pagesize);
	}
	return entry;
}
void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	ncontig = num_contig_ptes(sz, &pgsize);

	for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
		pte_clear(mm, addr, ptep);
}
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	int ncontig;
	size_t pgsize;
	pte_t orig_pte = ptep_get(ptep);

	if (!pte_cont(orig_pte))
		return ptep_get_and_clear(mm, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);

	return get_clear_contig(mm, addr, ptep, pgsize, ncontig);
}
/*
 * huge_ptep_set_access_flags will update access flags (dirty, accessed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy in the dirty or young state.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	struct mm_struct *mm = vma->vm_mm;
	pgprot_t hugeprot;
	pte_t orig_pte;

	if (!pte_cont(pte))
		return ptep_set_access_flags(vma, addr, ptep, pte, dirty);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	orig_pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return 1;
}
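
/*
 * Worked example (added for illustration, using the 4K granule numbers
 * from the support matrix above): for a 64K CONT PTE hugepage,
 * find_num_contig() reports 16 entries of 4K each. If only the access
 * flags change, all 16 entries are cleared and flushed in one go via
 * get_clear_contig_flush() and then rewritten with the merged dirty and
 * young state, so the contiguous set is never partially updated.
 */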
void huge_ptep_set_wrprotect(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	unsigned long pfn, dpfn;
	pgprot_t hugeprot;
	int ncontig, i;
	size_t pgsize;
	pte_t pte;

	if (!pte_cont(READ_ONCE(*ptep))) {
		ptep_set_wrprotect(mm, addr, ptep);
		return;
	}

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	pte = get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
	pte = pte_wrprotect(pte);

	hugeprot = pte_pgprot(pte);
	pfn = pte_pfn(pte);

	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
}
pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
			    unsigned long addr, pte_t *ptep)
{
	struct mm_struct *mm = vma->vm_mm;
	size_t pgsize;
	int ncontig;

	if (!pte_cont(READ_ONCE(*ptep)))
		return ptep_clear_flush(vma, addr, ptep);

	ncontig = find_num_contig(mm, addr, ptep, &pgsize);
	return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig);
}
static int __init hugetlbpage_init(void)
{
	if (pud_sect_supported())
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);

	hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT);

	return 0;
}
arch_initcall(hugetlbpage_init);
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	return __hugetlb_valid_size(size);
}
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
		/*
		 * Break-before-make (BBM) is required for all user space
		 * mappings when the permission changes from executable to
		 * non-executable in cases where the CPU is affected by
		 * erratum #2645198.
		 */
		if (pte_user_exec(READ_ONCE(*ptep)))
			return huge_ptep_clear_flush(vma, addr, ptep);
	}
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
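
/*
 * Usage note (added for clarity): huge_ptep_modify_prot_start() above and
 * huge_ptep_modify_prot_commit() below form a start/commit pair; the start
 * step clears (and, on CPUs affected by erratum #2645198, flushes) the old
 * entry, and the commit step installs the new entry via set_huge_pte_at().
 */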
void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
				  pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}