/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/page_ref.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/pgtable.h>
#include <linux/gfp.h>
#include <linux/userfaultfd_k.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;
struct node;

#ifndef CONFIG_ARCH_HAS_HUGEPD
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
#endif

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

/*
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata.
 */
#define __NR_USED_SUBPAGE 3

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};
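
/*
 * Illustrative usage sketch, not a definition from this header: a consumer
 * such as a hugetlbfs mount would typically pair the constructor and
 * destructor declared below ("max" and "min" are hypothetical locals):
 *
 *	struct hugepage_subpool *spool;
 *
 *	spool = hugepage_new_subpool(&default_hstate, max, min);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 */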

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On private mappings, the counter to uncharge reservations is stored
	 * here. If these fields are 0, then either the mapping is shared, or
	 * cgroup accounting is disabled for this resv_map.
	 */
	struct page_counter *reservation_counter;
	unsigned long pages_per_hpage;
	struct cgroup_subsys_state *css;
#endif
};

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock. The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map. The from and to elements are huge page
 * indices into the associated mapping. from indicates the starting index
 * of the region. to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping. It is important to note that the to element
 * represents the first element past the end of the region. This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
#ifdef CONFIG_CGROUP_HUGETLB
	/*
	 * On shared mappings, each reserved region appears as a struct
	 * file_region in resv_map. These fields hold the info needed to
	 * uncharge each reservation.
	 */
	struct page_counter *reservation_counter;
	struct cgroup_subsys_state *css;
#endif
};

struct hugetlb_vma_lock {
	struct kref refs;
	struct rw_semaphore rw_sema;
	struct vm_area_struct *vma;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
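
/*
 * Example (sketch, not from this header): for_each_hstate() visits every
 * registered huge page size, e.g. to report per-hstate pool state:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */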

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void hugetlb_dup_vma_private(struct vm_area_struct *vma);
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
		loff_t *);

int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len);
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
			    struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *,
			  zap_flags_t);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page, zap_flags_t zap_flags);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(char *buf, int len, int nid);
void hugetlb_show_meminfo_node(int nid);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
#ifdef CONFIG_USERFAULTFD
int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     uffd_flags_t flags,
			     struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_hugetlb(struct folio *folio, struct list_head *list);
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
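
/*
 * Sketch of the intended pairing (the real users live in mm/hugetlb.c and
 * fs/hugetlbfs; "mapping" and "idx" are hypothetical locals here):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... fault or truncate work for (mapping, idx) ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */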

pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud);

struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

#ifndef CONFIG_HIGHPTE
/*
 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 * which may go down to the lowest PTE level in their huge_pte_offset() and
 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 */
static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}
static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long address)
{
	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
}
#endif
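
/*
 * Usage sketch (an assumption, not a requirement of this header): an arch
 * huge_pte_alloc()/huge_pte_offset() implementation that can reach the PTE
 * level would call, given a pmd_t *pmd already computed for @addr:
 *
 *	pte = pte_alloc_huge(mm, pmd, addr);	// allocating walk
 *	pte = pte_offset_huge(pmd, addr);	// read-only walk
 */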

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz);
/*
 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 * Returns the pte_t* if found, or NULL if the address is not mapped.
 *
 * IMPORTANT: we should normally not directly call this function, instead
 * this is only a common interface to implement arch-specific
 * walker. Please use hugetlb_walk() instead, because that will attempt to
 * verify the locking for you.
 *
 * Since this function will walk all the pgtable pages (including not only
 * high-level pgtable page, but also PUD entry that can be unshared
 * concurrently for VM_SHARED), the caller of this function should be
 * responsible for its thread safety. One can follow this rule:
 *
 * (1) For private mappings: pmd unsharing is not possible, so holding the
 *     mmap_lock for either read or write is sufficient. Most callers
 *     already hold the mmap_lock, so normally, no special action is
 *     required.
 *
 * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 *     pgtable page can go away from under us! It can be done by a pmd
 *     unshare with a follow up munmap() on the other process), then we
 *     need either:
 *
 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 *           won't happen upon the range (it also makes sure the pte_t we
 *           read is the right and stable one), or,
 *
 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 *           sure even if unshare happened the racy unmap() will wait until
 *           i_mmap_rwsem is released.
 *
 * Option (2.1) is the safest, which guarantees pte stability from pmd
 * sharing pov, until the vma lock released. Option (2.2) doesn't protect
 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
 * be accessed.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
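
/*
 * A minimal locking sketch following rule (2.1) above, using the wrappers
 * declared below ("h", "addr" and the pte dereference are illustrative):
 *
 *	pte_t *ptep;
 *
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep)
 *		... read *ptep; pmd unshare cannot race with us here ...
 *	hugetlb_vma_unlock_read(vma);
 */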
unsigned long hugetlb_mask_last_page(struct hstate *h);
int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);

void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
void hugetlb_vma_lock_release(struct kref *kref);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot,
		unsigned long cp_flags);

bool is_hugetlb_entry_migration(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
}

static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline struct address_space *hugetlb_page_mapping_lock_write(
							struct page *hpage)
{
	return NULL;
}

static inline int huge_pmd_unshare(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
				unsigned long address, unsigned int flags)
{
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
}

static inline long follow_hugetlb_page(struct mm_struct *mm,
			struct vm_area_struct *vma, struct page **pages,
			struct vm_area_struct **vmas, unsigned long *position,
			unsigned long *nr_pages, long i, unsigned int flags,
			int *locked)
{
	BUILD_BUG();
	return 0;
}

static inline int copy_hugetlb_page_range(struct mm_struct *dst,
					  struct mm_struct *src,
					  struct vm_area_struct *dst_vma,
					  struct vm_area_struct *src_vma)
{
	BUG();
	return 0;
}

static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
					   struct vm_area_struct *new_vma,
					   unsigned long old_addr,
					   unsigned long new_addr,
					   unsigned long len)
{
	BUG();
	return 0;
}

static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}

static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	return 0;
}

static inline void hugetlb_show_meminfo_node(int nid)
{
}

static inline int prepare_hugepage_range(struct file *file,
				unsigned long addr, unsigned long len)
{
	return -EINVAL;
}

static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
{
}

static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
{
}

static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
{
	return 1;
}

static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
{
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}

static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
				unsigned long addr, unsigned long end,
				unsigned long floor, unsigned long ceiling)
{
	BUG();
}

#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
					   struct vm_area_struct *dst_vma,
					   unsigned long dst_addr,
					   unsigned long src_addr,
					   uffd_flags_t flags,
					   struct folio **foliop)
{
	BUG();
	return 0;
}
#endif /* CONFIG_USERFAULTFD */

static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
					unsigned long sz)
{
	return NULL;
}

static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
{
	return false;
}

static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	return 0;
}

static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
					bool *migratable_cleared)
{
	return 0;
}

static inline void folio_putback_active_hugetlb(struct folio *folio)
{
}

static inline void move_hugetlb_state(struct folio *old_folio,
					struct folio *new_folio, int reason)
{
}

static inline long hugetlb_change_protection(
			struct vm_area_struct *vma, unsigned long address,
			unsigned long end, pgprot_t newprot,
			unsigned long cp_flags)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page,
			zap_flags_t zap_flags)
{
	BUG();
}

static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	BUG();
	return 0;
}

static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				int creat_flags, int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		int creat_flags, int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

static inline struct hstate *hstate_inode(struct inode *i)
{
	return NULL;
}
#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

unsigned long
generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				  unsigned long len, unsigned long pgoff,
				  unsigned long flags);

/*
 * hugetlb page specific state flags. These flags are located in page.private
 * of the hugetlb head page. Functions created via the below macros should be
 * used to manipulate these flags.
 *
 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 *	allocation time. Cleared when page is fully instantiated. Free
 *	routine checks flag to restore a reservation on error paths.
 *	Synchronization: Examined or modified by code that knows it has
 *	the only reference to page. i.e. After allocation but before use
 *	or when the page is being freed.
 * HPG_migratable - Set after a newly allocated page is added to the page
 *	cache and/or page tables. Indicates the page is a candidate for
 *	migration.
 *	Synchronization: Initially set after new page allocation with no
 *	locking. When examined and modified during migration processing
 *	(isolate, migrate, putback) the hugetlb_lock is held.
 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 *	allocator. Typically used for migration target pages when no pages
 *	are available in the pool. The hugetlb free page path will
 *	immediately free pages with this flag set to the buddy allocator.
 *	Synchronization: Can be set after huge page allocation from buddy when
 *	code knows it has only reference. All other examinations and
 *	modifications require hugetlb_lock.
 * HPG_freed - Set when page is on the free lists.
 *	Synchronization: hugetlb_lock held for examination and modification.
 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 *	that is not tracked by raw_hwp_page list.
 */
enum hugetlb_page_flags {
	HPG_restore_reserve = 0,
	HPG_migratable,
	HPG_temporary,
	HPG_freed,
	HPG_vmemmap_optimized,
	HPG_raw_hwp_unreliable,
	__NR_HPAGEFLAGS,
};

/*
 * Macros to create test, set and clear function definitions for
 * hugetlb specific page flags.
 */
#ifdef CONFIG_HUGETLB_PAGE
#define TESTHPAGEFLAG(uname, flname)				\
static __always_inline						\
bool folio_test_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		return test_bit(HPG_##flname, private);		\
	}							\
static inline int HPage##uname(struct page *page)		\
	{ return test_bit(HPG_##flname, &(page->private)); }

#define SETHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_set_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		set_bit(HPG_##flname, private);			\
	}							\
static inline void SetHPage##uname(struct page *page)		\
	{ set_bit(HPG_##flname, &(page->private)); }

#define CLEARHPAGEFLAG(uname, flname)				\
static __always_inline						\
void folio_clear_hugetlb_##flname(struct folio *folio)		\
	{	void *private = &folio->private;		\
		clear_bit(HPG_##flname, private);		\
	}							\
static inline void ClearHPage##uname(struct page *page)	\
	{ clear_bit(HPG_##flname, &(page->private)); }
#else
#define TESTHPAGEFLAG(uname, flname)				\
static inline bool						\
folio_test_hugetlb_##flname(struct folio *folio)		\
	{ return 0; }						\
static inline int HPage##uname(struct page *page)		\
	{ return 0; }

#define SETHPAGEFLAG(uname, flname)				\
static inline void						\
folio_set_hugetlb_##flname(struct folio *folio)			\
	{ }							\
static inline void SetHPage##uname(struct page *page)		\
	{ }

#define CLEARHPAGEFLAG(uname, flname)				\
static inline void						\
folio_clear_hugetlb_##flname(struct folio *folio)		\
	{ }							\
static inline void ClearHPage##uname(struct page *page)	\
	{ }
#endif

#define HPAGEFLAG(uname, flname)				\
	TESTHPAGEFLAG(uname, flname)				\
	SETHPAGEFLAG(uname, flname)				\
	CLEARHPAGEFLAG(uname, flname)				\

/*
 * Create functions associated with hugetlb page flags
 */
HPAGEFLAG(RestoreReserve, restore_reserve)
HPAGEFLAG(Migratable, migratable)
HPAGEFLAG(Temporary, temporary)
HPAGEFLAG(Freed, freed)
HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
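
/*
 * For reference, each HPAGEFLAG(uname, flname) instantiation above expands
 * to three folio helpers plus the legacy page-based variants; e.g.
 * HPAGEFLAG(Freed, freed) yields (sketch of the generated signatures):
 *
 *	bool folio_test_hugetlb_freed(struct folio *folio);
 *	void folio_set_hugetlb_freed(struct folio *folio);
 *	void folio_clear_hugetlb_freed(struct folio *folio);
 *	int HPageFreed(struct page *page);	// plus Set/Clear variants
 */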

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	struct mutex resize_lock;
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned int demote_order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int max_huge_pages_node[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files_dfl[8];
	struct cftype cgroup_files_legacy[10];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask, gfp_t gfp_mask);
struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address, struct folio *folio);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
bool __init hugetlb_node_alloc_supported(void);

void __init hugetlb_add_hstate(unsigned order);
bool __init arch_hugetlb_valid_size(unsigned long size);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return folio->_hugetlb_subpool;
}

/*
 * hugetlb page subpool pointer located in hpage[2].hugetlb_subpool
 */
static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return hugetlb_folio_subpool(page_folio(hpage));
}

static inline void hugetlb_set_folio_subpool(struct folio *folio,
					struct hugepage_subpool *subpool)
{
	folio->_hugetlb_subpool = subpool;
}

static inline void hugetlb_set_page_subpool(struct page *hpage,
					struct hugepage_subpool *subpool)
{
	hugetlb_set_folio_subpool(page_folio(hpage), subpool);
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	if (page_size_log < BITS_PER_LONG)
		return size_to_hstate(1UL << page_size_log);

	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}
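
/*
 * Worked example (assumes a 4 KiB base page and a registered 2 MiB hstate):
 * hstate_sizelog(0) returns &default_hstate, while hstate_sizelog(21)
 * returns size_to_hstate(1UL << 21), i.e. the 2 MiB hstate, or NULL if no
 * such size was registered. This is the resolution path for MAP_HUGE_* /
 * SHM_HUGE_* style encoded size logs.
 */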

static inline unsigned long huge_page_size(const struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) > MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(const struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
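
/*
 * Worked example (assuming PAGE_SIZE == 4 KiB, PAGE_SHIFT == 12): for the
 * 2 MiB hstate, h->order == 9, so huge_page_size() == 4096 << 9 == 2 MiB,
 * huge_page_shift() == 21, pages_per_huge_page() == 512 and
 * blocks_per_huge_page() == 2 MiB / 512 == 4096 sectors.
 */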

#include <asm/hugetlb.h>

#ifndef is_hugepage_only_range
static inline int is_hugepage_only_range(struct mm_struct *mm,
					unsigned long addr, unsigned long len)
{
	return 0;
}
#define is_hugepage_only_range is_hugepage_only_range
#endif

#ifndef arch_clear_hugepage_flags
static inline void arch_clear_hugepage_flags(struct page *page) { }
#define arch_clear_hugepage_flags arch_clear_hugepage_flags
#endif

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
				       vm_flags_t flags)
{
	return pte_mkhuge(entry);
}
#endif

static inline struct hstate *folio_hstate(struct folio *folio)
{
	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
	return size_to_hstate(folio_size(folio));
}

static inline struct hstate *page_hstate(struct page *page)
{
	return folio_hstate(page_folio(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_MEMORY_FAILURE
extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
#else
static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
{
}
#endif

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * Movability check is different as compared to migration check.
 * It determines whether or not a huge page should be placed on
 * movable zone or not. Movability of any huge page should be
 * required only if huge page size is supported for migration.
 * There won't be any reason for the huge page to be movable if
 * it is not migratable to start with. Also the size of the huge
 * page should be large enough to be placed under a movable zone
 * and still feasible enough to be migratable. Just the presence
 * in movable zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable they should not be movable because it's not
 * feasible to migrate them from movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;

	return true;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepage_movable_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	gfp_t modified_mask = htlb_alloc_mask(h);

	/* Some callers might want to enforce node */
	modified_mask |= (gfp_mask & __GFP_THISNODE);

	modified_mask |= (gfp_mask & __GFP_NOWARN);

	return modified_mask;
}
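
/*
 * Example (sketch): a caller that must stay on one node would combine the
 * hstate's base mask with __GFP_THISNODE, e.g. for a migration target:
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE);
 *
 * which yields GFP_HIGHUSER or GFP_HIGHUSER_MOVABLE plus __GFP_THISNODE,
 * depending on hugepage_movable_supported(h).
 */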

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_init(struct mm_struct *mm)
{
	atomic_long_set(&mm->hugetlb_usage, 0);
}

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif

#ifdef CONFIG_NUMA
void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif

#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};

static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
	return NULL;
}

static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
{
	return NULL;
}

static inline int isolate_or_dissolve_huge_page(struct page *page,
						struct list_head *list)
{
	return -ENOMEM;
}

static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
					   unsigned long addr,
					   int avoid_reserve)
{
	return NULL;
}

static inline struct folio *
alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
			nodemask_t *nmask, gfp_t gfp_mask)
{
	return NULL;
}

static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
					       struct vm_area_struct *vma,
					       unsigned long address)
{
	return NULL;
}

static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
	return 0;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return NULL;
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	return NULL;
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct hstate *folio_hstate(struct folio *folio)
{
	return NULL;
}

static inline struct hstate *page_hstate(struct page *page)
{
	return NULL;
}

static inline struct hstate *size_to_hstate(unsigned long size)
{
	return NULL;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return PAGE_SIZE;
}

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return PAGE_MASK;
}

static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return PAGE_SIZE;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return 0;
}

static inline unsigned int huge_page_shift(struct hstate *h)
{
	return PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	return 0;
}

static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
{
	return 0;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_count_init(struct mm_struct *mm)
{
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	return *ptep;
}

static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte)
{
}

static inline void hugetlb_register_node(struct node *node)
{
}

static inline void hugetlb_unregister_node(struct node *node)
{
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
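
/*
 * Typical pairing (sketch; "h", "mm" and "ptep" are illustrative locals):
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... examine or update the huge PTE under ptl ...
 *	spin_unlock(ptl);
 */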

#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
extern void __init hugetlb_cma_reserve(int order);
#else
static inline __init void hugetlb_cma_reserve(int order)
{
}
#endif

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return page_count(virt_to_page(pte)) > 1;
}
#else
static inline bool hugetlb_pmd_shared(pte_t *pte)
{
	return false;
}
#endif

bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);

#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
/*
 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
 * implement this.
 */
#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
#endif

static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}

/*
 * Safe version of huge_pte_offset() to check the locks. See comments
 * above huge_pte_offset().
 */
static inline pte_t *
hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
{
#if defined(CONFIG_HUGETLB_PAGE) && \
	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

	/*
	 * If pmd sharing possible, locking needed to safely walk the
	 * hugetlb pgtables. More information can be found at the comment
	 * above huge_pte_offset() in the same file.
	 *
	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
	 */
	if (__vma_shareable_lock(vma))
		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
			     !lockdep_is_held(
				 &vma->vm_file->f_mapping->i_mmap_rwsem));
#endif
	return huge_pte_offset(vma->vm_mm, addr, sz);
}

#endif /* _LINUX_HUGETLB_H */