/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
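
/*
 * Huge-page counterparts of the generic fault and fork handlers in
 * mm/memory.c: these are reached once a huge (PMD- or PUD-mapped)
 * entry is found at the corresponding page-table level.
 */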
extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
vm_fault_t vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
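
/*
 * Usage sketch (illustrative; my_huge_fault() and my_lookup_pfn() are
 * hypothetical): a DAX-style driver typically resolves a PMD-sized
 * fault from its vm_operations_struct::huge_fault handler like this:
 *
 *	static vm_fault_t my_huge_fault(struct vm_fault *vmf,
 *					enum page_entry_size pe_size)
 *	{
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		pfn_t pfn = my_lookup_pfn(vmf->vma, vmf->address);
 *
 *		return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
 *					  pfn, write);
 *	}
 */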

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
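
/*
 * These bits back the sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/ (see mm/huge_memory.c).  Roughly:
 * writing "always" to "enabled" sets TRANSPARENT_HUGEPAGE_FLAG,
 * "madvise" sets TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG and "never" clears
 * both; "defrag" selects one of the DEFRAG_* bits in the same way.
 */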
struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
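
/*
 * Worked example, assuming x86-64 with 4K base pages: PMD_SHIFT is 21,
 * so HPAGE_PMD_ORDER = 21 - 12 = 9, HPAGE_PMD_NR = 512 and
 * HPAGE_PMD_SIZE below works out to 2M.
 */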

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

extern unsigned long transparent_hugepage_flags;

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_NOHUGEPAGE)
		return false;

	if (is_vma_temporary_stack(vma))
		return false;

	if (test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}
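
/*
 * Sketch of a typical caller (mirrors the anonymous fault path in
 * mm/memory.c): huge-page setup is attempted only when this check
 * passes, otherwise the fault falls back to normal pages:
 *
 *	if (pmd_none(*vmf.pmd) && transparent_hugepage_enabled(vma))
 *		ret = do_huge_pmd_anonymous_page(&vmf);
 */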

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);
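
/*
 * Note: the caller is expected to hold the page lock across a split;
 * split_huge_page() returns 0 on success and an error (e.g. -EBUSY for
 * a page with extra pins) when the compound page cannot be split.
 */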

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
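
/*
 * Sketch of a typical use: page-table walkers that cannot handle a huge
 * PMD in place split it first and then retry at the PTE level:
 *
 *	if (pmd_trans_huge(*pmd))
 *		split_huge_pmd(vma, pmd, addr);
 *
 * Afterwards the PMD points to a regular page table (or a swap/none
 * entry) and the walk can continue with PTEs.
 */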

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
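
/*
 * Typical locking pattern: take the lock only if the entry is huge, do
 * the huge-page work, then drop it; a NULL return means the entry is
 * (or became) a regular page table and must be handled at the PTE
 * level instead:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge PMD ...
 *		spin_unlock(ptl);
 *		return;
 *	}
 *	... fall back to pte_offset_map_lock() ...
 */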

static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
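
/*
 * Lifecycle note: the huge zero page is refcounted per mm;
 * mm_get_huge_zero_page() is taken the first time an mm maps it (e.g.
 * for a read fault) and mm_put_huge_zero_page() is dropped on mm
 * teardown, after which a shrinker may free the page when it is unused.
 */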

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

static inline bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
		pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */