mm: thp: kill transparent_hugepage_active()
include/linux/huge_mm.h (platform/kernel/linux-starfive.git)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
                  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
                  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
                                   unsigned long addr, pmd_t *pmd,
                                   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                           pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
                 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
                 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
                   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
                    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
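/*
 * Typical use (editor's sketch, not part of the original header): a DAX
 * ->huge_fault() handler that has resolved vmf->address to a device pfn
 * can map the whole PMD in one call:
 *
 *	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
 */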
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
                                   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
                                            bool write)
{
        return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

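/*
 * Bit numbers for transparent_hugepage_flags, tested below as
 * "transparent_hugepage_flags & (1 << flag)"; most are toggled through
 * the sysfs knobs under /sys/kernel/mm/transparent_hugepage/.
 */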
enum transparent_hugepage_flag {
        TRANSPARENT_HUGEPAGE_NEVER_DAX,
        TRANSPARENT_HUGEPAGE_FLAG,
        TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
        TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
        TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
                                   struct kobj_attribute *attr,
                                   const char *buf, size_t count,
                                   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf,
                                  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

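/*
 * HPAGE_PMD_ORDER/HPAGE_PMD_NR expand lazily, so defining them before
 * HPAGE_PMD_SHIFT is fine; with CONFIG_TRANSPARENT_HUGEPAGE=n any use
 * of them hits the BUILD_BUG() in the #else branch below.
 */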
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE  ((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK  (~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE  ((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK  (~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

/*
 * Do the following checks:
 *   - For file vma, check if the linear page offset of vma is
 *     HPAGE_PMD_NR aligned within the file.  The hugepage is
 *     guaranteed to be hugepage-aligned within the file, but we must
 *     check that the PMD-aligned addresses in the VMA map to
 *     PMD-aligned offsets within the file, else the hugepage will
 *     not be PMD-mappable.
 *   - For all vmas, check if the haddr is in an aligned HPAGE_PMD_SIZE
 *     area.
 */
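/*
 * Worked example (editor's illustration): with 4K pages and 2M PMDs,
 * HPAGE_PMD_NR == 512.  A file VMA with vm_start == 0x200000 (page 512)
 * but vm_pgoff == 3 fails the first check, since 512 - 3 == 509 is not
 * a multiple of 512: no PMD-aligned address in the VMA maps to a
 * PMD-aligned offset in the file.
 */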
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long addr)
{
        unsigned long haddr;

        /* Don't have to check pgoff for anonymous vma */
        if (!vma_is_anonymous(vma)) {
                if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                                HPAGE_PMD_NR))
                        return false;
        }

        haddr = addr & HPAGE_PMD_MASK;

        if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
                return false;
        return true;
}

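/*
 * Whether THP has been opted out of for this range: per-VMA via
 * MADV_NOHUGEPAGE (VM_NOHUGEPAGE) or process-wide via
 * prctl(PR_SET_THP_DISABLE) (MMF_DISABLE_THP).
 */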
static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
                                          unsigned long vm_flags)
{
        /* Explicitly disabled through madvise. */
        if ((vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
                return false;
        return true;
}

/*
 * To be used on vmas which are known to support THP.
 * Use hugepage_vma_check() otherwise; transparent_hugepage_active() is
 * gone as of the commit above.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        /*
         * Bail out if the hardware/firmware has marked hugepage support
         * disabled.
         */
        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
                return false;

        if (!transhuge_vma_enabled(vma, vma->vm_flags))
                return false;

        if (vma_is_temporary_stack(vma))
                return false;

        if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
                return true;

        if (vma_is_dax(vma))
                return true;

        if (transparent_hugepage_flags &
                                (1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
                return !!(vma->vm_flags & VM_HUGEPAGE);

        return false;
}

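/*
 * Whether a file-backed VMA is eligible for CONFIG_READ_ONLY_THP_FOR_FS:
 * an executable mapping of a regular file that is not open for write
 * anywhere.
 */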
static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
        struct inode *inode;

        if (!vma->vm_file)
                return false;

        inode = vma->vm_file->f_inode;

        return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
               (vma->vm_flags & VM_EXEC) &&
               !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

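/*
 * Unified THP eligibility check for a VMA; per the commit above it
 * subsumes the killed transparent_hugepage_active().  @smaps is true
 * when the result only feeds the THPeligible line of /proc/<pid>/smaps.
 */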
bool hugepage_vma_check(struct vm_area_struct *vma,
                        unsigned long vm_flags,
                        bool smaps);

#define transparent_hugepage_use_zero_page()                            \
        (transparent_hugepage_flags &                                   \
         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
        return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct folio *folio);

#define split_huge_pmd(__vma, __pmd, __address)                         \
        do {                                                            \
                pmd_t *____pmd = (__pmd);                               \
                if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)   \
                                        || pmd_devmap(*____pmd))        \
                        __split_huge_pmd(__vma, __pmd, __address,       \
                                                false, NULL);           \
        }  while (0)
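/*
 * Usage sketch (editor's illustration, not from this header): a caller
 * that has already walked the page tables to the pmd just invokes the
 * macro and lets it decide whether a split is needed; it is a no-op on
 * a normal pmd:
 *
 *	pmd_t *pmd = mm_find_pmd(mm, address);
 *	if (pmd)
 *		split_huge_pmd(vma, pmd, address);
 *
 * mm_find_pmd() stands in for whatever walk the caller already did.
 */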


void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
                unsigned long address);

#define split_huge_pud(__vma, __pud, __address)                         \
        do {                                                            \
                pud_t *____pud = (__pud);                               \
                if (pud_trans_huge(*____pud)                            \
                                        || pud_devmap(*____pud))        \
                        __split_huge_pud(__vma, __pud, __address);      \
        }  while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
                     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

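/*
 * A pmd that is neither none nor present holds a swap-style entry,
 * e.g. a PMD migration entry.
 */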
static inline int is_swap_pmd(pmd_t pmd)
{
        return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
                return __pmd_trans_huge_lock(pmd, vma);
        else
                return NULL;
}
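/* Same contract as pmd_trans_huge_lock(): mmap_lock must be held on entry */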
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        if (pud_trans_huge(*pud) || pud_devmap(*pud))
                return __pud_trans_huge_lock(pud, vma);
        else
                return NULL;
}

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
        return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
                pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
                pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
        return READ_ONCE(huge_zero_page) == page;
}

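/*
 * Editor's note: the pmd_present() check is what keeps a non-present
 * entry (whose bits may happen to alias huge_zero_pfn) from matching;
 * only a real mapping of the huge zero page returns true.
 */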
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
        return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
        /*
         * Global or memcg deferred list in the second tail pages is
         * occupied by compound_head.
         */
        return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
        return false;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
        return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
                unsigned long addr)
{
        return false;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
                                          unsigned long vm_flags)
{
        return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
                                       unsigned long vm_flags,
                                       bool smaps)
{
        return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area   NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
        return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
        return 0;
}
static inline int split_huge_page(struct page *page)
{
        return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address) \
        do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
                unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pmd, __address) \
        do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
                                   unsigned long *vm_flags, int advice)
{
        BUG();
        return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
                                         unsigned long start,
                                         unsigned long end,
                                         long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
        return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
                struct vm_area_struct *vma)
{
        return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
                struct vm_area_struct *vma)
{
        return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
        return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
        return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
        return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
        return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
        return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
        unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
        unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
        return NULL;
}

static inline bool thp_migration_supported(void)
{
        return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
                struct list_head *list)
{
        return split_huge_page_to_list(&folio->page, list);
}

#endif /* _LINUX_HUGE_MM_H */